1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2015-2019 Netronome Systems, Inc. */
4 #include <linux/bpf_trace.h>
5 #include <linux/netdevice.h>
7 #include "../nfp_app.h"
8 #include "../nfp_net.h"
9 #include "../nfp_net_dp.h"
10 #include "../nfp_net_xsk.h"
11 #include "../crypto/crypto.h"
12 #include "../crypto/fw.h"
15 /* Transmit processing
17 * One queue controller peripheral queue is used for transmit. The
18 * driver en-queues packets for transmit by advancing the write
19  * pointer. The device indicates that packets have been transmitted by
20 * advancing the read pointer. The driver maintains a local copy of
21 * the read and write pointer in @struct nfp_net_tx_ring. The driver
22 * keeps @wr_p in sync with the queue controller write pointer and can
23 * determine how many packets have been transmitted by comparing its
24 * copy of the read pointer @rd_p with the read pointer maintained by
25 * the queue controller peripheral.
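 * For example, the number of descriptors currently in flight is the
 * (free-running) difference wr_p - rd_p, and the ring is considered
 * full once that difference reaches the ring size.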
28 /* Wrappers for deciding when to stop and restart TX queues */
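/* The wake threshold (MAX_SKB_FRAGS * 4 free descriptors) is intentionally
 * much larger than the stop threshold (MAX_SKB_FRAGS + 1), providing
 * hysteresis so the queue is not stopped and restarted on every completion.
 */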
29 static int nfp_nfd3_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
31 return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
34 static int nfp_nfd3_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
36 return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
40 * nfp_nfd3_tx_ring_stop() - stop tx ring
41  * @nd_q:    netdev queue
42  * @tx_ring: driver tx queue structure
44 * Safely stop TX ring. Remember that while we are running .start_xmit()
45  * someone else may be cleaning the TX ring completions so we need to be careful here.
49 nfp_nfd3_tx_ring_stop(struct netdev_queue *nd_q,
50 struct nfp_net_tx_ring *tx_ring)
52 netif_tx_stop_queue(nd_q);
54 /* We can race with the TX completion out of NAPI so recheck */
56 if (unlikely(nfp_nfd3_tx_ring_should_wake(tx_ring)))
57 netif_tx_start_queue(nd_q);
61 * nfp_nfd3_tx_tso() - Set up Tx descriptor for LSO
62 * @r_vec: per-ring structure
63 * @txbuf: Pointer to driver soft TX descriptor
64 * @txd: Pointer to HW TX descriptor
65 * @skb: Pointer to SKB
66 * @md_bytes: Prepend length
68 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
69  * Return an error if the packet header exceeds the maximum supported LSO header size.
72 nfp_nfd3_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfd3_tx_buf *txbuf,
73 struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb, u32 md_bytes)
75 u32 l3_offset, l4_offset, hdrlen;
81 if (!skb->encapsulation) {
82 l3_offset = skb_network_offset(skb);
83 l4_offset = skb_transport_offset(skb);
84 hdrlen = skb_tcp_all_headers(skb);
86 l3_offset = skb_inner_network_offset(skb);
87 l4_offset = skb_inner_transport_offset(skb);
88 hdrlen = skb_inner_tcp_all_headers(skb);
91 txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
92 txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
94 mss = skb_shinfo(skb)->gso_size & NFD3_DESC_TX_MSS_MASK;
95 txd->l3_offset = l3_offset - md_bytes;
96 txd->l4_offset = l4_offset - md_bytes;
97 txd->lso_hdrlen = hdrlen - md_bytes;
98 txd->mss = cpu_to_le16(mss);
99 txd->flags |= NFD3_DESC_TX_LSO;
101 u64_stats_update_begin(&r_vec->tx_sync);
103 u64_stats_update_end(&r_vec->tx_sync);
107 * nfp_nfd3_tx_csum() - Set TX CSUM offload flags in TX descriptor
108 * @dp: NFP Net data path struct
109 * @r_vec: per-ring structure
110 * @txbuf: Pointer to driver soft TX descriptor
111 * @txd: Pointer to TX descriptor
112 * @skb: Pointer to SKB
114 * This function sets the TX checksum flags in the TX descriptor based
115 * on the configuration and the protocol of the packet to be transmitted.
118 nfp_nfd3_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
119 struct nfp_nfd3_tx_buf *txbuf, struct nfp_nfd3_tx_desc *txd,
122 struct ipv6hdr *ipv6h;
126 if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
129 if (skb->ip_summed != CHECKSUM_PARTIAL)
132 txd->flags |= NFD3_DESC_TX_CSUM;
133 if (skb->encapsulation)
134 txd->flags |= NFD3_DESC_TX_ENCAP;
136 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
137 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
139 if (iph->version == 4) {
140 txd->flags |= NFD3_DESC_TX_IP4_CSUM;
141 l4_hdr = iph->protocol;
142 } else if (ipv6h->version == 6) {
143 l4_hdr = ipv6h->nexthdr;
145 nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
151 txd->flags |= NFD3_DESC_TX_TCP_CSUM;
154 txd->flags |= NFD3_DESC_TX_UDP_CSUM;
157 nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
161 u64_stats_update_begin(&r_vec->tx_sync);
162 if (skb->encapsulation)
163 r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
165 r_vec->hw_csum_tx += txbuf->pkt_cnt;
166 u64_stats_update_end(&r_vec->tx_sync);
169 static int nfp_nfd3_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
171 struct metadata_dst *md_dst = skb_metadata_dst(skb);
176 if (likely(!md_dst && !tls_handle))
178 if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
184 md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
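/* Resulting prepend layout (when both are present): a 4-byte metadata
 * type word, an 8-byte TLS connection handle, then a 4-byte port id,
 * immediately followed by the packet data.
 */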
186 if (unlikely(skb_cow_head(skb, md_bytes)))
190 data = skb_push(skb, md_bytes) + md_bytes;
193 put_unaligned_be32(md_dst->u.port_info.port_id, data);
194 meta_id = NFP_NET_META_PORTID;
197		/* conn handle is opaque, we just use u64 to be able to quickly compare it to zero */
201 memcpy(data, &tls_handle, sizeof(tls_handle));
202 meta_id <<= NFP_NET_META_FIELD_SIZE;
203 meta_id |= NFP_NET_META_CONN_HANDLE;
207 put_unaligned_be32(meta_id, data);
213 * nfp_nfd3_tx() - Main transmit entry point
214 * @skb: SKB to transmit
215 * @netdev: netdev structure
217 * Return: NETDEV_TX_OK on success.
219 netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
221 struct nfp_net *nn = netdev_priv(netdev);
222 int f, nr_frags, wr_idx, md_bytes;
223 struct nfp_net_tx_ring *tx_ring;
224 struct nfp_net_r_vector *r_vec;
225 struct nfp_nfd3_tx_buf *txbuf;
226 struct nfp_nfd3_tx_desc *txd;
227 struct netdev_queue *nd_q;
228 const skb_frag_t *frag;
229 struct nfp_net_dp *dp;
236 qidx = skb_get_queue_mapping(skb);
237 tx_ring = &dp->tx_rings[qidx];
238 r_vec = tx_ring->r_vec;
240 nr_frags = skb_shinfo(skb)->nr_frags;
242 if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
243 nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
244 qidx, tx_ring->wr_p, tx_ring->rd_p);
245 nd_q = netdev_get_tx_queue(dp->netdev, qidx);
246 netif_tx_stop_queue(nd_q);
247 nfp_net_tx_xmit_more_flush(tx_ring);
248 u64_stats_update_begin(&r_vec->tx_sync);
250 u64_stats_update_end(&r_vec->tx_sync);
251 return NETDEV_TX_BUSY;
254 skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
255 if (unlikely(!skb)) {
256 nfp_net_tx_xmit_more_flush(tx_ring);
260 md_bytes = nfp_nfd3_prep_tx_meta(skb, tls_handle);
261 if (unlikely(md_bytes < 0))
264	/* Start with the head skb */
265 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
267 if (dma_mapping_error(dp->dev, dma_addr))
270 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
272 /* Stash the soft descriptor of the head then initialize it */
273 txbuf = &tx_ring->txbufs[wr_idx];
275 txbuf->dma_addr = dma_addr;
278 txbuf->real_len = skb->len;
280 /* Build TX descriptor */
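/* EOP marks the last descriptor of the frame; md_bytes in the low bits of
 * offset_eop records how much metadata was prepended ahead of the packet.
 */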
281 txd = &tx_ring->txds[wr_idx];
282 txd->offset_eop = (nr_frags ? 0 : NFD3_DESC_TX_EOP) | md_bytes;
283 txd->dma_len = cpu_to_le16(skb_headlen(skb));
284 nfp_desc_set_dma_addr_40b(txd, dma_addr);
285 txd->data_len = cpu_to_le16(skb->len);
291 /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
292 nfp_nfd3_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
293 nfp_nfd3_tx_csum(dp, r_vec, txbuf, txd, skb);
294 if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
295 txd->flags |= NFD3_DESC_TX_VLAN;
296 txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
303	/* all descs must match except for the address, length and EOP */
304 second_half = txd->vals8[1];
306 for (f = 0; f < nr_frags; f++) {
307 frag = &skb_shinfo(skb)->frags[f];
308 fsize = skb_frag_size(frag);
310 dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
311 fsize, DMA_TO_DEVICE);
312 if (dma_mapping_error(dp->dev, dma_addr))
315 wr_idx = D_IDX(tx_ring, wr_idx + 1);
316 tx_ring->txbufs[wr_idx].skb = skb;
317 tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
318 tx_ring->txbufs[wr_idx].fidx = f;
320 txd = &tx_ring->txds[wr_idx];
321 txd->dma_len = cpu_to_le16(fsize);
322 nfp_desc_set_dma_addr_40b(txd, dma_addr);
323 txd->offset_eop = md_bytes |
324 ((f == nr_frags - 1) ? NFD3_DESC_TX_EOP : 0);
325 txd->vals8[1] = second_half;
328 u64_stats_update_begin(&r_vec->tx_sync);
330 u64_stats_update_end(&r_vec->tx_sync);
333 skb_tx_timestamp(skb);
335 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
337 tx_ring->wr_p += nr_frags + 1;
338 if (nfp_nfd3_tx_ring_should_stop(tx_ring))
339 nfp_nfd3_tx_ring_stop(nd_q, tx_ring);
341 tx_ring->wr_ptr_add += nr_frags + 1;
342 if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
343 nfp_net_tx_xmit_more_flush(tx_ring);
349 frag = &skb_shinfo(skb)->frags[f];
350 dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
351 skb_frag_size(frag), DMA_TO_DEVICE);
352 tx_ring->txbufs[wr_idx].skb = NULL;
353 tx_ring->txbufs[wr_idx].dma_addr = 0;
354 tx_ring->txbufs[wr_idx].fidx = -2;
357 wr_idx += tx_ring->cnt;
359 dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
360 skb_headlen(skb), DMA_TO_DEVICE);
361 tx_ring->txbufs[wr_idx].skb = NULL;
362 tx_ring->txbufs[wr_idx].dma_addr = 0;
363 tx_ring->txbufs[wr_idx].fidx = -2;
365 nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
367 nfp_net_tx_xmit_more_flush(tx_ring);
368 u64_stats_update_begin(&r_vec->tx_sync);
370 u64_stats_update_end(&r_vec->tx_sync);
371 nfp_net_tls_tx_undo(skb, tls_handle);
372 dev_kfree_skb_any(skb);
377 * nfp_nfd3_tx_complete() - Handle completed TX packets
378 * @tx_ring: TX ring structure
379 * @budget: NAPI budget (only used as bool to determine if in NAPI context)
381 void nfp_nfd3_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
383 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
384 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
385 u32 done_pkts = 0, done_bytes = 0;
386 struct netdev_queue *nd_q;
390 if (tx_ring->wr_p == tx_ring->rd_p)
393 /* Work out how many descriptors have been transmitted */
394 qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
396 if (qcp_rd_p == tx_ring->qcp_rd_p)
399 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
402 const skb_frag_t *frag;
403 struct nfp_nfd3_tx_buf *tx_buf;
408 idx = D_IDX(tx_ring, tx_ring->rd_p++);
409 tx_buf = &tx_ring->txbufs[idx];
415 nr_frags = skb_shinfo(skb)->nr_frags;
420 dma_unmap_single(dp->dev, tx_buf->dma_addr,
421 skb_headlen(skb), DMA_TO_DEVICE);
423 done_pkts += tx_buf->pkt_cnt;
424 done_bytes += tx_buf->real_len;
427 frag = &skb_shinfo(skb)->frags[fidx];
428 dma_unmap_page(dp->dev, tx_buf->dma_addr,
429 skb_frag_size(frag), DMA_TO_DEVICE);
432 /* check for last gather fragment */
433 if (fidx == nr_frags - 1)
434 napi_consume_skb(skb, budget);
436 tx_buf->dma_addr = 0;
441 tx_ring->qcp_rd_p = qcp_rd_p;
443 u64_stats_update_begin(&r_vec->tx_sync);
444 r_vec->tx_bytes += done_bytes;
445 r_vec->tx_pkts += done_pkts;
446 u64_stats_update_end(&r_vec->tx_sync);
451 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
452 netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
453 if (nfp_nfd3_tx_ring_should_wake(tx_ring)) {
454 /* Make sure TX thread will see updated tx_ring->rd_p */
457 if (unlikely(netif_tx_queue_stopped(nd_q)))
458 netif_tx_wake_queue(nd_q);
461 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
462 "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
463 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
466 static bool nfp_nfd3_xdp_complete(struct nfp_net_tx_ring *tx_ring)
468 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
469 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
470 u32 done_pkts = 0, done_bytes = 0;
475 /* Work out how many descriptors have been transmitted */
476 qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
478 if (qcp_rd_p == tx_ring->qcp_rd_p)
481 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
483 done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
484 todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
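/* Completion work for the XDP TX ring is bounded per call; done_all tells
 * the caller whether everything pending was processed or another pass is
 * still needed.
 */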
486 tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
490 idx = D_IDX(tx_ring, tx_ring->rd_p);
493 done_bytes += tx_ring->txbufs[idx].real_len;
496 u64_stats_update_begin(&r_vec->tx_sync);
497 r_vec->tx_bytes += done_bytes;
498 r_vec->tx_pkts += done_pkts;
499 u64_stats_update_end(&r_vec->tx_sync);
501 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
502 "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
503 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
508 /* Receive processing
512 nfp_nfd3_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
517 frag = napi_alloc_frag(dp->fl_bufsz);
523 page = dev_alloc_page();
526 frag = page_address(page);
529 *dma_addr = nfp_net_dma_map_rx(dp, frag);
530 if (dma_mapping_error(dp->dev, *dma_addr)) {
531 nfp_net_free_frag(frag, dp->xdp_prog);
532 nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
540 * nfp_nfd3_rx_give_one() - Put mapped skb on the software and hardware rings
541 * @dp: NFP Net data path struct
542 * @rx_ring: RX ring structure
543 * @frag: page fragment buffer
544 * @dma_addr: DMA address of skb mapping
547 nfp_nfd3_rx_give_one(const struct nfp_net_dp *dp,
548 struct nfp_net_rx_ring *rx_ring,
549 void *frag, dma_addr_t dma_addr)
553 wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
555 nfp_net_dma_sync_dev_rx(dp, dma_addr);
557 /* Stash SKB and DMA address away */
558 rx_ring->rxbufs[wr_idx].frag = frag;
559 rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
561 /* Fill freelist descriptor */
562 rx_ring->rxds[wr_idx].fld.reserved = 0;
563 rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
564 /* DMA address is expanded to 48-bit width in freelist for NFP3800,
565	 * so the *_48b macro is used accordingly; it is also OK to fill
566	 * a 40-bit address since the top 8 bits get set to 0.
568 nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
569 dma_addr + dp->rx_dma_off);
572 if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
573 /* Update write pointer of the freelist queue. Make
574 * sure all writes are flushed before telling the hardware.
577 nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
582 * nfp_nfd3_rx_ring_fill_freelist() - Give buffers from the ring to FW
583 * @dp: NFP Net data path struct
584 * @rx_ring: RX ring to fill
586 void nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
587 struct nfp_net_rx_ring *rx_ring)
591 if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
592 return nfp_net_xsk_rx_ring_fill_freelist(rx_ring);
594 for (i = 0; i < rx_ring->cnt - 1; i++)
595 nfp_nfd3_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
596 rx_ring->rxbufs[i].dma_addr);
600 * nfp_nfd3_rx_csum_has_errors() - check whether the RX descriptor reports any csum errors
601 * @flags: RX descriptor flags field in CPU byte order
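 * The *_OK bits, once shifted by PCIE_DESC_RX_CSUM_OK_SHIFT, line up with
 * the corresponding "checksum checked" bits, so a non-zero result means at
 * least one checked protocol failed verification.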
603 static int nfp_nfd3_rx_csum_has_errors(u16 flags)
605 u16 csum_all_checked, csum_all_ok;
607 csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
608 csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
610 return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
614 * nfp_nfd3_rx_csum() - set SKB checksum field based on RX descriptor flags
615 * @dp: NFP Net data path struct
616 * @r_vec: per-ring structure
617 * @rxd: Pointer to RX descriptor
618 * @meta: Parsed metadata prepend
619 * @skb: Pointer to SKB
622 nfp_nfd3_rx_csum(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
623 const struct nfp_net_rx_desc *rxd,
624 const struct nfp_meta_parsed *meta, struct sk_buff *skb)
626 skb_checksum_none_assert(skb);
628 if (!(dp->netdev->features & NETIF_F_RXCSUM))
631 if (meta->csum_type) {
632 skb->ip_summed = meta->csum_type;
633 skb->csum = meta->csum;
634 u64_stats_update_begin(&r_vec->rx_sync);
635 r_vec->hw_csum_rx_complete++;
636 u64_stats_update_end(&r_vec->rx_sync);
640 if (nfp_nfd3_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
641 u64_stats_update_begin(&r_vec->rx_sync);
642 r_vec->hw_csum_rx_error++;
643 u64_stats_update_end(&r_vec->rx_sync);
647 /* Assume that the firmware will never report inner CSUM_OK unless outer
648 * L4 headers were successfully parsed. FW will always report zero UDP
649 * checksum as CSUM_OK.
651 if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
652 rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
653 __skb_incr_checksum_unnecessary(skb);
654 u64_stats_update_begin(&r_vec->rx_sync);
655 r_vec->hw_csum_rx_ok++;
656 u64_stats_update_end(&r_vec->rx_sync);
659 if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
660 rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
661 __skb_incr_checksum_unnecessary(skb);
662 u64_stats_update_begin(&r_vec->rx_sync);
663 r_vec->hw_csum_rx_inner_ok++;
664 u64_stats_update_end(&r_vec->rx_sync);
669 nfp_nfd3_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
670 unsigned int type, __be32 *hash)
672 if (!(netdev->features & NETIF_F_RXHASH))
676 case NFP_NET_RSS_IPV4:
677 case NFP_NET_RSS_IPV6:
678 case NFP_NET_RSS_IPV6_EX:
679 meta->hash_type = PKT_HASH_TYPE_L3;
682 meta->hash_type = PKT_HASH_TYPE_L4;
686 meta->hash = get_unaligned_be32(hash);
690 nfp_nfd3_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
691 void *data, struct nfp_net_rx_desc *rxd)
693 struct nfp_net_rx_hash *rx_hash = data;
695 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
698 nfp_nfd3_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
703 nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
704 void *data, void *pkt, unsigned int pkt_len, int meta_len)
708 meta_info = get_unaligned_be32(data);
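/* The leading word packs one 4-bit type per metadata field; types are
 * consumed from the least significant nibble as each field is parsed in
 * turn.
 */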
712 switch (meta_info & NFP_NET_META_FIELD_MASK) {
713 case NFP_NET_META_HASH:
714 meta_info >>= NFP_NET_META_FIELD_SIZE;
715 nfp_nfd3_set_hash(netdev, meta,
716 meta_info & NFP_NET_META_FIELD_MASK,
720 case NFP_NET_META_MARK:
721 meta->mark = get_unaligned_be32(data);
724 case NFP_NET_META_PORTID:
725 meta->portid = get_unaligned_be32(data);
728 case NFP_NET_META_CSUM:
729 meta->csum_type = CHECKSUM_COMPLETE;
731 (__force __wsum)__get_unaligned_cpu32(data);
734 case NFP_NET_META_RESYNC_INFO:
735 if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
738 data += sizeof(struct nfp_net_tls_resync_req);
744 meta_info >>= NFP_NET_META_FIELD_SIZE;
751 nfp_nfd3_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
752 struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
755 u64_stats_update_begin(&r_vec->rx_sync);
757 /* If we have both skb and rxbuf the replacement buffer allocation
758 * must have failed, count this as an alloc failure.
761 r_vec->rx_replace_buf_alloc_fail++;
762 u64_stats_update_end(&r_vec->rx_sync);
764	/* The skb is built around the frag; freeing the skb would free the frag,
765	 * so to be able to reuse it we need an extra ref.
767 if (skb && rxbuf && skb->head == rxbuf->frag)
768 page_ref_inc(virt_to_head_page(rxbuf->frag));
770 nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
772 dev_kfree_skb_any(skb);
776 nfp_nfd3_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
777 struct nfp_net_tx_ring *tx_ring,
778 struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
779 unsigned int pkt_len, bool *completed)
781 unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
782 struct nfp_nfd3_tx_buf *txbuf;
783 struct nfp_nfd3_tx_desc *txd;
786	/* Reject if xdp_adjust_tail grew the packet beyond the DMA area */
787 if (pkt_len + dma_off > dma_map_sz)
790 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
792 nfp_nfd3_xdp_complete(tx_ring);
796 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
797 nfp_nfd3_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
803 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
805 /* Stash the soft descriptor of the head then initialize it */
806 txbuf = &tx_ring->txbufs[wr_idx];
808 nfp_nfd3_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
810 txbuf->frag = rxbuf->frag;
811 txbuf->dma_addr = rxbuf->dma_addr;
814 txbuf->real_len = pkt_len;
816 dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
817 pkt_len, DMA_BIDIRECTIONAL);
819 /* Build TX descriptor */
820 txd = &tx_ring->txds[wr_idx];
821 txd->offset_eop = NFD3_DESC_TX_EOP;
822 txd->dma_len = cpu_to_le16(pkt_len);
823 nfp_desc_set_dma_addr_40b(txd, rxbuf->dma_addr + dma_off);
824 txd->data_len = cpu_to_le16(pkt_len);
831 tx_ring->wr_ptr_add++;
836 * nfp_nfd3_rx() - receive up to @budget packets on @rx_ring
837 * @rx_ring: RX ring to receive from
838 * @budget: NAPI budget
840 * Note, this function is separated out from the napi poll function to
841 * more cleanly separate packet receive code from other bookkeeping
842 * functions performed in the napi poll function.
844 * Return: Number of packets received.
846 static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget)
848 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
849 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
850 struct nfp_net_tx_ring *tx_ring;
851 struct bpf_prog *xdp_prog;
852 bool xdp_tx_cmpl = false;
853 unsigned int true_bufsz;
859 xdp_prog = READ_ONCE(dp->xdp_prog);
860 true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
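/* With an XDP program attached the datapath uses page-sized buffers, which
 * gives the program room to adjust the packet head and tail within the page.
 */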
861 xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
863 tx_ring = r_vec->xdp_ring;
865 while (pkts_polled < budget) {
866 unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
867 struct nfp_net_rx_buf *rxbuf;
868 struct nfp_net_rx_desc *rxd;
869 struct nfp_meta_parsed meta;
870 bool redir_egress = false;
871 struct net_device *netdev;
872 dma_addr_t new_dma_addr;
873 u32 meta_len_xdp = 0;
876 idx = D_IDX(rx_ring, rx_ring->rd_p);
878 rxd = &rx_ring->rxds[idx];
879 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
882		/* Memory barrier to ensure that we won't do other reads before the DD bit. */
887 memset(&meta, 0, sizeof(meta));
892 rxbuf = &rx_ring->rxbufs[idx];
894 * <-- [rx_offset] -->
895 * ---------------------------------------------------------
896 * | [XX] | metadata | packet | XXXX |
897 * ---------------------------------------------------------
898 * <---------------- data_len --------------->
900		 * The rx_offset is fixed for all packets while the meta_len can vary
901		 * on a packet-by-packet basis. If rx_offset is set to zero
902 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
903 * buffer and is immediately followed by the packet (no [XX]).
905 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
906 data_len = le16_to_cpu(rxd->rxd.data_len);
907 pkt_len = data_len - meta_len;
909 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
910 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
913 pkt_off += dp->rx_offset;
914 meta_off = pkt_off - meta_len;
917 u64_stats_update_begin(&r_vec->rx_sync);
919 r_vec->rx_bytes += pkt_len;
920 u64_stats_update_end(&r_vec->rx_sync);
922 if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
923 (dp->rx_offset && meta_len > dp->rx_offset))) {
924 nn_dp_warn(dp, "oversized RX packet metadata %u\n",
926 nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
930 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
933 if (!dp->chained_metadata_format) {
934 nfp_nfd3_set_hash_desc(dp->netdev, &meta,
935 rxbuf->frag + meta_off, rxd);
936 } else if (meta_len) {
937 if (unlikely(nfp_nfd3_parse_meta(dp->netdev, &meta,
938 rxbuf->frag + meta_off,
939 rxbuf->frag + pkt_off,
940 pkt_len, meta_len))) {
941 nn_dp_warn(dp, "invalid RX packet metadata\n");
942 nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf,
948 if (xdp_prog && !meta.portid) {
949 void *orig_data = rxbuf->frag + pkt_off;
950 unsigned int dma_off;
953 xdp_prepare_buff(&xdp,
954 rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
955 pkt_off - NFP_NET_RX_BUF_HEADROOM,
958 act = bpf_prog_run_xdp(xdp_prog, &xdp);
960 pkt_len = xdp.data_end - xdp.data;
961 pkt_off += xdp.data - orig_data;
965 meta_len_xdp = xdp.data - xdp.data_meta;
968 dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
969 if (unlikely(!nfp_nfd3_tx_xdp_buf(dp, rx_ring,
975 trace_xdp_exception(dp->netdev,
979 bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
982 trace_xdp_exception(dp->netdev, xdp_prog, act);
985 nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
991 if (likely(!meta.portid)) {
993 } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
994 struct nfp_net *nn = netdev_priv(dp->netdev);
996 nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
998 nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
1004 nn = netdev_priv(dp->netdev);
1005 netdev = nfp_app_dev_get(nn->app, meta.portid,
1007 if (unlikely(!netdev)) {
1008 nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf,
1013 if (nfp_netdev_is_nfp_repr(netdev))
1014 nfp_repr_inc_rx_stats(netdev, pkt_len);
1017 skb = build_skb(rxbuf->frag, true_bufsz);
1018 if (unlikely(!skb)) {
1019 nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1022 new_frag = nfp_nfd3_napi_alloc_one(dp, &new_dma_addr);
1023 if (unlikely(!new_frag)) {
1024 nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
1028 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
1030 nfp_nfd3_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
1032 skb_reserve(skb, pkt_off);
1033 skb_put(skb, pkt_len);
1035 skb->mark = meta.mark;
1036 skb_set_hash(skb, meta.hash, meta.hash_type);
1038 skb_record_rx_queue(skb, rx_ring->idx);
1039 skb->protocol = eth_type_trans(skb, netdev);
1041 nfp_nfd3_rx_csum(dp, r_vec, rxd, &meta, skb);
1043 #ifdef CONFIG_TLS_DEVICE
1044 if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
1045 skb->decrypted = true;
1046 u64_stats_update_begin(&r_vec->rx_sync);
1048 u64_stats_update_end(&r_vec->rx_sync);
1052 if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
1053 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1054 le16_to_cpu(rxd->rxd.vlan));
1056 skb_metadata_set(skb, meta_len_xdp);
1058 if (likely(!redir_egress)) {
1059 napi_gro_receive(&rx_ring->r_vec->napi, skb);
1062 skb_reset_network_header(skb);
1063 __skb_push(skb, ETH_HLEN);
1064 dev_queue_xmit(skb);
1069 if (tx_ring->wr_ptr_add)
1070 nfp_net_tx_xmit_more_flush(tx_ring);
1071 else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
1073 if (!nfp_nfd3_xdp_complete(tx_ring))
1074 pkts_polled = budget;
1081 * nfp_nfd3_poll() - napi poll function
1082 * @napi: NAPI structure
1083 * @budget: NAPI budget
1085 * Return: number of packets polled.
1087 int nfp_nfd3_poll(struct napi_struct *napi, int budget)
1089 struct nfp_net_r_vector *r_vec =
1090 container_of(napi, struct nfp_net_r_vector, napi);
1091 unsigned int pkts_polled = 0;
1094 nfp_nfd3_tx_complete(r_vec->tx_ring, budget);
1096 pkts_polled = nfp_nfd3_rx(r_vec->rx_ring, budget);
1098 if (pkts_polled < budget)
1099 if (napi_complete_done(napi, pkts_polled))
1100 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
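/* When adaptive coalescing is enabled, feed the RX and TX byte/packet
 * counters into DIM so it can retune the interrupt moderation settings.
 */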
1102 if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
1103 struct dim_sample dim_sample = {};
1108 start = u64_stats_fetch_begin(&r_vec->rx_sync);
1109 pkts = r_vec->rx_pkts;
1110 bytes = r_vec->rx_bytes;
1111 } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
1113 dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
1114 net_dim(&r_vec->rx_dim, dim_sample);
1117 if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
1118 struct dim_sample dim_sample = {};
1123 start = u64_stats_fetch_begin(&r_vec->tx_sync);
1124 pkts = r_vec->tx_pkts;
1125 bytes = r_vec->tx_bytes;
1126 } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
1128 dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
1129 net_dim(&r_vec->tx_dim, dim_sample);
1135 /* Control device data path
1139 nfp_nfd3_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1140 struct sk_buff *skb, bool old)
1142 unsigned int real_len = skb->len, meta_len = 0;
1143 struct nfp_net_tx_ring *tx_ring;
1144 struct nfp_nfd3_tx_buf *txbuf;
1145 struct nfp_nfd3_tx_desc *txd;
1146 struct nfp_net_dp *dp;
1147 dma_addr_t dma_addr;
1150 dp = &r_vec->nfp_net->dp;
1151 tx_ring = r_vec->tx_ring;
1153 if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
1154 nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
1158 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1159 u64_stats_update_begin(&r_vec->tx_sync);
1161 u64_stats_update_end(&r_vec->tx_sync);
1163 __skb_queue_tail(&r_vec->queue, skb);
1165 __skb_queue_head(&r_vec->queue, skb);
1169 if (nfp_app_ctrl_has_meta(nn->app)) {
1170 if (unlikely(skb_headroom(skb) < 8)) {
1171 nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
1175 put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
1176 put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
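/* Control messages carry a fixed 8-byte prepend: the NFP_NET_META_PORTID
 * type word followed by the NFP_META_PORT_ID_CTRL port id, pushed in
 * reverse order above.
 */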
1179	/* Start with the head skb */
1180 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
1182 if (dma_mapping_error(dp->dev, dma_addr))
1185 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
1187 /* Stash the soft descriptor of the head then initialize it */
1188 txbuf = &tx_ring->txbufs[wr_idx];
1190 txbuf->dma_addr = dma_addr;
1193 txbuf->real_len = real_len;
1195 /* Build TX descriptor */
1196 txd = &tx_ring->txds[wr_idx];
1197 txd->offset_eop = meta_len | NFD3_DESC_TX_EOP;
1198 txd->dma_len = cpu_to_le16(skb_headlen(skb));
1199 nfp_desc_set_dma_addr_40b(txd, dma_addr);
1200 txd->data_len = cpu_to_le16(skb->len);
1204 txd->lso_hdrlen = 0;
1207 tx_ring->wr_ptr_add++;
1208 nfp_net_tx_xmit_more_flush(tx_ring);
1213 nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
1215 u64_stats_update_begin(&r_vec->tx_sync);
1217 u64_stats_update_end(&r_vec->tx_sync);
1218 dev_kfree_skb_any(skb);
1222 static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
1224 struct sk_buff *skb;
1226 while ((skb = __skb_dequeue(&r_vec->queue)))
1227 if (nfp_nfd3_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
1232 nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
1234 u32 meta_type, meta_tag;
1236 if (!nfp_app_ctrl_has_meta(nn->app))
1242 meta_type = get_unaligned_be32(data);
1243 meta_tag = get_unaligned_be32(data + 4);
1245 return (meta_type == NFP_NET_META_PORTID &&
1246 meta_tag == NFP_META_PORT_ID_CTRL);
1250 nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
1251 struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
1253 unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
1254 struct nfp_net_rx_buf *rxbuf;
1255 struct nfp_net_rx_desc *rxd;
1256 dma_addr_t new_dma_addr;
1257 struct sk_buff *skb;
1261 idx = D_IDX(rx_ring, rx_ring->rd_p);
1263 rxd = &rx_ring->rxds[idx];
1264 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
1267 /* Memory barrier to ensure that we won't do other reads
1268 * before the DD bit.
1274 rxbuf = &rx_ring->rxbufs[idx];
1275 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
1276 data_len = le16_to_cpu(rxd->rxd.data_len);
1277 pkt_len = data_len - meta_len;
1279 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
1280 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1281 pkt_off += meta_len;
1283 pkt_off += dp->rx_offset;
1284 meta_off = pkt_off - meta_len;
1287 u64_stats_update_begin(&r_vec->rx_sync);
1289 r_vec->rx_bytes += pkt_len;
1290 u64_stats_update_end(&r_vec->rx_sync);
1292 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
1294 if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
1295 nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
1297 nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1301 skb = build_skb(rxbuf->frag, dp->fl_bufsz);
1302 if (unlikely(!skb)) {
1303 nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1306 new_frag = nfp_nfd3_napi_alloc_one(dp, &new_dma_addr);
1307 if (unlikely(!new_frag)) {
1308 nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
1312 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
1314 nfp_nfd3_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
1316 skb_reserve(skb, pkt_off);
1317 skb_put(skb, pkt_len);
1319 nfp_app_ctrl_rx(nn->app, skb);
1324 static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
1326 struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
1327 struct nfp_net *nn = r_vec->nfp_net;
1328 struct nfp_net_dp *dp = &nn->dp;
1329 unsigned int budget = 512;
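/* Bound the work done in a single tasklet invocation so the control
 * vector cannot monopolise the CPU.
 */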
1331 while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
1337 void nfp_nfd3_ctrl_poll(struct tasklet_struct *t)
1339 struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
1341 spin_lock(&r_vec->lock);
1342 nfp_nfd3_tx_complete(r_vec->tx_ring, 0);
1343 __nfp_ctrl_tx_queued(r_vec);
1344 spin_unlock(&r_vec->lock);
1346 if (nfp_ctrl_rx(r_vec)) {
1347 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
1349 tasklet_schedule(&r_vec->tasklet);
1350 nn_dp_warn(&r_vec->nfp_net->dp,
1351 "control message budget exceeded!\n");