1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Ethernet driver
4 * Copyright (C) 2020 Marvell.
8 #include <linux/etherdevice.h>
11 #include <linux/bpf.h>
12 #include <linux/bpf_trace.h>
13 #include <net/ip6_checksum.h>
16 #include "otx2_common.h"
17 #include "otx2_struct.h"
18 #include "otx2_txrx.h"
22 #define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
23 #define PTP_PORT 0x13F
24 /* PTPv2 header Original Timestamp starts at byte offset 34 and
25 * contains a 6-byte seconds field and a 4-byte nanoseconds field.
27 #define PTP_SYNC_SEC_OFFSET 34
29 static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
30 struct bpf_prog *prog,
31 struct nix_cqe_rx_s *cqe,
32 struct otx2_cq_queue *cq,
33 bool *need_xdp_flush);
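/* Read CQ_OP_STATUS with an atomic fetch-and-add on the CQ operation
 * address, extract the head/tail pointers and compute the number of
 * pending CQEs, accounting for ring wrap-around.
 */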
35 static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
36 struct otx2_cq_queue *cq)
38 u64 incr = (u64)(cq->cq_idx) << 32;
41 status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);
43 if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
44 status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
45 dev_err(pfvf->dev, "CQ stopped due to error");
49 cq->cq_tail = status & 0xFFFFF;
50 cq->cq_head = (status >> 20) & 0xFFFFF;
51 if (cq->cq_tail < cq->cq_head)
52 cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +
55 cq->pend_cqe = cq->cq_tail - cq->cq_head;
60 static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
62 struct nix_cqe_hdr_s *cqe_hdr;
64 cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
65 if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
69 cq->cq_head &= (cq->cqe_cnt - 1);
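/* Map a fragment index to the position of its length field within the
 * 64-bit sg_lens words of the SG subdescriptor; the order within each
 * group of four is reversed to match the hardware layout.
 */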
74 static unsigned int frag_num(unsigned int i)
77 return (i & ~3) + 3 - (i & 3);
83 static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
84 struct sk_buff *skb, int seg, int *len)
86 const skb_frag_t *frag;
90 /* First segment is always skb->data */
92 page = virt_to_page(skb->data);
93 offset = offset_in_page(skb->data);
94 *len = skb_headlen(skb);
96 frag = &skb_shinfo(skb)->frags[seg - 1];
97 page = skb_frag_page(frag);
98 offset = skb_frag_off(frag);
99 *len = skb_frag_size(frag);
101 return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
104 static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
108 for (seg = 0; seg < sg->num_segs; seg++) {
109 otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
110 sg->size[seg], DMA_TO_DEVICE);
115 static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
116 struct otx2_snd_queue *sq,
117 struct nix_cqe_tx_s *cqe)
119 struct nix_send_comp_s *snd_comp = &cqe->comp;
124 sg = &sq->sg[snd_comp->sqe_id];
126 pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
127 otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
128 sg->size[0], DMA_TO_DEVICE);
129 page = virt_to_page(phys_to_virt(pa));
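/* Handle a TX completion CQE: report send errors, deliver the TX hardware
 * timestamp if one was requested, update packet/byte counters, then unmap
 * the SKB fragments and free the SKB.
 */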
133 static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
134 struct otx2_cq_queue *cq,
135 struct otx2_snd_queue *sq,
136 struct nix_cqe_tx_s *cqe,
137 int budget, int *tx_pkts, int *tx_bytes)
139 struct nix_send_comp_s *snd_comp = &cqe->comp;
140 struct skb_shared_hwtstamps ts;
141 struct sk_buff *skb = NULL;
146 if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
147 net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
148 pfvf->netdev->name, cq->cint_idx,
151 sg = &sq->sg[snd_comp->sqe_id];
152 skb = (struct sk_buff *)sg->skb;
156 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
157 timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
158 if (timestamp != 1) {
159 timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
160 err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
162 memset(&ts, 0, sizeof(ts));
163 ts.hwtstamp = ns_to_ktime(tsns);
164 skb_tstamp_tx(skb, &ts);
169 *tx_bytes += skb->len;
171 otx2_dma_unmap_skb_frags(pfvf, sg);
172 napi_consume_skb(skb, budget);
176 static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
177 struct sk_buff *skb, void *data)
182 if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
185 timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);
186 /* The first 8 bytes are the timestamp */
187 err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
191 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
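/* Attach one receive segment to the SKB as a page fragment. For the first
 * segment, consume the 8-byte hardware timestamp if one was prepended.
 * If MAX_SKB_FRAGS is already reached, return the buffer pointer to the
 * aura for reuse instead.
 */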
194 static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
195 u64 iova, int len, struct nix_rx_parse_s *parse,
202 va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
204 if (likely(!skb_shinfo(skb)->nr_frags)) {
205 /* Check if data starts at some nonzero offset
206 * from the start of the buffer. For now the
207 * only possible offset is 8 bytes in the case
208 * where the packet is prepended by a timestamp.
211 otx2_set_rxtstamp(pfvf, skb, va);
212 off = OTX2_HW_TIMESTAMP_LEN;
216 page = virt_to_page(va);
217 if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
218 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
219 va - page_address(page) + off,
220 len - off, pfvf->rbsize);
224 /* If more than MAX_SKB_FRAGS fragments are received then
225 * give back those buffer pointers to hardware for reuse.
227 pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);
232 static void otx2_set_rxhash(struct otx2_nic *pfvf,
233 struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
235 enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
236 struct otx2_rss_info *rss;
239 if (!(pfvf->netdev->features & NETIF_F_RXHASH))
242 rss = &pfvf->hw.rss_info;
243 if (rss->flowkey_cfg) {
244 if (rss->flowkey_cfg &
245 ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
246 hash_type = PKT_HASH_TYPE_L4;
248 hash_type = PKT_HASH_TYPE_L3;
249 hash = cqe->hdr.flow_tag;
251 skb_set_hash(skb, hash, hash_type);
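/* Walk the receive CQE's scatter/gather descriptors and return every
 * segment's buffer pointer to the aura so hardware can reuse it.
 */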
254 static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
257 struct nix_rx_sg_s *sg = &cqe->sg;
263 end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
264 while (start < end) {
265 sg = (struct nix_rx_sg_s *)start;
266 seg_addr = &sg->seg_addr;
267 for (seg = 0; seg < sg->segs; seg++, seg_addr++)
268 pfvf->hw_ops->aura_freeptr(pfvf, qidx,
269 *seg_addr & ~0x07ULL);
270 start += sizeof(*sg);
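/* Account a receive error CQE in the driver statistics. Returns true if
 * the packet must be dropped and its buffers freed back to the pool,
 * false if it should still be passed up the stack (e.g. RXALL enabled
 * or ignorable parser errors).
 */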
274 static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
275 struct nix_cqe_rx_s *cqe, int qidx)
277 struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
278 struct nix_rx_parse_s *parse = &cqe->parse;
280 if (netif_msg_rx_err(pfvf))
281 netdev_err(pfvf->netdev,
282 "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
283 qidx, parse->errlev, parse->errcode);
285 if (parse->errlev == NPC_ERRLVL_RE) {
286 switch (parse->errcode) {
288 case ERRCODE_FCS_RCV:
289 atomic_inc(&stats->rx_fcs_errs);
291 case ERRCODE_UNDERSIZE:
292 atomic_inc(&stats->rx_undersize_errs);
294 case ERRCODE_OVERSIZE:
295 atomic_inc(&stats->rx_oversize_errs);
297 case ERRCODE_OL2_LEN_MISMATCH:
298 atomic_inc(&stats->rx_len_errs);
301 atomic_inc(&stats->rx_other_errs);
304 } else if (parse->errlev == NPC_ERRLVL_NIX) {
305 switch (parse->errcode) {
306 case ERRCODE_OL3_LEN:
307 case ERRCODE_OL4_LEN:
308 case ERRCODE_IL3_LEN:
309 case ERRCODE_IL4_LEN:
310 atomic_inc(&stats->rx_len_errs);
312 case ERRCODE_OL4_CSUM:
313 case ERRCODE_IL4_CSUM:
314 atomic_inc(&stats->rx_csum_errs);
317 atomic_inc(&stats->rx_other_errs);
321 atomic_inc(&stats->rx_other_errs);
322 /* For now ignore all the NPC parser errors and
323 * pass the packets to the stack.
328 /* If RXALL is enabled, pass on packets to the stack. */
329 if (pfvf->netdev->features & NETIF_F_RXALL)
332 /* Free buffer back to pool */
334 otx2_free_rcv_seg(pfvf, cqe, qidx);
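/* Process one receive CQE: drop error packets, let XDP consume the frame
 * if a program is attached, otherwise build an SKB from the SG segments,
 * fill in hash/checksum/timestamp metadata and pass it up via
 * napi_gro_frags().
 */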
338 static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
339 struct napi_struct *napi,
340 struct otx2_cq_queue *cq,
341 struct nix_cqe_rx_s *cqe, bool *need_xdp_flush)
343 struct nix_rx_parse_s *parse = &cqe->parse;
344 struct nix_rx_sg_s *sg = &cqe->sg;
345 struct sk_buff *skb = NULL;
351 if (unlikely(parse->errlev || parse->errcode)) {
352 if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
357 if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
360 skb = napi_get_frags(napi);
365 end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
366 while (start < end) {
367 sg = (struct nix_rx_sg_s *)start;
368 seg_addr = &sg->seg_addr;
369 seg_size = (void *)sg;
370 for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
371 if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
372 seg_size[seg], parse, cq->cq_idx))
375 start += sizeof(*sg);
377 otx2_set_rxhash(pfvf, cqe, skb);
379 skb_record_rx_queue(skb, cq->cq_idx);
380 if (pfvf->netdev->features & NETIF_F_RXCSUM)
381 skb->ip_summed = CHECKSUM_UNNECESSARY;
383 skb_mark_for_recycle(skb);
385 napi_gro_frags(napi);
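/* RX NAPI poll for one CQ: handle up to 'budget' pending CQEs, mark each
 * handled CQE invalid and ring the CQ doorbell to release them to HW.
 * Returns the number of CQEs processed.
 */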
388 static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
389 struct napi_struct *napi,
390 struct otx2_cq_queue *cq, int budget)
392 bool need_xdp_flush = false;
393 struct nix_cqe_rx_s *cqe;
394 int processed_cqe = 0;
396 if (cq->pend_cqe >= budget)
399 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
403 while (likely(processed_cqe < budget) && cq->pend_cqe) {
404 cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
405 if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
412 cq->cq_head &= (cq->cqe_cnt - 1);
414 otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush);
416 cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
417 cqe->sg.seg_addr = 0x00;
424 /* Free CQEs to HW */
425 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
426 ((u64)cq->cq_idx << 32) | processed_cqe);
428 return processed_cqe;
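/* Refill the RX buffer pool with as many pointers as this CQ consumed.
 * Returns the number of buffers actually refilled; when nothing could be
 * refilled the NAPI handler falls back to deferred refill work.
 */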
431 int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
433 struct otx2_nic *pfvf = dev;
434 int cnt = cq->pool_ptrs;
437 while (cq->pool_ptrs) {
438 if (otx2_alloc_buffer(pfvf, cq, &bufptr))
440 otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
444 return cnt - cq->pool_ptrs;
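/* TX NAPI poll for one CQ: process send completions (regular or XDP),
 * advance the SQ consumer head, ring the CQ doorbell, report completed
 * packets/bytes to the stack and wake the TX queue if it was stopped.
 */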
447 static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
448 struct otx2_cq_queue *cq, int budget)
450 int tx_pkts = 0, tx_bytes = 0, qidx;
451 struct otx2_snd_queue *sq;
452 struct nix_cqe_tx_s *cqe;
453 int processed_cqe = 0;
455 if (cq->pend_cqe >= budget)
458 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
462 qidx = cq->cq_idx - pfvf->hw.rx_queues;
463 sq = &pfvf->qset.sq[qidx];
465 while (likely(processed_cqe < budget) && cq->pend_cqe) {
466 cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
467 if (unlikely(!cqe)) {
473 qidx = cq->cq_idx - pfvf->hw.rx_queues;
475 if (cq->cq_type == CQ_XDP)
476 otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
478 otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
479 cqe, budget, &tx_pkts, &tx_bytes);
481 cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
486 sq->cons_head &= (sq->sqe_cnt - 1);
489 /* Free CQEs to HW */
490 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
491 ((u64)cq->cq_idx << 32) | processed_cqe);
493 if (likely(tx_pkts)) {
494 struct netdev_queue *txq;
496 qidx = cq->cq_idx - pfvf->hw.rx_queues;
498 if (qidx >= pfvf->hw.tx_queues)
499 qidx -= pfvf->hw.xdp_queues;
500 txq = netdev_get_tx_queue(pfvf->netdev, qidx);
501 netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
502 /* Check if queue was stopped earlier due to ring full */
504 if (netif_tx_queue_stopped(txq) &&
505 netif_carrier_ok(pfvf->netdev))
506 netif_tx_wake_queue(txq);
511 static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
513 struct dim_sample dim_sample;
514 u64 rx_frames, rx_bytes;
516 rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
517 OTX2_GET_RX_STATS(RX_UCAST);
518 rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
519 dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
520 net_dim(&cq_poll->dim, dim_sample);
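/* Top-level NAPI handler for a completion interrupt: poll every CQ mapped
 * to this CINT, refill RX buffer pools and, once done, re-arm the
 * interrupt (updating adaptive IRQ coalescing via net_dim if enabled).
 */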
523 int otx2_napi_handler(struct napi_struct *napi, int budget)
525 struct otx2_cq_queue *rx_cq = NULL;
526 struct otx2_cq_poll *cq_poll;
527 int workdone = 0, cq_idx, i;
528 struct otx2_cq_queue *cq;
529 struct otx2_qset *qset;
530 struct otx2_nic *pfvf;
533 cq_poll = container_of(napi, struct otx2_cq_poll, napi);
534 pfvf = (struct otx2_nic *)cq_poll->dev;
537 for (i = 0; i < CQS_PER_CINT; i++) {
538 cq_idx = cq_poll->cq_ids[i];
539 if (unlikely(cq_idx == CINT_INVALID_CQ))
541 cq = &qset->cq[cq_idx];
542 if (cq->cq_type == CQ_RX) {
544 workdone += otx2_rx_napi_handler(pfvf, napi,
547 workdone += otx2_tx_napi_handler(pfvf, cq, budget);
551 if (rx_cq && rx_cq->pool_ptrs)
552 filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
554 otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
556 if (workdone < budget && napi_complete_done(napi, workdone)) {
557 /* If interface is going down, don't re-enable IRQ */
558 if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
561 /* Check for adaptive interrupt coalescing */
563 ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
564 OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
565 /* Adjust irq coalescing using net_dim */
566 otx2_adjust_adaptive_coalese(pfvf, cq_poll);
567 /* Update irq coalescing */
568 for (i = 0; i < pfvf->hw.cint_cnt; i++)
569 otx2_config_irq_coalescing(pfvf, i);
572 if (unlikely(!filled_cnt)) {
573 struct refill_work *work;
574 struct delayed_work *dwork;
576 work = &pfvf->refill_wrk[cq->cq_idx];
577 dwork = &work->pool_refill_work;
578 /* Schedule a task if no other task is running */
579 if (!cq->refill_task_sched) {
581 cq->refill_task_sched = true;
582 schedule_delayed_work(dwork,
583 msecs_to_jiffies(100));
586 /* Re-enable interrupts */
588 NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
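/* Copy the SQE(s) built in the scratch buffer to the LMT region and issue
 * LMTST operations until the flush is accepted, then advance the SQ head.
 */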
595 void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
600 /* Packet data stores should finish before SQE is flushed to HW */
604 memcpy(sq->lmt_addr, sq->sqe_base, size);
605 status = otx2_lmt_flush(sq->io_addr);
606 } while (status == 0);
609 sq->head &= (sq->sqe_cnt - 1);
612 #define MAX_SEGS_PER_SG 3
613 /* Add SQE scatter/gather subdescriptor structure */
614 static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
615 struct sk_buff *skb, int num_segs, int *offset)
617 struct nix_sqe_sg_s *sg = NULL;
618 u64 dma_addr, *iova = NULL;
622 sq->sg[sq->head].num_segs = 0;
624 for (seg = 0; seg < num_segs; seg++) {
625 if ((seg % MAX_SEGS_PER_SG) == 0) {
626 sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
627 sg->ld_type = NIX_SEND_LDTYPE_LDD;
628 sg->subdc = NIX_SUBDC_SG;
630 sg_lens = (void *)sg;
631 iova = (void *)sg + sizeof(*sg);
632 /* Next subdc always starts at a 16-byte boundary.
633 * So whether sg->segs is 2 or 3, offset += 16 bytes.
635 if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
636 *offset += sizeof(*sg) + (3 * sizeof(u64));
638 *offset += sizeof(*sg) + sizeof(u64);
640 dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
641 if (dma_mapping_error(pfvf->dev, dma_addr))
644 sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len;
648 /* Save DMA mapping info for later unmapping */
649 sq->sg[sq->head].dma_addr[seg] = dma_addr;
650 sq->sg[sq->head].size[seg] = len;
651 sq->sg[sq->head].num_segs++;
654 sq->sg[sq->head].skb = (u64)skb;
658 /* Add SQE extended header subdescriptor */
659 static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
660 struct sk_buff *skb, int *offset)
662 struct nix_sqe_ext_s *ext;
664 ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset);
665 ext->subdc = NIX_SUBDC_EXT;
666 if (skb_shinfo(skb)->gso_size) {
668 ext->lso_sb = skb_tcp_all_headers(skb);
669 ext->lso_mps = skb_shinfo(skb)->gso_size;
671 /* Only TSOv4 and TSOv6 GSO offloads are supported */
672 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
673 ext->lso_format = pfvf->hw.lso_tsov4_idx;
675 /* HW adds the payload size to 'ip_hdr->tot_len' while
676 * sending each TSO segment, hence set the length in the
677 * packet's IP header to just the header length.
679 ip_hdr(skb)->tot_len =
680 htons(ext->lso_sb - skb_network_offset(skb));
681 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
682 ext->lso_format = pfvf->hw.lso_tsov6_idx;
683 ipv6_hdr(skb)->payload_len = htons(tcp_hdrlen(skb));
684 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
685 __be16 l3_proto = vlan_get_protocol(skb);
686 struct udphdr *udph = udp_hdr(skb);
689 ext->lso_sb = skb_transport_offset(skb) +
690 sizeof(struct udphdr);
692 /* HW adds the payload size to the length fields in the IP
693 * and UDP headers during segmentation, hence adjust the
694 * lengths to just the header sizes.
696 iplen = htons(ext->lso_sb - skb_network_offset(skb));
697 if (l3_proto == htons(ETH_P_IP)) {
698 ip_hdr(skb)->tot_len = iplen;
699 ext->lso_format = pfvf->hw.lso_udpv4_idx;
701 ipv6_hdr(skb)->payload_len = iplen;
702 ext->lso_format = pfvf->hw.lso_udpv6_idx;
705 udph->len = htons(sizeof(struct udphdr));
707 } else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
711 #define OTX2_VLAN_PTR_OFFSET (ETH_HLEN - ETH_TLEN)
712 if (skb_vlan_tag_present(skb)) {
713 if (skb->vlan_proto == htons(ETH_P_8021Q)) {
714 ext->vlan1_ins_ena = 1;
715 ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
716 ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
717 } else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
718 ext->vlan0_ins_ena = 1;
719 ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
720 ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
724 *offset += sizeof(*ext);
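/* Add a MEM subdescriptor which makes hardware perform a memory update
 * after transmission; used here to capture TX timestamps and, for
 * one-step PTP, to let hardware update the packet's timestamp and
 * correction fields.
 */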
727 static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
728 int alg, u64 iova, int ptp_offset,
729 u64 base_ns, bool udp_csum_crt)
731 struct nix_sqe_mem_s *mem;
733 mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
734 mem->subdc = NIX_SUBDC_MEM;
736 mem->wmem = 1; /* wait for the memory operation */
740 mem->start_offset = ptp_offset;
741 mem->udp_csum_crt = !!udp_csum_crt;
742 mem->base_ns = base_ns;
746 *offset += sizeof(*mem);
749 /* Add SQE header subdescriptor structure */
750 static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
751 struct nix_sqe_hdr_s *sqe_hdr,
752 struct sk_buff *skb, u16 qidx)
756 /* Check if the SQE was framed before; if so, there is no need to
757 * set these constants again and again.
759 if (!sqe_hdr->total) {
760 /* Don't free Tx buffers to Aura */
762 sqe_hdr->aura = sq->aura_id;
763 /* Post a CQE Tx after pkt transmission */
765 sqe_hdr->sq = (qidx >= pfvf->hw.tx_queues) ?
766 qidx + pfvf->hw.xdp_queues : qidx;
768 sqe_hdr->total = skb->len;
769 /* Set SQE identifier which will be used later for freeing SKB */
770 sqe_hdr->sqe_id = sq->head;
772 /* Offload TCP/UDP checksum to HW */
773 if (skb->ip_summed == CHECKSUM_PARTIAL) {
774 sqe_hdr->ol3ptr = skb_network_offset(skb);
775 sqe_hdr->ol4ptr = skb_transport_offset(skb);
776 /* get vlan protocol Ethertype */
777 if (eth_type_vlan(skb->protocol))
778 skb->protocol = vlan_get_protocol(skb);
780 if (skb->protocol == htons(ETH_P_IP)) {
781 proto = ip_hdr(skb)->protocol;
782 /* In case of TSO, HW needs this to be explicitly set.
783 * So set this always, instead of adding a check.
785 sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
786 } else if (skb->protocol == htons(ETH_P_IPV6)) {
787 proto = ipv6_hdr(skb)->nexthdr;
788 sqe_hdr->ol3type = NIX_SENDL3TYPE_IP6;
791 if (proto == IPPROTO_TCP)
792 sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
793 else if (proto == IPPROTO_UDP)
794 sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
798 static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
799 struct otx2_snd_queue *sq,
800 struct sk_buff *skb, int sqe, int hdr_len)
802 int num_segs = skb_shinfo(skb)->nr_frags + 1;
803 struct sg_list *sg = &sq->sg[sqe];
809 /* Get payload length at skb->data */
810 len = skb_headlen(skb) - hdr_len;
812 for (seg = 0; seg < num_segs; seg++) {
813 /* Skip skb->data if there is no payload */
816 dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
817 if (dma_mapping_error(pfvf->dev, dma_addr))
820 /* Save DMA mapping info for later unmapping */
821 sg->dma_addr[sg->num_segs] = dma_addr;
822 sg->size[sg->num_segs] = len;
827 otx2_dma_unmap_skb_frags(pfvf, sg);
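/* Translate a TSO segment's data pointer into the DMA address of the
 * already-mapped SKB fragment (or linear area) it lies in, so segments
 * reuse the mappings created by otx2_dma_map_tso_skb().
 */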
831 static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
832 struct sk_buff *skb, int seg,
833 u64 seg_addr, int hdr_len, int sqe)
835 struct sg_list *sg = &sq->sg[sqe];
836 const skb_frag_t *frag;
840 return sg->dma_addr[0] + (seg_addr - (u64)skb->data);
842 frag = &skb_shinfo(skb)->frags[seg];
843 offset = seg_addr - (u64)skb_frag_address(frag);
844 if (skb_headlen(skb) - hdr_len)
846 return sg->dma_addr[seg] + offset;
849 static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
850 struct sg_list *list, int *offset)
852 struct nix_sqe_sg_s *sg = NULL;
857 /* Add SG descriptors with buffer addresses */
858 for (seg = 0; seg < list->num_segs; seg++) {
859 if ((seg % MAX_SEGS_PER_SG) == 0) {
860 sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
861 sg->ld_type = NIX_SEND_LDTYPE_LDD;
862 sg->subdc = NIX_SUBDC_SG;
864 sg_lens = (void *)sg;
865 iova = (void *)sg + sizeof(*sg);
866 /* Next subdc always starts at a 16-byte boundary.
867 * So whether sg->segs is 2 or 3, offset += 16 bytes.
869 if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
870 *offset += sizeof(*sg) + (3 * sizeof(u64));
872 *offset += sizeof(*sg) + sizeof(u64);
874 sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg];
875 *iova++ = list->dma_addr[seg];
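/* Software TSO fallback: map the SKB once, then build and flush one SQE
 * per TSO segment, each with a freshly built header plus SG pointers into
 * the original payload. The SKB and its DMA mappings are freed only when
 * the last segment's completion arrives.
 */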
880 static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
881 struct sk_buff *skb, u16 qidx)
883 struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
884 int hdr_len, tcp_data, seg_len, pkt_len, offset;
885 struct nix_sqe_hdr_s *sqe_hdr;
886 int first_sqe = sq->head;
890 hdr_len = tso_start(skb, &tso);
892 /* Map SKB's fragments to DMA.
893 * It's done here to avoid mapping for every TSO segment's packet.
895 if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
896 dev_kfree_skb_any(skb);
900 netdev_tx_sent_queue(txq, skb->len);
902 tcp_data = skb->len - hdr_len;
903 while (tcp_data > 0) {
906 seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
909 /* Set SQE's SEND_HDR */
910 memset(sq->sqe_base, 0, sq->sqe_size);
911 sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
912 otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
913 offset = sizeof(*sqe_hdr);
915 /* Add TSO segment's pkt header */
916 hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
917 tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
919 sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
920 list.size[0] = hdr_len;
923 /* Add TSO segment's payload data fragments */
925 while (seg_len > 0) {
928 size = min_t(int, tso.size, seg_len);
930 list.size[list.num_segs] = size;
931 list.dma_addr[list.num_segs] =
932 otx2_tso_frag_dma_addr(sq, skb,
933 tso.next_frag_idx - 1,
934 (u64)tso.data, hdr_len,
939 tso_build_data(skb, &tso, size);
941 sqe_hdr->total = pkt_len;
942 otx2_sqe_tso_add_sg(sq, &list, &offset);
944 /* DMA mappings and the skb need to be freed only after the last
945 * TSO segment is transmitted out. So set 'PNC' only for the
946 * last segment. Also point the last segment's sqe_id to the first
947 * segment's SQE index where the skb address and DMA mappings are saved.
952 sqe_hdr->sqe_id = first_sqe;
953 sq->sg[first_sqe].skb = (u64)skb;
958 sqe_hdr->sizem1 = (offset / 16) - 1;
960 /* Flush SQE to HW */
961 pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
965 static bool is_hw_tso_supported(struct otx2_nic *pfvf,
968 int payload_len, last_seg_size;
970 if (test_bit(HW_TSO, &pfvf->hw.cap_flag))
973 /* On 96xx A0, HW TSO is not supported */
974 if (!is_96xx_B0(pfvf->pdev))
977 /* HW has an issue where, if the payload of the last LSO
978 * segment is shorter than 16 bytes, some header fields may not
979 * be correctly modified. Hence don't offload such TSO segments.
982 payload_len = skb->len - skb_tcp_all_headers(skb);
983 last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
984 if (last_seg_size && last_seg_size < 16)
990 static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
992 if (!skb_shinfo(skb)->gso_size)
996 if (is_hw_tso_supported(pfvf, skb))
1000 return skb_shinfo(skb)->gso_segs;
1003 static bool otx2_validate_network_transport(struct sk_buff *skb)
1005 if ((ip_hdr(skb)->protocol == IPPROTO_UDP) ||
1006 (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) {
1007 struct udphdr *udph = udp_hdr(skb);
1009 if (udph->source == htons(PTP_PORT) &&
1010 udph->dest == htons(PTP_PORT))
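/* Determine whether the packet is a PTP SYNC message (over L2 or UDP),
 * returning the byte offset of the PTP header so the originTimestamp and
 * correction fields can be updated for one-step sync, and whether a UDP
 * checksum needs to be recomputed.
 */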
1017 static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, bool *udp_csum_crt)
1019 struct ethhdr *eth = (struct ethhdr *)(skb->data);
1020 u16 nix_offload_hlen = 0, inner_vhlen = 0;
1021 bool udp_hdr_present = false, is_sync;
1022 u8 *data = skb->data, *msgtype;
1023 __be16 proto = eth->h_proto;
1024 int network_depth = 0;
1026 /* NIX is programmed to offload the outer VLAN header.
1027 * In case of a single VLAN, the protocol field holds the network header type (ETH_IP/V6);
1028 * in case of stacked VLANs, it holds the inner VLAN type (8100).
1030 if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
1031 skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) {
1032 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
1033 /* Get vlan protocol */
1034 proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
1035 /* SKB APIs like skb_transport_offset do not include the
1036 * offloaded vlan header length. Need to explicitly add it.
1039 nix_offload_hlen = VLAN_HLEN;
1040 inner_vhlen = VLAN_HLEN;
1041 } else if (skb->vlan_proto == htons(ETH_P_8021Q)) {
1042 nix_offload_hlen = VLAN_HLEN;
1044 } else if (eth_type_vlan(eth->h_proto)) {
1045 proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
1048 switch (ntohs(proto)) {
1051 *offset = network_depth;
1053 *offset = ETH_HLEN + nix_offload_hlen +
1058 if (!otx2_validate_network_transport(skb))
1061 *offset = nix_offload_hlen + skb_transport_offset(skb) +
1062 sizeof(struct udphdr);
1063 udp_hdr_present = true;
1067 msgtype = data + *offset;
1068 /* Check whether the PTP messageId is SYNC or not */
1069 is_sync = !(*msgtype & 0xf);
1071 *udp_csum_crt = udp_hdr_present;
1078 static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
1079 struct otx2_snd_queue *sq, int *offset)
1081 struct ethhdr *eth = (struct ethhdr *)(skb->data);
1082 struct ptpv2_tstamp *origin_tstamp;
1083 bool udp_csum_crt = false;
1084 unsigned int udphoff;
1085 struct timespec64 ts;
1090 if (unlikely(!skb_shinfo(skb)->gso_size &&
1091 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
1092 if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC &&
1093 otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum_crt))) {
1094 origin_tstamp = (struct ptpv2_tstamp *)
1095 ((u8 *)skb->data + ptp_offset +
1096 PTP_SYNC_SEC_OFFSET);
1097 ts = ns_to_timespec64(pfvf->ptp->tstamp);
1098 origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff);
1099 origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff);
1100 origin_tstamp->nanoseconds = htonl(ts.tv_nsec);
1101 /* Point to correction field in PTP packet */
1104 /* When the user disables hw checksum, the stack calculates the csum,
1105 * but it does not cover the ptp timestamp, which is added later.
1106 * Recalculate the checksum manually considering the timestamp.
1109 struct udphdr *uh = udp_hdr(skb);
1111 if (skb->ip_summed != CHECKSUM_PARTIAL && uh->check != 0) {
1112 udphoff = skb_transport_offset(skb);
1114 skb_csum = skb_checksum(skb, udphoff, skb->len - udphoff,
1116 if (ntohs(eth->h_proto) == ETH_P_IPV6)
1117 uh->check = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1118 &ipv6_hdr(skb)->daddr,
1120 ipv6_hdr(skb)->nexthdr,
1123 uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
1131 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1133 iova = sq->timestamps->iova + (sq->head * sizeof(u64));
1134 otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova,
1135 ptp_offset, pfvf->ptp->base_ns, udp_csum_crt);
1137 skb_tx_timestamp(skb);
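/* Append an SKB to the send queue: check for descriptor space, build the
 * SQE (header, optional extended header, SG list, optional timestamp MEM
 * subdescriptor) and flush it to hardware. Returns false if the queue is
 * full and the packet could not be queued.
 */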
1141 bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
1142 struct sk_buff *skb, u16 qidx)
1144 struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
1145 struct otx2_nic *pfvf = netdev_priv(netdev);
1146 int offset, num_segs, free_desc;
1147 struct nix_sqe_hdr_s *sqe_hdr;
1149 /* Check if there is enough room between producer
1150 * and consumer index.
1152 free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1);
1153 if (free_desc < sq->sqe_thresh)
1156 if (free_desc < otx2_get_sqe_count(pfvf, skb))
1159 num_segs = skb_shinfo(skb)->nr_frags + 1;
1161 /* If SKB doesn't fit in a single SQE, linearize it.
1162 * TODO: Consider adding JUMP descriptor instead.
1164 if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
1165 if (__skb_linearize(skb)) {
1166 dev_kfree_skb_any(skb);
1169 num_segs = skb_shinfo(skb)->nr_frags + 1;
1172 if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
1173 /* Insert vlan tag before giving pkt to tso */
1174 if (skb_vlan_tag_present(skb))
1175 skb = __vlan_hwaccel_push_inside(skb);
1176 otx2_sq_append_tso(pfvf, sq, skb, qidx);
1180 /* Set SQE's SEND_HDR.
1181 * Do not clear the first 64 bits as they contain constant info.
1183 memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
1184 sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
1185 otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
1186 offset = sizeof(*sqe_hdr);
1188 /* Add extended header if needed */
1189 otx2_sqe_add_ext(pfvf, sq, skb, &offset);
1191 /* Add SG subdesc with data frags */
1192 if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
1193 otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
1197 otx2_set_txtstamp(pfvf, skb, sq, &offset);
1199 sqe_hdr->sizem1 = (offset / 16) - 1;
1201 netdev_tx_sent_queue(txq, skb->len);
1203 /* Flush SQE to HW */
1204 pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
1208 EXPORT_SYMBOL(otx2_sq_append_skb);
1210 void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx)
1212 struct nix_cqe_rx_s *cqe;
1213 struct otx2_pool *pool;
1214 int processed_cqe = 0;
1219 xdp_rxq_info_unreg(&cq->xdp_rxq);
1221 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
1224 pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
1225 pool = &pfvf->qset.pool[pool_id];
1227 while (cq->pend_cqe) {
1228 cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
1234 if (cqe->sg.segs > 1) {
1235 otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
1238 iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
1240 otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
1243 /* Free CQEs to HW */
1244 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
1245 ((u64)cq->cq_idx << 32) | processed_cqe);
1248 void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
1250 struct sk_buff *skb = NULL;
1251 struct otx2_snd_queue *sq;
1252 struct nix_cqe_tx_s *cqe;
1253 int processed_cqe = 0;
1257 qidx = cq->cq_idx - pfvf->hw.rx_queues;
1258 sq = &pfvf->qset.sq[qidx];
1260 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
1263 while (cq->pend_cqe) {
1264 cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
1270 sg = &sq->sg[cqe->comp.sqe_id];
1271 skb = (struct sk_buff *)sg->skb;
1273 otx2_dma_unmap_skb_frags(pfvf, sg);
1274 dev_kfree_skb_any(skb);
1275 sg->skb = (u64)NULL;
1279 /* Free CQEs to HW */
1280 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
1281 ((u64)cq->cq_idx << 32) | processed_cqe);
1284 int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
1286 struct msg_req *msg;
1289 mutex_lock(&pfvf->mbox.lock);
1291 msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
1293 msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);
1296 mutex_unlock(&pfvf->mbox.lock);
1300 err = otx2_sync_mbox_msg(&pfvf->mbox);
1301 mutex_unlock(&pfvf->mbox.lock);
1305 static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
1306 int len, int *offset)
1308 struct nix_sqe_sg_s *sg = NULL;
1311 sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
1312 sg->ld_type = NIX_SEND_LDTYPE_LDD;
1313 sg->subdc = NIX_SUBDC_SG;
1315 sg->seg1_size = len;
1316 iova = (void *)sg + sizeof(*sg);
1318 *offset += sizeof(*sg) + sizeof(u64);
1320 sq->sg[sq->head].dma_addr[0] = dma_addr;
1321 sq->sg[sq->head].size[0] = len;
1322 sq->sg[sq->head].num_segs = 1;
1325 bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
1327 struct nix_sqe_hdr_s *sqe_hdr;
1328 struct otx2_snd_queue *sq;
1329 int offset, free_sqe;
1331 sq = &pfvf->qset.sq[qidx];
1332 free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
1333 if (free_sqe < sq->sqe_thresh)
1336 memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
1338 sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
1340 if (!sqe_hdr->total) {
1341 sqe_hdr->aura = sq->aura_id;
1346 sqe_hdr->total = len;
1347 sqe_hdr->sqe_id = sq->head;
1349 offset = sizeof(*sqe_hdr);
1351 otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
1352 sqe_hdr->sizem1 = (offset / 16) - 1;
1353 pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
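/* Run the attached XDP program on a received frame. XDP_TX re-posts the
 * buffer on an XDP send queue, XDP_REDIRECT hands it to xdp_do_redirect()
 * (flushed later via *need_xdp_flush), and dropped/aborted frames have
 * their page unmapped and released. Returns true if XDP consumed the frame.
 */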
1358 static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
1359 struct bpf_prog *prog,
1360 struct nix_cqe_rx_s *cqe,
1361 struct otx2_cq_queue *cq,
1362 bool *need_xdp_flush)
1364 unsigned char *hard_start, *data;
1365 int qidx = cq->cq_idx;
1366 struct xdp_buff xdp;
1372 iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
1373 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
1374 page = virt_to_page(phys_to_virt(pa));
1376 xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
1378 data = (unsigned char *)phys_to_virt(pa);
1379 hard_start = page_address(page);
1380 xdp_prepare_buff(&xdp, hard_start, data - hard_start,
1381 cqe->sg.seg_size, false);
1383 act = bpf_prog_run_xdp(prog, &xdp);
1389 qidx += pfvf->hw.tx_queues;
1391 return otx2_xdp_sq_append_pkt(pfvf, iova,
1392 cqe->sg.seg_size, qidx);
1395 err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
1397 otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
1400 *need_xdp_flush = true;
1406 bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
1409 trace_xdp_exception(pfvf->netdev, prog, act);
1412 otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,