1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2021, Microsoft Corporation. */
4 #include <uapi/linux/bpf.h>
6 #include <linux/inetdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/filter.h>
11 #include <linux/pci.h>
13 #include <net/checksum.h>
14 #include <net/ip6_checksum.h>
15 #include <net/page_pool/helpers.h>
18 #include <net/mana/mana.h>
19 #include <net/mana/mana_auxiliary.h>
21 static DEFINE_IDA(mana_adev_ida);
23 static int mana_adev_idx_alloc(void)
25 return ida_alloc(&mana_adev_ida, GFP_KERNEL);
28 static void mana_adev_idx_free(int idx)
30 ida_free(&mana_adev_ida, idx);
33 /* Microsoft Azure Network Adapter (MANA) functions */
35 static int mana_open(struct net_device *ndev)
37 struct mana_port_context *apc = netdev_priv(ndev);
40 err = mana_alloc_queues(ndev);
44 apc->port_is_up = true;
46 /* Ensure port state updated before txq state */
49 netif_carrier_on(ndev);
50 netif_tx_wake_all_queues(ndev);
55 static int mana_close(struct net_device *ndev)
57 struct mana_port_context *apc = netdev_priv(ndev);
62 return mana_detach(ndev, true);
65 static bool mana_can_tx(struct gdma_queue *wq)
67 return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
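/* Return the L4 protocol (IPPROTO_TCP or IPPROTO_UDP) for which checksum
 * offload can be requested, or 0 when the HW cannot offload this packet.
 */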
70 static unsigned int mana_checksum_info(struct sk_buff *skb)
72 if (skb->protocol == htons(ETH_P_IP)) {
73 struct iphdr *ip = ip_hdr(skb);
75 if (ip->protocol == IPPROTO_TCP)
78 if (ip->protocol == IPPROTO_UDP)
80 } else if (skb->protocol == htons(ETH_P_IPV6)) {
81 struct ipv6hdr *ip6 = ipv6_hdr(skb);
83 if (ip6->nexthdr == IPPROTO_TCP)
86 if (ip6->nexthdr == IPPROTO_UDP)
90 /* No csum offloading */
94 static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash,
95 int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey)
97 ash->dma_handle[sg_i] = da;
98 ash->size[sg_i] = sge_len;
100 tp->wqe_req.sgl[sg_i].address = da;
101 tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey;
102 tp->wqe_req.sgl[sg_i].size = sge_len;
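/* DMA-map the skb for transmit and fill in the SGL of the TX package:
 * one SGE for the linear part (two when the GSO headers must sit alone in
 * SGE0), plus one SGE per page fragment. Unmaps everything on failure.
 */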
105 static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
106 struct mana_tx_package *tp, int gso_hs)
108 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
109 int hsg = 1; /* num of SGEs of linear part */
110 struct gdma_dev *gd = apc->ac->gdma_dev;
111 int skb_hlen = skb_headlen(skb);
112 int sge0_len, sge1_len = 0;
113 struct gdma_context *gc;
120 gc = gd->gdma_context;
123 if (gso_hs && gso_hs < skb_hlen) {
125 sge1_len = skb_hlen - gso_hs;
130 da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE);
131 if (dma_mapping_error(dev, da))
134 mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey);
138 da = dma_map_single(dev, skb->data + sge0_len, sge1_len,
140 if (dma_mapping_error(dev, da))
143 mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey);
147 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
150 frag = &skb_shinfo(skb)->frags[i];
151 da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
153 if (dma_mapping_error(dev, da))
156 mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag),
163 for (i = sg_i - 1; i >= hsg; i--)
164 dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
167 for (i = hsg - 1; i >= 0; i--)
168 dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
174 /* Handle the case when GSO SKB linear length is too large.
175 * MANA NIC requires GSO packets to put only the packet header to SGE0.
176  * So, we need 2 SGEs for the skb linear part which contains more than the header.
178  * Return a positive value for the number of SGEs, or a negative value for an error.
181 static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb,
184 int num_sge = 1 + skb_shinfo(skb)->nr_frags;
185 int skb_hlen = skb_headlen(skb);
187 if (gso_hs < skb_hlen) {
189 } else if (gso_hs > skb_hlen) {
192 "TX nonlinear head: hs:%d, skb_hlen:%d\n",
201 /* Get the GSO packet's header size */
202 static int mana_get_gso_hs(struct sk_buff *skb)
206 if (skb->encapsulation) {
207 gso_hs = skb_inner_tcp_all_headers(skb);
209 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
210 gso_hs = skb_transport_offset(skb) +
211 sizeof(struct udphdr);
213 gso_hs = skb_tcp_all_headers(skb);
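/* TX entry point: build the per-packet OOB (packet format, VLAN tag,
 * checksum/TSO offload fields), map the skb into SGEs, post the work
 * request to the SQ and ring the doorbell.
 */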
220 netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
222 enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
223 struct mana_port_context *apc = netdev_priv(ndev);
224 int gso_hs = 0; /* zero for non-GSO pkts */
225 u16 txq_idx = skb_get_queue_mapping(skb);
226 struct gdma_dev *gd = apc->ac->gdma_dev;
227 bool ipv4 = false, ipv6 = false;
228 struct mana_tx_package pkg = {};
229 struct netdev_queue *net_txq;
230 struct mana_stats_tx *tx_stats;
231 struct gdma_queue *gdma_sq;
232 unsigned int csum_type;
233 struct mana_txq *txq;
237 if (unlikely(!apc->port_is_up))
240 if (skb_cow_head(skb, MANA_HEADROOM))
243 txq = &apc->tx_qp[txq_idx].txq;
244 gdma_sq = txq->gdma_sq;
245 cq = &apc->tx_qp[txq_idx].tx_cq;
246 tx_stats = &txq->stats;
248 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
249 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
251 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
252 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
253 pkt_fmt = MANA_LONG_PKT_FMT;
255 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
258 if (skb_vlan_tag_present(skb)) {
259 pkt_fmt = MANA_LONG_PKT_FMT;
260 pkg.tx_oob.l_oob.inject_vlan_pri_tag = 1;
261 pkg.tx_oob.l_oob.pcp = skb_vlan_tag_get_prio(skb);
262 pkg.tx_oob.l_oob.dei = skb_vlan_tag_get_cfi(skb);
263 pkg.tx_oob.l_oob.vlan_id = skb_vlan_tag_get_id(skb);
266 pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
268 if (pkt_fmt == MANA_SHORT_PKT_FMT) {
269 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
270 u64_stats_update_begin(&tx_stats->syncp);
271 tx_stats->short_pkt_fmt++;
272 u64_stats_update_end(&tx_stats->syncp);
274 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
275 u64_stats_update_begin(&tx_stats->syncp);
276 tx_stats->long_pkt_fmt++;
277 u64_stats_update_end(&tx_stats->syncp);
280 pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
281 pkg.wqe_req.flags = 0;
282 pkg.wqe_req.client_data_unit = 0;
284 pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
286 if (skb->protocol == htons(ETH_P_IP))
288 else if (skb->protocol == htons(ETH_P_IPV6))
291 if (skb_is_gso(skb)) {
294 gso_hs = mana_get_gso_hs(skb);
296 num_sge = mana_fix_skb_head(ndev, skb, gso_hs);
298 pkg.wqe_req.num_sge = num_sge;
302 u64_stats_update_begin(&tx_stats->syncp);
303 if (skb->encapsulation) {
304 tx_stats->tso_inner_packets++;
305 tx_stats->tso_inner_bytes += skb->len - gso_hs;
307 tx_stats->tso_packets++;
308 tx_stats->tso_bytes += skb->len - gso_hs;
310 u64_stats_update_end(&tx_stats->syncp);
312 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
313 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
315 pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
316 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
317 pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
319 pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
320 pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
322 ip_hdr(skb)->tot_len = 0;
323 ip_hdr(skb)->check = 0;
324 tcp_hdr(skb)->check =
325 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
326 ip_hdr(skb)->daddr, 0,
329 ipv6_hdr(skb)->payload_len = 0;
330 tcp_hdr(skb)->check =
331 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
332 &ipv6_hdr(skb)->daddr, 0,
335 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
336 csum_type = mana_checksum_info(skb);
338 u64_stats_update_begin(&tx_stats->syncp);
339 tx_stats->csum_partial++;
340 u64_stats_update_end(&tx_stats->syncp);
342 if (csum_type == IPPROTO_TCP) {
343 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
344 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
346 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
347 pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
349 } else if (csum_type == IPPROTO_UDP) {
350 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
351 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
353 pkg.tx_oob.s_oob.comp_udp_csum = 1;
355 /* Can't do offload of this type of checksum */
356 if (skb_checksum_help(skb))
361 WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
363 if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
364 pkg.wqe_req.sgl = pkg.sgl_array;
366 pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
367 sizeof(struct gdma_sge),
372 pkg.wqe_req.sgl = pkg.sgl_ptr;
375 if (mana_map_skb(skb, apc, &pkg, gso_hs)) {
376 u64_stats_update_begin(&tx_stats->syncp);
377 tx_stats->mana_map_err++;
378 u64_stats_update_end(&tx_stats->syncp);
382 skb_queue_tail(&txq->pending_skbs, skb);
385 net_txq = netdev_get_tx_queue(ndev, txq_idx);
387 err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
388 (struct gdma_posted_wqe_info *)skb->cb);
389 if (!mana_can_tx(gdma_sq)) {
390 netif_tx_stop_queue(net_txq);
391 apc->eth_stats.stop_queue++;
395 (void)skb_dequeue_tail(&txq->pending_skbs);
396 netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
397 err = NETDEV_TX_BUSY;
402 atomic_inc(&txq->pending_sends);
404 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
406 /* skb may be freed after mana_gd_post_work_request. Do not use it. */
409 tx_stats = &txq->stats;
410 u64_stats_update_begin(&tx_stats->syncp);
412 tx_stats->bytes += len;
413 u64_stats_update_end(&tx_stats->syncp);
416 if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
417 netif_tx_wake_queue(net_txq);
418 apc->eth_stats.wake_queue++;
427 ndev->stats.tx_dropped++;
429 dev_kfree_skb_any(skb);
433 static void mana_get_stats64(struct net_device *ndev,
434 struct rtnl_link_stats64 *st)
436 struct mana_port_context *apc = netdev_priv(ndev);
437 unsigned int num_queues = apc->num_queues;
438 struct mana_stats_rx *rx_stats;
439 struct mana_stats_tx *tx_stats;
444 if (!apc->port_is_up)
447 netdev_stats_to_stats64(st, &ndev->stats);
449 for (q = 0; q < num_queues; q++) {
450 rx_stats = &apc->rxqs[q]->stats;
453 start = u64_stats_fetch_begin(&rx_stats->syncp);
454 packets = rx_stats->packets;
455 bytes = rx_stats->bytes;
456 } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
458 st->rx_packets += packets;
459 st->rx_bytes += bytes;
462 for (q = 0; q < num_queues; q++) {
463 tx_stats = &apc->tx_qp[q].txq.stats;
466 start = u64_stats_fetch_begin(&tx_stats->syncp);
467 packets = tx_stats->packets;
468 bytes = tx_stats->bytes;
469 } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
471 st->tx_packets += packets;
472 st->tx_bytes += bytes;
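/* Pick a TX queue via the RSS indirection table from the skb hash, and
 * cache the choice in the socket when it has a destination cached.
 */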
476 static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
479 struct mana_port_context *apc = netdev_priv(ndev);
480 u32 hash = skb_get_hash(skb);
481 struct sock *sk = skb->sk;
484 txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
486 if (txq != old_q && sk && sk_fullsock(sk) &&
487 rcu_access_pointer(sk->sk_dst_cache))
488 sk_tx_queue_set(sk, txq);
493 static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
494 struct net_device *sb_dev)
498 if (ndev->real_num_tx_queues == 1)
501 txq = sk_tx_queue_get(skb->sk);
503 if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
504 if (skb_rx_queue_recorded(skb))
505 txq = skb_get_rx_queue(skb);
507 txq = mana_get_tx_queue(ndev, skb, txq);
513 /* Release pre-allocated RX buffers */
514 static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
519 dev = mpc->ac->gdma_dev->gdma_context->dev;
521 if (!mpc->rxbufs_pre)
527 while (mpc->rxbpre_total) {
528 i = --mpc->rxbpre_total;
529 dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
531 put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
538 kfree(mpc->rxbufs_pre);
539 mpc->rxbufs_pre = NULL;
542 mpc->rxbpre_datasize = 0;
543 mpc->rxbpre_alloc_size = 0;
544 mpc->rxbpre_headroom = 0;
547 /* Get a buffer from the pre-allocated RX buffers */
548 static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
550 struct net_device *ndev = rxq->ndev;
551 struct mana_port_context *mpc;
554 mpc = netdev_priv(ndev);
556 if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
557 netdev_err(ndev, "No RX pre-allocated bufs\n");
561 /* Check sizes to catch an unexpected coding error */
562 if (mpc->rxbpre_datasize != rxq->datasize) {
563 netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
564 mpc->rxbpre_datasize, rxq->datasize);
568 if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
569 netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
570 mpc->rxbpre_alloc_size, rxq->alloc_size);
574 if (mpc->rxbpre_headroom != rxq->headroom) {
575 netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
576 mpc->rxbpre_headroom, rxq->headroom);
582 *da = mpc->das_pre[mpc->rxbpre_total];
583 va = mpc->rxbufs_pre[mpc->rxbpre_total];
584 mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;
586 /* Deallocate the array after all buffers are gone */
587 if (!mpc->rxbpre_total)
588 mana_pre_dealloc_rxbufs(mpc);
593 /* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
594 static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
597 if (mtu > MANA_XDP_MTU_MAX)
598 *headroom = 0; /* no support for XDP */
600 *headroom = XDP_PACKET_HEADROOM;
602 *alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
604 *datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
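/* Pre-allocate and DMA-map RX buffers sized for new_mtu, so the later
 * detach/attach cycle of an MTU change cannot fail on allocation.
 */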
607 static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
616 mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
617 &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);
619 dev = mpc->ac->gdma_dev->gdma_context->dev;
621 num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE;
623 WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
624 mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
625 if (!mpc->rxbufs_pre)
628 mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
632 mpc->rxbpre_total = 0;
634 for (i = 0; i < num_rxb; i++) {
635 if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
636 va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
640 page = virt_to_head_page(va);
641 /* Check if the frag falls back to a single page */
642 if (compound_order(page) <
643 get_order(mpc->rxbpre_alloc_size)) {
648 page = dev_alloc_page();
652 va = page_to_virt(page);
655 da = dma_map_single(dev, va + mpc->rxbpre_headroom,
656 mpc->rxbpre_datasize, DMA_FROM_DEVICE);
657 if (dma_mapping_error(dev, da)) {
658 put_page(virt_to_head_page(va));
662 mpc->rxbufs_pre[i] = va;
663 mpc->das_pre[i] = da;
664 mpc->rxbpre_total = i + 1;
670 mana_pre_dealloc_rxbufs(mpc);
674 static int mana_change_mtu(struct net_device *ndev, int new_mtu)
676 struct mana_port_context *mpc = netdev_priv(ndev);
677 unsigned int old_mtu = ndev->mtu;
680 /* Pre-allocate buffers to prevent failure in mana_attach later */
681 err = mana_pre_alloc_rxbufs(mpc, new_mtu);
683 netdev_err(ndev, "Insufficient memory for new MTU\n");
687 err = mana_detach(ndev, false);
689 netdev_err(ndev, "mana_detach failed: %d\n", err);
695 err = mana_attach(ndev);
697 netdev_err(ndev, "mana_attach failed: %d\n", err);
702 mana_pre_dealloc_rxbufs(mpc);
706 static const struct net_device_ops mana_devops = {
707 .ndo_open = mana_open,
708 .ndo_stop = mana_close,
709 .ndo_select_queue = mana_select_queue,
710 .ndo_start_xmit = mana_start_xmit,
711 .ndo_validate_addr = eth_validate_addr,
712 .ndo_get_stats64 = mana_get_stats64,
714 .ndo_xdp_xmit = mana_xdp_xmit,
715 .ndo_change_mtu = mana_change_mtu,
718 static void mana_cleanup_port_context(struct mana_port_context *apc)
724 static int mana_init_port_context(struct mana_port_context *apc)
726 apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
729 return !apc->rxqs ? -ENOMEM : 0;
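/* Send a MANA management request over the GDMA channel and validate the
 * response status, dev_id and activity_id.
 */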
732 static int mana_send_request(struct mana_context *ac, void *in_buf,
733 u32 in_len, void *out_buf, u32 out_len)
735 struct gdma_context *gc = ac->gdma_dev->gdma_context;
736 struct gdma_resp_hdr *resp = out_buf;
737 struct gdma_req_hdr *req = in_buf;
738 struct device *dev = gc->dev;
739 static atomic_t activity_id;
742 req->dev_id = gc->mana.dev_id;
743 req->activity_id = atomic_inc_return(&activity_id);
745 err = mana_gd_send_request(gc, in_len, in_buf, out_len,
747 if (err || resp->status) {
748 dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
750 return err ? err : -EPROTO;
753 if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
754 req->activity_id != resp->activity_id) {
755 dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
756 req->dev_id.as_uint32, resp->dev_id.as_uint32,
757 req->activity_id, resp->activity_id);
764 static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
765 const enum mana_command_code expected_code,
768 if (resp_hdr->response.msg_type != expected_code)
771 if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
774 if (resp_hdr->response.msg_size < min_size)
780 static int mana_pf_register_hw_vport(struct mana_port_context *apc)
782 struct mana_register_hw_vport_resp resp = {};
783 struct mana_register_hw_vport_req req = {};
786 mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
787 sizeof(req), sizeof(resp));
788 req.attached_gfid = 1;
789 req.is_pf_default_vport = 1;
790 req.allow_all_ether_types = 1;
792 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
795 netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
799 err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
801 if (err || resp.hdr.status) {
802 netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
803 err, resp.hdr.status);
804 return err ? err : -EPROTO;
807 apc->port_handle = resp.hw_vport_handle;
811 static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
813 struct mana_deregister_hw_vport_resp resp = {};
814 struct mana_deregister_hw_vport_req req = {};
817 mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
818 sizeof(req), sizeof(resp));
819 req.hw_vport_handle = apc->port_handle;
821 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
824 netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
829 err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
831 if (err || resp.hdr.status)
832 netdev_err(apc->ndev,
833 "Failed to deregister hw vPort: %d, 0x%x\n",
834 err, resp.hdr.status);
837 static int mana_pf_register_filter(struct mana_port_context *apc)
839 struct mana_register_filter_resp resp = {};
840 struct mana_register_filter_req req = {};
843 mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
844 sizeof(req), sizeof(resp));
845 req.vport = apc->port_handle;
846 memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
848 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
851 netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
855 err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
857 if (err || resp.hdr.status) {
858 netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
859 err, resp.hdr.status);
860 return err ? err : -EPROTO;
863 apc->pf_filter_handle = resp.filter_handle;
867 static void mana_pf_deregister_filter(struct mana_port_context *apc)
869 struct mana_deregister_filter_resp resp = {};
870 struct mana_deregister_filter_req req = {};
873 mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
874 sizeof(req), sizeof(resp));
875 req.filter_handle = apc->pf_filter_handle;
877 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
880 netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
885 err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
887 if (err || resp.hdr.status)
888 netdev_err(apc->ndev,
889 "Failed to deregister filter: %d, 0x%x\n",
890 err, resp.hdr.status);
893 static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
894 u32 proto_minor_ver, u32 proto_micro_ver,
897 struct gdma_context *gc = ac->gdma_dev->gdma_context;
898 struct mana_query_device_cfg_resp resp = {};
899 struct mana_query_device_cfg_req req = {};
900 struct device *dev = gc->dev;
903 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
904 sizeof(req), sizeof(resp));
906 req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
908 req.proto_major_ver = proto_major_ver;
909 req.proto_minor_ver = proto_minor_ver;
910 req.proto_micro_ver = proto_micro_ver;
912 err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
914 dev_err(dev, "Failed to query config: %d", err);
918 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
920 if (err || resp.hdr.status) {
921 dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
928 *max_num_vports = resp.max_num_vports;
930 if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
931 gc->adapter_mtu = resp.adapter_mtu;
933 gc->adapter_mtu = ETH_FRAME_LEN;
938 static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
939 u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
941 struct mana_query_vport_cfg_resp resp = {};
942 struct mana_query_vport_cfg_req req = {};
945 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
946 sizeof(req), sizeof(resp));
948 req.vport_index = vport_index;
950 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
955 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
963 *max_sq = resp.max_num_sq;
964 *max_rq = resp.max_num_rq;
965 *num_indir_entry = resp.num_indirection_ent;
967 apc->port_handle = resp.vport;
968 ether_addr_copy(apc->mac_addr, resp.mac_addr);
973 void mana_uncfg_vport(struct mana_port_context *apc)
975 mutex_lock(&apc->vport_mutex);
976 apc->vport_use_count--;
977 WARN_ON(apc->vport_use_count < 0);
978 mutex_unlock(&apc->vport_mutex);
980 EXPORT_SYMBOL_NS(mana_uncfg_vport, NET_MANA);
982 int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
985 struct mana_config_vport_resp resp = {};
986 struct mana_config_vport_req req = {};
989 /* This function is used to program the Ethernet port in the hardware
990 * table. It can be called from the Ethernet driver or the RDMA driver.
992 * For Ethernet usage, the hardware supports only one active user on a
993 * physical port. The driver checks on the port usage before programming
994 * the hardware when creating the RAW QP (RDMA driver) or exposing the
995 * device to kernel NET layer (Ethernet driver).
997 * Because the RDMA driver doesn't know in advance which QP type the
998 * user will create, it exposes the device with all its ports. The user
999  * may not be able to create a RAW QP on a port if this port is already
1000  * in use by the Ethernet driver from the kernel.
1002 * This physical port limitation only applies to the RAW QP. For RC QP,
1003 * the hardware doesn't have this limitation. The user can create RC
1004 * QPs on a physical port up to the hardware limits independent of the
1005 * Ethernet usage on the same port.
1007 mutex_lock(&apc->vport_mutex);
1008 if (apc->vport_use_count > 0) {
1009 mutex_unlock(&apc->vport_mutex);
1012 apc->vport_use_count++;
1013 mutex_unlock(&apc->vport_mutex);
1015 mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
1016 sizeof(req), sizeof(resp));
1017 req.vport = apc->port_handle;
1018 req.pdid = protection_dom_id;
1019 req.doorbell_pageid = doorbell_pg_id;
1021 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1024 netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
1028 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
1030 if (err || resp.hdr.status) {
1031 netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1032 err, resp.hdr.status);
1039 apc->tx_shortform_allowed = resp.short_form_allowed;
1040 apc->tx_vp_offset = resp.tx_vport_offset;
1042 netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
1043 apc->port_handle, protection_dom_id, doorbell_pg_id);
1046 mana_uncfg_vport(apc);
1050 EXPORT_SYMBOL_NS(mana_cfg_vport, NET_MANA);
1052 static int mana_cfg_vport_steering(struct mana_port_context *apc,
1054 bool update_default_rxobj, bool update_key,
1057 u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
1058 struct mana_cfg_rx_steer_req_v2 *req;
1059 struct mana_cfg_rx_steer_resp resp = {};
1060 struct net_device *ndev = apc->ndev;
1061 mana_handle_t *req_indir_tab;
1065 req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
1066 req = kzalloc(req_buf_size, GFP_KERNEL);
1070 mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1073 req->hdr.req.msg_version = GDMA_MESSAGE_V2;
1075 req->vport = apc->port_handle;
1076 req->num_indir_entries = num_entries;
1077 req->indir_tab_offset = sizeof(*req);
1078 req->rx_enable = rx;
1079 req->rss_enable = apc->rss_state;
1080 req->update_default_rxobj = update_default_rxobj;
1081 req->update_hashkey = update_key;
1082 req->update_indir_tab = update_tab;
1083 req->default_rxobj = apc->default_rxobj;
1084 req->cqe_coalescing_enable = 0;
1087 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1090 req_indir_tab = (mana_handle_t *)(req + 1);
1091 memcpy(req_indir_tab, apc->rxobj_table,
1092 req->num_indir_entries * sizeof(mana_handle_t));
1095 err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1098 netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
1102 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1105 netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
1109 if (resp.hdr.status) {
1110 netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
1115 netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
1116 apc->port_handle, num_entries);
1122 int mana_create_wq_obj(struct mana_port_context *apc,
1123 mana_handle_t vport,
1124 u32 wq_type, struct mana_obj_spec *wq_spec,
1125 struct mana_obj_spec *cq_spec,
1126 mana_handle_t *wq_obj)
1128 struct mana_create_wqobj_resp resp = {};
1129 struct mana_create_wqobj_req req = {};
1130 struct net_device *ndev = apc->ndev;
1133 mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
1134 sizeof(req), sizeof(resp));
1136 req.wq_type = wq_type;
1137 req.wq_gdma_region = wq_spec->gdma_region;
1138 req.cq_gdma_region = cq_spec->gdma_region;
1139 req.wq_size = wq_spec->queue_size;
1140 req.cq_size = cq_spec->queue_size;
1141 req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
1142 req.cq_parent_qid = cq_spec->attached_eq;
1144 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1147 netdev_err(ndev, "Failed to create WQ object: %d\n", err);
1151 err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
1153 if (err || resp.hdr.status) {
1154 netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
1161 if (resp.wq_obj == INVALID_MANA_HANDLE) {
1162 netdev_err(ndev, "Got an invalid WQ object handle\n");
1167 *wq_obj = resp.wq_obj;
1168 wq_spec->queue_index = resp.wq_id;
1169 cq_spec->queue_index = resp.cq_id;
1175 EXPORT_SYMBOL_NS(mana_create_wq_obj, NET_MANA);
1177 void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
1178 mana_handle_t wq_obj)
1180 struct mana_destroy_wqobj_resp resp = {};
1181 struct mana_destroy_wqobj_req req = {};
1182 struct net_device *ndev = apc->ndev;
1185 mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
1186 sizeof(req), sizeof(resp));
1187 req.wq_type = wq_type;
1188 req.wq_obj_handle = wq_obj;
1190 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1193 netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
1197 err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
1199 if (err || resp.hdr.status)
1200 netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
1203 EXPORT_SYMBOL_NS(mana_destroy_wq_obj, NET_MANA);
1205 static void mana_destroy_eq(struct mana_context *ac)
1207 struct gdma_context *gc = ac->gdma_dev->gdma_context;
1208 struct gdma_queue *eq;
1214 for (i = 0; i < gc->max_num_queues; i++) {
1219 mana_gd_destroy_queue(gc, eq);
1226 static int mana_create_eq(struct mana_context *ac)
1228 struct gdma_dev *gd = ac->gdma_dev;
1229 struct gdma_context *gc = gd->gdma_context;
1230 struct gdma_queue_spec spec = {};
1234 ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
1239 spec.type = GDMA_EQ;
1240 spec.monitor_avl_buf = false;
1241 spec.queue_size = EQ_SIZE;
1242 spec.eq.callback = NULL;
1243 spec.eq.context = ac->eqs;
1244 spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
1246 for (i = 0; i < gc->max_num_queues; i++) {
1247 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
1254 mana_destroy_eq(ac);
1258 static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1260 struct mana_fence_rq_resp resp = {};
1261 struct mana_fence_rq_req req = {};
1264 init_completion(&rxq->fence_event);
1266 mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
1267 sizeof(req), sizeof(resp));
1268 req.wq_obj_handle = rxq->rxobj;
1270 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1273 netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
1278 err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
1279 if (err || resp.hdr.status) {
1280 netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
1281 rxq->rxq_idx, err, resp.hdr.status);
1288 if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
1289 netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
1297 static void mana_fence_rqs(struct mana_port_context *apc)
1299 unsigned int rxq_idx;
1300 struct mana_rxq *rxq;
1303 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1304 rxq = apc->rxqs[rxq_idx];
1305 err = mana_fence_rq(apc, rxq);
1307 /* In case of any error, use sleep instead. */
1313 static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
1318 used_space_old = wq->head - wq->tail;
1319 used_space_new = wq->head - (wq->tail + num_units);
1321 if (WARN_ON_ONCE(used_space_new > used_space_old))
1324 wq->tail += num_units;
1328 static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
1330 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
1331 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1332 struct device *dev = gc->dev;
1335 /* Number of SGEs of linear part */
1336 hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1;
1338 for (i = 0; i < hsg; i++)
1339 dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
1342 for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++)
1343 dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
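/* TX completion path: reap CQEs, unmap and free completed skbs, move the
 * SQ tail forward, and wake the netdev TX queue when space is available.
 */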
1347 static void mana_poll_tx_cq(struct mana_cq *cq)
1349 struct gdma_comp *completions = cq->gdma_comp_buf;
1350 struct gdma_posted_wqe_info *wqe_info;
1351 unsigned int pkt_transmitted = 0;
1352 unsigned int wqe_unit_cnt = 0;
1353 struct mana_txq *txq = cq->txq;
1354 struct mana_port_context *apc;
1355 struct netdev_queue *net_txq;
1356 struct gdma_queue *gdma_wq;
1357 unsigned int avail_space;
1358 struct net_device *ndev;
1359 struct sk_buff *skb;
1365 apc = netdev_priv(ndev);
1367 comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1368 CQE_POLLING_BUFFER);
1373 for (i = 0; i < comp_read; i++) {
1374 struct mana_tx_comp_oob *cqe_oob;
1376 if (WARN_ON_ONCE(!completions[i].is_sq))
1379 cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1380 if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
1381 MANA_CQE_COMPLETION))
1384 switch (cqe_oob->cqe_hdr.cqe_type) {
1388 case CQE_TX_SA_DROP:
1389 case CQE_TX_MTU_DROP:
1390 case CQE_TX_INVALID_OOB:
1391 case CQE_TX_INVALID_ETH_TYPE:
1392 case CQE_TX_HDR_PROCESSING_ERROR:
1393 case CQE_TX_VF_DISABLED:
1394 case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1395 case CQE_TX_VPORT_DISABLED:
1396 case CQE_TX_VLAN_TAGGING_VIOLATION:
1397 if (net_ratelimit())
1398 netdev_err(ndev, "TX: CQE error %d\n",
1399 cqe_oob->cqe_hdr.cqe_type);
1401 apc->eth_stats.tx_cqe_err++;
1405 /* If the CQE type is unknown, log an error,
1406 * and still free the SKB, update tail, etc.
1408 if (net_ratelimit())
1409 netdev_err(ndev, "TX: unknown CQE type %d\n",
1410 cqe_oob->cqe_hdr.cqe_type);
1412 apc->eth_stats.tx_cqe_unknown_type++;
1416 if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
1419 skb = skb_dequeue(&txq->pending_skbs);
1420 if (WARN_ON_ONCE(!skb))
1423 wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
1424 wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1426 mana_unmap_skb(skb, apc);
1428 napi_consume_skb(skb, cq->budget);
1433 if (WARN_ON_ONCE(wqe_unit_cnt == 0))
1436 mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1438 gdma_wq = txq->gdma_sq;
1439 avail_space = mana_gd_wq_avail_space(gdma_wq);
1441 /* Ensure tail updated before checking q stop */
1444 net_txq = txq->net_txq;
1445 txq_stopped = netif_tx_queue_stopped(net_txq);
1447 /* Ensure checking txq_stopped before apc->port_is_up. */
1450 if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1451 netif_tx_wake_queue(net_txq);
1452 apc->eth_stats.wake_queue++;
1455 if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1458 cq->work_done = pkt_transmitted;
1461 static void mana_post_pkt_rxq(struct mana_rxq *rxq)
1463 struct mana_recv_buf_oob *recv_buf_oob;
1467 curr_index = rxq->buf_index++;
1468 if (rxq->buf_index == rxq->num_rx_buf)
1471 recv_buf_oob = &rxq->rx_oobs[curr_index];
1473 err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1474 &recv_buf_oob->wqe_inf);
1475 if (WARN_ON_ONCE(err))
1478 WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
1481 static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
1482 uint pkt_len, struct xdp_buff *xdp)
1484 struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);
1489 if (xdp->data_hard_start) {
1490 skb_reserve(skb, xdp->data - xdp->data_hard_start);
1491 skb_put(skb, xdp->data_end - xdp->data);
1495 skb_reserve(skb, rxq->headroom);
1496 skb_put(skb, pkt_len);
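/* Deliver one received buffer: run XDP first, then build an skb, set
 * checksum/hash/VLAN metadata and pass it to GRO (or transmit it for XDP_TX).
 */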
1501 static void mana_rx_skb(void *buf_va, bool from_pool,
1502 struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq)
1504 struct mana_stats_rx *rx_stats = &rxq->stats;
1505 struct net_device *ndev = rxq->ndev;
1506 uint pkt_len = cqe->ppi[0].pkt_len;
1507 u16 rxq_idx = rxq->rxq_idx;
1508 struct napi_struct *napi;
1509 struct xdp_buff xdp = {};
1510 struct sk_buff *skb;
1514 rxq->rx_cq.work_done++;
1515 napi = &rxq->rx_cq.napi;
1518 ++ndev->stats.rx_dropped;
1522 act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
1524 if (act == XDP_REDIRECT && !rxq->xdp_rc)
1527 if (act != XDP_PASS && act != XDP_TX)
1530 skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);
1536 skb_mark_for_recycle(skb);
1538 skb->dev = napi->dev;
1540 skb->protocol = eth_type_trans(skb, ndev);
1541 skb_checksum_none_assert(skb);
1542 skb_record_rx_queue(skb, rxq_idx);
1544 if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
1545 if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
1546 skb->ip_summed = CHECKSUM_UNNECESSARY;
1549 if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
1550 hash_value = cqe->ppi[0].pkt_hash;
1552 if (cqe->rx_hashtype & MANA_HASH_L4)
1553 skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
1555 skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
1558 if (cqe->rx_vlantag_present) {
1559 u16 vlan_tci = cqe->rx_vlan_id;
1561 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
1564 u64_stats_update_begin(&rx_stats->syncp);
1565 rx_stats->packets++;
1566 rx_stats->bytes += pkt_len;
1570 u64_stats_update_end(&rx_stats->syncp);
1572 if (act == XDP_TX) {
1573 skb_set_queue_mapping(skb, rxq_idx);
1574 mana_xdp_tx(skb, ndev);
1578 napi_gro_receive(napi, skb);
1583 u64_stats_update_begin(&rx_stats->syncp);
1584 rx_stats->xdp_drop++;
1585 u64_stats_update_end(&rx_stats->syncp);
1589 page_pool_recycle_direct(rxq->page_pool,
1590 virt_to_head_page(buf_va));
1592 WARN_ON_ONCE(rxq->xdp_save_va);
1593 /* Save for reuse */
1594 rxq->xdp_save_va = buf_va;
1597 ++ndev->stats.rx_dropped;
1602 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
1603 dma_addr_t *da, bool *from_pool, bool is_napi)
1610 /* Reuse XDP dropped page if available */
1611 if (rxq->xdp_save_va) {
1612 va = rxq->xdp_save_va;
1613 rxq->xdp_save_va = NULL;
1614 } else if (rxq->alloc_size > PAGE_SIZE) {
1616 va = napi_alloc_frag(rxq->alloc_size);
1618 va = netdev_alloc_frag(rxq->alloc_size);
1623 page = virt_to_head_page(va);
1624 /* Check if the frag falls back to a single page */
1625 if (compound_order(page) < get_order(rxq->alloc_size)) {
1630 page = page_pool_dev_alloc_pages(rxq->page_pool);
1635 va = page_to_virt(page);
1638 *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
1640 if (dma_mapping_error(dev, *da)) {
1642 page_pool_put_full_page(rxq->page_pool, page, false);
1644 put_page(virt_to_head_page(va));
1652 /* Allocate a frag for the RX buffer, and save the old buffer */
1653 static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
1654 struct mana_recv_buf_oob *rxoob, void **old_buf,
1661 va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
1665 dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
1667 *old_buf = rxoob->buf_va;
1668 *old_fp = rxoob->from_pool;
1671 rxoob->sgl[0].address = da;
1672 rxoob->from_pool = from_pool;
1675 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1676 struct gdma_comp *cqe)
1678 struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1679 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1680 struct net_device *ndev = rxq->ndev;
1681 struct mana_recv_buf_oob *rxbuf_oob;
1682 struct mana_port_context *apc;
1683 struct device *dev = gc->dev;
1684 void *old_buf = NULL;
1688 apc = netdev_priv(ndev);
1690 switch (oob->cqe_hdr.cqe_type) {
1694 case CQE_RX_TRUNCATED:
1695 ++ndev->stats.rx_dropped;
1696 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1697 netdev_warn_once(ndev, "Dropped a truncated packet\n");
1700 case CQE_RX_COALESCED_4:
1701 netdev_err(ndev, "RX coalescing is unsupported\n");
1702 apc->eth_stats.rx_coalesced_err++;
1705 case CQE_RX_OBJECT_FENCE:
1706 complete(&rxq->fence_event);
1710 netdev_err(ndev, "Unknown RX CQE type = %d\n",
1711 oob->cqe_hdr.cqe_type);
1712 apc->eth_stats.rx_cqe_unknown_type++;
1716 pktlen = oob->ppi[0].pkt_len;
1719 /* data packets should never have a packet length of zero */
1720 netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
1721 rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1725 curr = rxq->buf_index;
1726 rxbuf_oob = &rxq->rx_oobs[curr];
1727 WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
1729 mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);
1731 /* Unsuccessful refill will have old_buf == NULL.
1732 * In this case, mana_rx_skb() will drop the packet.
1734 mana_rx_skb(old_buf, old_fp, oob, rxq);
1737 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1739 mana_post_pkt_rxq(rxq);
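/* RX completion path: process the polled CQEs and ring the RQ doorbell once
 * for all the buffers reposted in this batch.
 */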
1742 static void mana_poll_rx_cq(struct mana_cq *cq)
1744 struct gdma_comp *comp = cq->gdma_comp_buf;
1745 struct mana_rxq *rxq = cq->rxq;
1748 comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1749 WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
1751 rxq->xdp_flush = false;
1753 for (i = 0; i < comp_read; i++) {
1754 if (WARN_ON_ONCE(comp[i].is_sq))
1757 /* verify recv cqe references the right rxq */
1758 if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
1761 mana_process_rx_cqe(rxq, cq, &comp[i]);
1764 if (comp_read > 0) {
1765 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1767 mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
1774 static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1776 struct mana_cq *cq = context;
1780 WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
1782 if (cq->type == MANA_CQ_TYPE_RX)
1783 mana_poll_rx_cq(cq);
1785 mana_poll_tx_cq(cq);
1789 if (w < cq->budget &&
1790 napi_complete_done(&cq->napi, w)) {
1791 arm_bit = SET_ARM_BIT;
1796 mana_gd_ring_cq(gdma_queue, arm_bit);
1801 static int mana_poll(struct napi_struct *napi, int budget)
1803 struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
1807 cq->budget = budget;
1809 w = mana_cq_handler(cq, cq->gdma_cq);
1811 return min(w, budget);
1814 static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
1816 struct mana_cq *cq = context;
1818 napi_schedule_irqoff(&cq->napi);
1821 static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1823 struct gdma_dev *gd = apc->ac->gdma_dev;
1828 mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1831 static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1833 struct gdma_dev *gd = apc->ac->gdma_dev;
1838 mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1841 static void mana_destroy_txq(struct mana_port_context *apc)
1843 struct napi_struct *napi;
1849 for (i = 0; i < apc->num_queues; i++) {
1850 napi = &apc->tx_qp[i].tx_cq.napi;
1851 napi_synchronize(napi);
1853 netif_napi_del(napi);
1855 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1857 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1859 mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1866 static int mana_create_txq(struct mana_port_context *apc,
1867 struct net_device *net)
1869 struct mana_context *ac = apc->ac;
1870 struct gdma_dev *gd = ac->gdma_dev;
1871 struct mana_obj_spec wq_spec;
1872 struct mana_obj_spec cq_spec;
1873 struct gdma_queue_spec spec;
1874 struct gdma_context *gc;
1875 struct mana_txq *txq;
1882 apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
1887 /* The minimum size of the WQE is 32 bytes, hence
1888 * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
1889 * the SQ can store. This value is then used to size other queues
1890 * to prevent overflow.
1892 txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1893 BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
1895 cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1896 cq_size = PAGE_ALIGN(cq_size);
1898 gc = gd->gdma_context;
1900 for (i = 0; i < apc->num_queues; i++) {
1901 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1904 txq = &apc->tx_qp[i].txq;
1906 u64_stats_init(&txq->stats.syncp);
1908 txq->net_txq = netdev_get_tx_queue(net, i);
1909 txq->vp_offset = apc->tx_vp_offset;
1910 skb_queue_head_init(&txq->pending_skbs);
1912 memset(&spec, 0, sizeof(spec));
1913 spec.type = GDMA_SQ;
1914 spec.monitor_avl_buf = true;
1915 spec.queue_size = txq_size;
1916 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
1920 /* Create SQ's CQ */
1921 cq = &apc->tx_qp[i].tx_cq;
1922 cq->type = MANA_CQ_TYPE_TX;
1926 memset(&spec, 0, sizeof(spec));
1927 spec.type = GDMA_CQ;
1928 spec.monitor_avl_buf = false;
1929 spec.queue_size = cq_size;
1930 spec.cq.callback = mana_schedule_napi;
1931 spec.cq.parent_eq = ac->eqs[i].eq;
1932 spec.cq.context = cq;
1933 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1937 memset(&wq_spec, 0, sizeof(wq_spec));
1938 memset(&cq_spec, 0, sizeof(cq_spec));
1940 wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
1941 wq_spec.queue_size = txq->gdma_sq->queue_size;
1943 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
1944 cq_spec.queue_size = cq->gdma_cq->queue_size;
1945 cq_spec.modr_ctx_id = 0;
1946 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1948 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
1950 &apc->tx_qp[i].tx_object);
1955 txq->gdma_sq->id = wq_spec.queue_index;
1956 cq->gdma_cq->id = cq_spec.queue_index;
1958 txq->gdma_sq->mem_info.dma_region_handle =
1959 GDMA_INVALID_DMA_REGION;
1960 cq->gdma_cq->mem_info.dma_region_handle =
1961 GDMA_INVALID_DMA_REGION;
1963 txq->gdma_txq_id = txq->gdma_sq->id;
1965 cq->gdma_id = cq->gdma_cq->id;
1967 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1972 gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1974 netif_napi_add_tx(net, &cq->napi, mana_poll);
1975 napi_enable(&cq->napi);
1977 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
1982 mana_destroy_txq(apc);
1986 static void mana_destroy_rxq(struct mana_port_context *apc,
1987 struct mana_rxq *rxq, bool validate_state)
1990 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1991 struct mana_recv_buf_oob *rx_oob;
1992 struct device *dev = gc->dev;
1993 struct napi_struct *napi;
2000 napi = &rxq->rx_cq.napi;
2003 napi_synchronize(napi);
2007 xdp_rxq_info_unreg(&rxq->xdp_rxq);
2009 netif_napi_del(napi);
2011 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2013 mana_deinit_cq(apc, &rxq->rx_cq);
2015 if (rxq->xdp_save_va)
2016 put_page(virt_to_head_page(rxq->xdp_save_va));
2018 for (i = 0; i < rxq->num_rx_buf; i++) {
2019 rx_oob = &rxq->rx_oobs[i];
2021 if (!rx_oob->buf_va)
2024 dma_unmap_single(dev, rx_oob->sgl[0].address,
2025 rx_oob->sgl[0].size, DMA_FROM_DEVICE);
2027 page = virt_to_head_page(rx_oob->buf_va);
2029 if (rx_oob->from_pool)
2030 page_pool_put_full_page(rxq->page_pool, page, false);
2034 rx_oob->buf_va = NULL;
2037 page_pool_destroy(rxq->page_pool);
2040 mana_gd_destroy_queue(gc, rxq->gdma_rq);
2045 static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
2046 struct mana_rxq *rxq, struct device *dev)
2048 struct mana_port_context *mpc = netdev_priv(rxq->ndev);
2049 bool from_pool = false;
2053 if (mpc->rxbufs_pre)
2054 va = mana_get_rxbuf_pre(rxq, &da);
2056 va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
2061 rx_oob->buf_va = va;
2062 rx_oob->from_pool = from_pool;
2064 rx_oob->sgl[0].address = da;
2065 rx_oob->sgl[0].size = rxq->datasize;
2066 rx_oob->sgl[0].mem_key = mem_key;
2071 #define MANA_WQE_HEADER_SIZE 16
2072 #define MANA_WQE_SGE_SIZE 16
2074 static int mana_alloc_rx_wqe(struct mana_port_context *apc,
2075 struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
2077 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2078 struct mana_recv_buf_oob *rx_oob;
2079 struct device *dev = gc->dev;
2083 WARN_ON(rxq->datasize == 0);
2088 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2089 rx_oob = &rxq->rx_oobs[buf_idx];
2090 memset(rx_oob, 0, sizeof(*rx_oob));
2092 rx_oob->num_sge = 1;
2094 ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
2099 rx_oob->wqe_req.sgl = rx_oob->sgl;
2100 rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2101 rx_oob->wqe_req.inline_oob_size = 0;
2102 rx_oob->wqe_req.inline_oob_data = NULL;
2103 rx_oob->wqe_req.flags = 0;
2104 rx_oob->wqe_req.client_data_unit = 0;
2106 *rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2107 MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2108 *cq_size += COMP_ENTRY_SIZE;
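/* Post the pre-built RX WQEs to the RQ and ring its doorbell. */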
2114 static int mana_push_wqe(struct mana_rxq *rxq)
2116 struct mana_recv_buf_oob *rx_oob;
2120 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2121 rx_oob = &rxq->rx_oobs[buf_idx];
2123 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2132 static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
2134 struct page_pool_params pprm = {};
2137 pprm.pool_size = RX_BUFFERS_PER_QUEUE;
2138 pprm.nid = gc->numa_node;
2139 pprm.napi = &rxq->rx_cq.napi;
2141 rxq->page_pool = page_pool_create(&pprm);
2143 if (IS_ERR(rxq->page_pool)) {
2144 ret = PTR_ERR(rxq->page_pool);
2145 rxq->page_pool = NULL;
2152 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
2153 u32 rxq_idx, struct mana_eq *eq,
2154 struct net_device *ndev)
2156 struct gdma_dev *gd = apc->ac->gdma_dev;
2157 struct mana_obj_spec wq_spec;
2158 struct mana_obj_spec cq_spec;
2159 struct gdma_queue_spec spec;
2160 struct mana_cq *cq = NULL;
2161 struct gdma_context *gc;
2162 u32 cq_size, rq_size;
2163 struct mana_rxq *rxq;
2166 gc = gd->gdma_context;
2168 rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
2174 rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
2175 rxq->rxq_idx = rxq_idx;
2176 rxq->rxobj = INVALID_MANA_HANDLE;
2178 mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
2181 /* Create page pool for RX queue */
2182 err = mana_create_page_pool(rxq, gc);
2184 netdev_err(ndev, "Create page pool err:%d\n", err);
2188 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2192 rq_size = PAGE_ALIGN(rq_size);
2193 cq_size = PAGE_ALIGN(cq_size);
2196 memset(&spec, 0, sizeof(spec));
2197 spec.type = GDMA_RQ;
2198 spec.monitor_avl_buf = true;
2199 spec.queue_size = rq_size;
2200 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2204 /* Create RQ's CQ */
2206 cq->type = MANA_CQ_TYPE_RX;
2209 memset(&spec, 0, sizeof(spec));
2210 spec.type = GDMA_CQ;
2211 spec.monitor_avl_buf = false;
2212 spec.queue_size = cq_size;
2213 spec.cq.callback = mana_schedule_napi;
2214 spec.cq.parent_eq = eq->eq;
2215 spec.cq.context = cq;
2216 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2220 memset(&wq_spec, 0, sizeof(wq_spec));
2221 memset(&cq_spec, 0, sizeof(cq_spec));
2222 wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2223 wq_spec.queue_size = rxq->gdma_rq->queue_size;
2225 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2226 cq_spec.queue_size = cq->gdma_cq->queue_size;
2227 cq_spec.modr_ctx_id = 0;
2228 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2230 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2231 &wq_spec, &cq_spec, &rxq->rxobj);
2235 rxq->gdma_rq->id = wq_spec.queue_index;
2236 cq->gdma_cq->id = cq_spec.queue_index;
2238 rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2239 cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2241 rxq->gdma_id = rxq->gdma_rq->id;
2242 cq->gdma_id = cq->gdma_cq->id;
2244 err = mana_push_wqe(rxq);
2248 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
2253 gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2255 netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
2257 WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
2259 WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
2262 napi_enable(&cq->napi);
2264 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2269 netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
2271 mana_destroy_rxq(apc, rxq, false);
2274 mana_deinit_cq(apc, cq);
2279 static int mana_add_rx_queues(struct mana_port_context *apc,
2280 struct net_device *ndev)
2282 struct mana_context *ac = apc->ac;
2283 struct mana_rxq *rxq;
2287 for (i = 0; i < apc->num_queues; i++) {
2288 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2294 u64_stats_init(&rxq->stats.syncp);
2299 apc->default_rxobj = apc->rxqs[0]->rxobj;
2304 static void mana_destroy_vport(struct mana_port_context *apc)
2306 struct gdma_dev *gd = apc->ac->gdma_dev;
2307 struct mana_rxq *rxq;
2310 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2311 rxq = apc->rxqs[rxq_idx];
2315 mana_destroy_rxq(apc, rxq, true);
2316 apc->rxqs[rxq_idx] = NULL;
2319 mana_destroy_txq(apc);
2320 mana_uncfg_vport(apc);
2322 if (gd->gdma_context->is_pf)
2323 mana_pf_deregister_hw_vport(apc);
2326 static int mana_create_vport(struct mana_port_context *apc,
2327 struct net_device *net)
2329 struct gdma_dev *gd = apc->ac->gdma_dev;
2332 apc->default_rxobj = INVALID_MANA_HANDLE;
2334 if (gd->gdma_context->is_pf) {
2335 err = mana_pf_register_hw_vport(apc);
2340 err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2344 return mana_create_txq(apc, net);
2347 static void mana_rss_table_init(struct mana_port_context *apc)
2351 for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
2352 apc->indir_table[i] =
2353 ethtool_rxfh_indir_default(i, apc->num_queues);
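/* Rebuild the RX object table from the indirection table and push the RSS
 * settings (hash key and/or indirection table) to the vPort.
 */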
2356 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2357 bool update_hash, bool update_tab)
2364 for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
2365 queue_idx = apc->indir_table[i];
2366 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2370 err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2374 mana_fence_rqs(apc);
2379 void mana_query_gf_stats(struct mana_port_context *apc)
2381 struct mana_query_gf_stat_resp resp = {};
2382 struct mana_query_gf_stat_req req = {};
2383 struct net_device *ndev = apc->ndev;
2386 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
2387 sizeof(req), sizeof(resp));
2388 req.req_stats = STATISTICS_FLAGS_HC_TX_BYTES |
2389 STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
2390 STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
2391 STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
2392 STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
2393 STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
2394 STATISTICS_FLAGS_HC_TX_BCAST_BYTES;
2396 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
2399 netdev_err(ndev, "Failed to query GF stats: %d\n", err);
2402 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
2404 if (err || resp.hdr.status) {
2405 netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err,
2410 apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
2411 apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
2412 apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
2413 apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
2414 apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
2415 apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
2416 apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
2419 static int mana_init_port(struct net_device *ndev)
2421 struct mana_port_context *apc = netdev_priv(ndev);
2422 u32 max_txq, max_rxq, max_queues;
2423 int port_idx = apc->port_idx;
2424 u32 num_indirect_entries;
2427 err = mana_init_port_context(apc);
2431 err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2432 &num_indirect_entries);
2434 netdev_err(ndev, "Failed to query info for vPort %d\n",
2439 max_queues = min_t(u32, max_txq, max_rxq);
2440 if (apc->max_queues > max_queues)
2441 apc->max_queues = max_queues;
2443 if (apc->num_queues > apc->max_queues)
2444 apc->num_queues = apc->max_queues;
2446 eth_hw_addr_set(ndev, apc->mac_addr);
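/* Create the vPort with its TX/RX queues and RSS configuration, and
 * register the PF filter when running on a PF device.
 */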
2456 int mana_alloc_queues(struct net_device *ndev)
2458 struct mana_port_context *apc = netdev_priv(ndev);
2459 struct gdma_dev *gd = apc->ac->gdma_dev;
2462 err = mana_create_vport(apc, ndev);
2466 err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
2470 err = mana_add_rx_queues(apc, ndev);
2474 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2476 err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
2480 mana_rss_table_init(apc);
2482 err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2486 if (gd->gdma_context->is_pf) {
2487 err = mana_pf_register_filter(apc);
2492 mana_chn_setxdp(apc, mana_xdp_get(apc));
2497 mana_destroy_vport(apc);
2501 int mana_attach(struct net_device *ndev)
2503 struct mana_port_context *apc = netdev_priv(ndev);
2508 err = mana_init_port(ndev);
2512 if (apc->port_st_save) {
2513 err = mana_alloc_queues(ndev);
2515 mana_cleanup_port_context(apc);
2520 apc->port_is_up = apc->port_st_save;
2522 /* Ensure port state updated before txq state */
2525 if (apc->port_is_up)
2526 netif_carrier_on(ndev);
2528 netif_device_attach(ndev);
2533 static int mana_dealloc_queues(struct net_device *ndev)
2535 struct mana_port_context *apc = netdev_priv(ndev);
2536 unsigned long timeout = jiffies + 120 * HZ;
2537 struct gdma_dev *gd = apc->ac->gdma_dev;
2538 struct mana_txq *txq;
2539 struct sk_buff *skb;
2543 if (apc->port_is_up)
2546 mana_chn_setxdp(apc, NULL);
2548 if (gd->gdma_context->is_pf)
2549 mana_pf_deregister_filter(apc);
2551 /* No packet can be transmitted now since apc->port_is_up is false.
2552 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
2553 * a txq because it may not timely see apc->port_is_up being cleared
2554 * to false, but it doesn't matter since mana_start_xmit() drops any
2555 * new packets due to apc->port_is_up being false.
2557 * Drain all the in-flight TX packets.
2558 * A timeout of 120 seconds for all the queues is used.
2559 * This will break the while loop when h/w is not responding.
2560  * This value of 120 has been decided here considering the max number of queues.
2564 for (i = 0; i < apc->num_queues; i++) {
2565 txq = &apc->tx_qp[i].txq;
2567 while (atomic_read(&txq->pending_sends) > 0 &&
2568 time_before(jiffies, timeout)) {
2569 usleep_range(tsleep, tsleep + 1000);
2572 if (atomic_read(&txq->pending_sends)) {
2573 err = pcie_flr(to_pci_dev(gd->gdma_context->dev));
2575 netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n",
2576 err, atomic_read(&txq->pending_sends),
2583 for (i = 0; i < apc->num_queues; i++) {
2584 txq = &apc->tx_qp[i].txq;
2585 while ((skb = skb_dequeue(&txq->pending_skbs))) {
2586 mana_unmap_skb(skb, apc);
2587 dev_kfree_skb_any(skb);
2589 atomic_set(&txq->pending_sends, 0);
2591 /* We're 100% sure the queues can no longer be woken up, because
2592 * we're sure now mana_poll_tx_cq() can't be running.
2595 apc->rss_state = TRI_STATE_FALSE;
2596 err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2598 netdev_err(ndev, "Failed to disable vPort: %d\n", err);
2602 mana_destroy_vport(apc);
2607 int mana_detach(struct net_device *ndev, bool from_close)
2609 struct mana_port_context *apc = netdev_priv(ndev);
2614 apc->port_st_save = apc->port_is_up;
2615 apc->port_is_up = false;
2617 /* Ensure port state updated before txq state */
2620 netif_tx_disable(ndev);
2621 netif_carrier_off(ndev);
2623 if (apc->port_st_save) {
2624 err = mana_dealloc_queues(ndev);
2630 netif_device_detach(ndev);
2631 mana_cleanup_port_context(apc);
2637 static int mana_probe_port(struct mana_context *ac, int port_idx,
2638 struct net_device **ndev_storage)
2640 struct gdma_context *gc = ac->gdma_dev->gdma_context;
2641 struct mana_port_context *apc;
2642 struct net_device *ndev;
2645 ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
2646 gc->max_num_queues);
2650 *ndev_storage = ndev;
2652 apc = netdev_priv(ndev);
2655 apc->max_queues = gc->max_num_queues;
2656 apc->num_queues = gc->max_num_queues;
2657 apc->port_handle = INVALID_MANA_HANDLE;
2658 apc->pf_filter_handle = INVALID_MANA_HANDLE;
2659 apc->port_idx = port_idx;
2661 mutex_init(&apc->vport_mutex);
2662 apc->vport_use_count = 0;
2664 ndev->netdev_ops = &mana_devops;
2665 ndev->ethtool_ops = &mana_ethtool_ops;
2666 ndev->mtu = ETH_DATA_LEN;
2667 ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
2668 ndev->min_mtu = ETH_MIN_MTU;
2669 ndev->needed_headroom = MANA_HEADROOM;
2670 ndev->dev_port = port_idx;
2671 SET_NETDEV_DEV(ndev, gc->dev);
2673 netif_carrier_off(ndev);
2675 netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2677 err = mana_init_port(ndev);
2681 netdev_lockdep_set_classes(ndev);
2683 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2684 ndev->hw_features |= NETIF_F_RXCSUM;
2685 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2686 ndev->hw_features |= NETIF_F_RXHASH;
2687 ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX |
2688 NETIF_F_HW_VLAN_CTAG_RX;
2689 ndev->vlan_features = ndev->features;
2690 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
2691 NETDEV_XDP_ACT_NDO_XMIT;
2693 err = register_netdev(ndev);
2695 netdev_err(ndev, "Unable to register netdev.\n");
2705 *ndev_storage = NULL;
2706 netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2711 static void adev_release(struct device *dev)
2713 struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev);
2718 static void remove_adev(struct gdma_dev *gd)
2720 struct auxiliary_device *adev = gd->adev;
2723 auxiliary_device_delete(adev);
2724 auxiliary_device_uninit(adev);
2726 mana_adev_idx_free(id);
2730 static int add_adev(struct gdma_dev *gd)
2732 struct auxiliary_device *adev;
2733 struct mana_adev *madev;
2736 madev = kzalloc(sizeof(*madev), GFP_KERNEL);
2740 adev = &madev->adev;
2741 ret = mana_adev_idx_alloc();
2746 adev->name = "rdma";
2747 adev->dev.parent = gd->gdma_context->dev;
2748 adev->dev.release = adev_release;
2751 ret = auxiliary_device_init(adev);
2755 ret = auxiliary_device_add(adev);
2763 auxiliary_device_uninit(adev);
2766 mana_adev_idx_free(adev->id);
2774 int mana_probe(struct gdma_dev *gd, bool resuming)
2776 struct gdma_context *gc = gd->gdma_context;
2777 struct mana_context *ac = gd->driver_data;
2778 struct device *dev = gc->dev;
2784 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
2785 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
2787 err = mana_gd_register_device(gd);
2792 ac = kzalloc(sizeof(*ac), GFP_KERNEL);
2797 gd->driver_data = ac;
2800 err = mana_create_eq(ac);
2804 err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
2805 MANA_MICRO_VERSION, &num_ports);
2810 ac->num_ports = num_ports;
2812 if (ac->num_ports != num_ports) {
2813 dev_err(dev, "The number of vPorts changed: %d->%d\n",
2814 ac->num_ports, num_ports);
2820 if (ac->num_ports == 0)
2821 dev_err(dev, "Failed to detect any vPort\n");
2823 if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
2824 ac->num_ports = MAX_PORTS_IN_MANA_DEV;
2827 for (i = 0; i < ac->num_ports; i++) {
2828 err = mana_probe_port(ac, i, &ac->ports[i]);
2833 for (i = 0; i < ac->num_ports; i++) {
2835 err = mana_attach(ac->ports[i]);
2845 mana_remove(gd, false);
2850 void mana_remove(struct gdma_dev *gd, bool suspending)
2852 struct gdma_context *gc = gd->gdma_context;
2853 struct mana_context *ac = gd->driver_data;
2854 struct device *dev = gc->dev;
2855 struct net_device *ndev;
2859 /* adev currently doesn't support suspending, always remove it */
2863 for (i = 0; i < ac->num_ports; i++) {
2864 ndev = ac->ports[i];
2867 dev_err(dev, "No net device to remove\n");
2871 /* All cleanup actions should stay after rtnl_lock(), otherwise
2872 * other functions may access partially cleaned up data.
2876 err = mana_detach(ndev, false);
2878 netdev_err(ndev, "Failed to detach vPort %d: %d\n",
2882 /* No need to unregister the ndev. */
2887 unregister_netdevice(ndev);
2894 mana_destroy_eq(ac);
2896 mana_gd_deregister_device(gd);
2901 gd->driver_data = NULL;
2902 gd->gdma_context = NULL;