// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/iommu.h>

#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"

#include <rvu_trace.h>
#define DRV_NAME	"rvu_nicpf"
#define DRV_STRING	"Marvell RVU NIC Physical Function Driver"
/* Supported devices */
static const struct pci_device_id otx2_pf_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
	{ 0, } /* end of table */
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
static void otx2_vf_link_event_task(struct work_struct *work);

static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
	bool if_up = netif_running(netdev);
	int err = 0;

	if (if_up)
		otx2_stop(netdev);

	netdev_info(netdev, "Changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (if_up)
		err = otx2_open(netdev);

	return err;
}
70 static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
72 int irq, vfs = pf->total_vfs;
74 /* Disable VFs ME interrupts */
75 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
76 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
79 /* Disable VFs FLR interrupts */
80 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
81 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
87 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
88 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
91 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
92 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
96 static void otx2_flr_wq_destroy(struct otx2_nic *pf)
100 destroy_workqueue(pf->flr_wq);
102 devm_kfree(pf->dev, pf->flr_wrk);
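/* Deferred FLR handling: the hard IRQ handler below only queues work; this
 * worker asks the AF (via a vf_flr mailbox message tagged with the VF's
 * pcifunc) to clean up the VF's resources, then clears the VF's
 * transaction-pending bit and re-enables its FLR interrupt.
 */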
105 static void otx2_flr_handler(struct work_struct *work)
107 struct flr_work *flrwork = container_of(work, struct flr_work, work);
108 struct otx2_nic *pf = flrwork->pf;
109 struct mbox *mbox = &pf->mbox;
113 vf = flrwork - pf->flr_wrk;
115 mutex_lock(&mbox->lock);
116 req = otx2_mbox_alloc_msg_vf_flr(mbox);
118 mutex_unlock(&mbox->lock);
121 req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
122 req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
124 if (!otx2_sync_mbox_msg(&pf->mbox)) {
		/* clear transaction pending bit */
130 otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
131 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
134 mutex_unlock(&mbox->lock);
137 static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
139 struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
140 int reg, dev, vf, start_vf, num_reg = 1;
143 if (pf->total_vfs > 64)
146 for (reg = 0; reg < num_reg; reg++) {
147 intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
151 for (vf = 0; vf < 64; vf++) {
152 if (!(intr & BIT_ULL(vf)))
155 queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
156 /* Clear interrupt */
157 otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
158 /* Disable the interrupt */
159 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
166 static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
168 struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
169 int vf, reg, num_reg = 1;
172 if (pf->total_vfs > 64)
175 for (reg = 0; reg < num_reg; reg++) {
176 intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
179 for (vf = 0; vf < 64; vf++) {
180 if (!(intr & BIT_ULL(vf)))
182 /* clear trpend bit */
183 otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
184 /* clear interrupt */
185 otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
191 static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
193 struct otx2_hw *hw = &pf->hw;
	/* Register ME interrupt handler */
198 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
199 snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
200 ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
201 otx2_pf_me_intr_handler, 0, irq_name, pf);
204 "RVUPF: IRQ registration failed for ME0\n");
207 /* Register FLR interrupt handler */
208 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
209 snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
210 ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
211 otx2_pf_flr_intr_handler, 0, irq_name, pf);
214 "RVUPF: IRQ registration failed for FLR0\n");
219 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
220 snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
221 rvu_get_pf(pf->pcifunc));
		ret = request_irq(pci_irq_vector(pf->pdev,
						 RVU_PF_INT_VEC_VFME1),
				  otx2_pf_me_intr_handler, 0, irq_name, pf);
227 "RVUPF: IRQ registration failed for ME1\n");
229 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
230 snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
231 rvu_get_pf(pf->pcifunc));
		ret = request_irq(pci_irq_vector(pf->pdev,
						 RVU_PF_INT_VEC_VFFLR1),
				  otx2_pf_flr_intr_handler, 0, irq_name, pf);
237 "RVUPF: IRQ registration failed for FLR1\n");
	/* Enable ME interrupt for all VFs */
243 otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
244 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));
	/* Enable FLR interrupt for all VFs */
247 otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
248 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));
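	/* Note: judging by its use here, INTR_MASK(n) is a bitmask with the
	 * low 'n' bits set, one bit per VF; each 64-bit interrupt/enable
	 * register word therefore covers at most 64 VFs, which is why the
	 * '(1)' register instances below handle VFs beyond the first 64.
	 */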
253 otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
254 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
257 otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
258 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
264 static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
268 pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq",
269 WQ_UNBOUND | WQ_HIGHPRI, 1);
273 pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
274 sizeof(struct flr_work), GFP_KERNEL);
276 destroy_workqueue(pf->flr_wq);
280 for (vf = 0; vf < num_vfs; vf++) {
281 pf->flr_wrk[vf].pf = pf;
282 INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
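/* For every device whose bit is set in 'intr', snapshot the number of
 * pending mailbox messages from the shared region header into mw[i] and
 * queue the matching work item; for TYPE_PFAF this additionally syncs the
 * bounce buffer and clears the hardware mbox header so the next interrupt
 * sees a fresh count.
 */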
288 static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
289 int first, int mdevs, u64 intr, int type)
291 struct otx2_mbox_dev *mdev;
292 struct otx2_mbox *mbox;
293 struct mbox_hdr *hdr;
296 for (i = first; i < mdevs; i++) {
298 if (!(intr & BIT_ULL(i - first)))
302 mdev = &mbox->dev[i];
303 if (type == TYPE_PFAF)
304 otx2_sync_mbox_bbuf(mbox, i);
305 hdr = mdev->mbase + mbox->rx_start;
		/* The hdr->num_msgs is set to zero immediately in the interrupt
		 * handler to ensure that it holds a correct value next time
		 * when the interrupt handler is called.
		 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
		 * and pf->mbox.up_num_msgs holds the data for use in
		 * pfaf_mbox_up_handler.
		 */
314 mw[i].num_msgs = hdr->num_msgs;
316 if (type == TYPE_PFAF)
317 memset(mbox->hwbase + mbox->rx_start, 0,
318 ALIGN(sizeof(struct mbox_hdr),
321 queue_work(mbox_wq, &mw[i].mbox_wrk);
325 mdev = &mbox->dev[i];
326 if (type == TYPE_PFAF)
327 otx2_sync_mbox_bbuf(mbox, i);
328 hdr = mdev->mbase + mbox->rx_start;
330 mw[i].up_num_msgs = hdr->num_msgs;
332 if (type == TYPE_PFAF)
333 memset(mbox->hwbase + mbox->rx_start, 0,
334 ALIGN(sizeof(struct mbox_hdr),
337 queue_work(mbox_wq, &mw[i].mbox_up_wrk);
342 static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
343 struct otx2_mbox *pfvf_mbox, void *bbuf_base,
346 struct otx2_mbox_dev *src_mdev = mdev;
349 /* Msgs are already copied, trigger VF's mbox irq */
352 offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
353 writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
355 /* Restore VF's mbox bounce buffer region address */
356 src_mdev->mbase = bbuf_base;
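/* Forward mailbox messages without copying payloads, by temporarily
 * aliasing the destination mailbox's base address to the source region:
 * MBOX_DIR_PFAF forwards VF requests up to the AF, MBOX_DIR_PFVF_UP pushes
 * AF notifications down to a VF, and MBOX_DIR_VFPF_UP forwards a VF's
 * "up" responses.
 */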
359 static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
360 struct otx2_mbox *src_mbox,
361 int dir, int vf, int num_msgs)
363 struct otx2_mbox_dev *src_mdev, *dst_mdev;
364 struct mbox_hdr *mbox_hdr;
365 struct mbox_hdr *req_hdr;
366 struct mbox *dst_mbox;
369 if (dir == MBOX_DIR_PFAF) {
		/* Set VF's mailbox memory as PF's bounce buffer memory, so
		 * that explicit copying of VF's msgs to PF=>AF mbox region
		 * and AF=>PF responses to VF's mbox region can be avoided.
		 */
374 src_mdev = &src_mbox->dev[vf];
375 mbox_hdr = src_mbox->hwbase +
376 src_mbox->rx_start + (vf * MBOX_SIZE);
378 dst_mbox = &pf->mbox;
379 dst_size = dst_mbox->mbox.tx_size -
380 ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
381 /* Check if msgs fit into destination area and has valid size */
382 if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size)
385 dst_mdev = &dst_mbox->mbox.dev[0];
387 mutex_lock(&pf->mbox.lock);
388 dst_mdev->mbase = src_mdev->mbase;
389 dst_mdev->msg_size = mbox_hdr->msg_size;
390 dst_mdev->num_msgs = num_msgs;
391 err = otx2_sync_mbox_msg(dst_mbox);
394 "AF not responding to VF%d messages\n", vf);
395 /* restore PF mbase and exit */
396 dst_mdev->mbase = pf->mbox.bbuf_base;
397 mutex_unlock(&pf->mbox.lock);
		/* At this point, all the VF messages sent to AF are acked
		 * with proper responses and responses are copied to VF
		 * mailbox, hence raise interrupt to VF.
		 */
404 req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
405 dst_mbox->mbox.rx_start);
406 req_hdr->num_msgs = num_msgs;
408 otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
409 pf->mbox.bbuf_base, vf);
410 mutex_unlock(&pf->mbox.lock);
411 } else if (dir == MBOX_DIR_PFVF_UP) {
412 src_mdev = &src_mbox->dev[0];
413 mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
414 req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
416 req_hdr->num_msgs = num_msgs;
418 dst_mbox = &pf->mbox_pfvf[0];
419 dst_size = dst_mbox->mbox_up.tx_size -
420 ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
421 /* Check if msgs fit into destination area */
422 if (mbox_hdr->msg_size > dst_size)
425 dst_mdev = &dst_mbox->mbox_up.dev[vf];
426 dst_mdev->mbase = src_mdev->mbase;
427 dst_mdev->msg_size = mbox_hdr->msg_size;
428 dst_mdev->num_msgs = mbox_hdr->num_msgs;
429 err = otx2_sync_mbox_up_msg(dst_mbox, vf);
432 "VF%d is not responding to mailbox\n", vf);
435 } else if (dir == MBOX_DIR_VFPF_UP) {
436 req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
438 req_hdr->num_msgs = num_msgs;
439 otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
441 pf->mbox_pfvf[vf].bbuf_base,
448 static void otx2_pfvf_mbox_handler(struct work_struct *work)
450 struct mbox_msghdr *msg = NULL;
451 int offset, vf_idx, id, err;
452 struct otx2_mbox_dev *mdev;
453 struct mbox_hdr *req_hdr;
454 struct otx2_mbox *mbox;
455 struct mbox *vf_mbox;
458 vf_mbox = container_of(work, struct mbox, mbox_wrk);
460 vf_idx = vf_mbox - pf->mbox_pfvf;
462 mbox = &pf->mbox_pfvf[0].mbox;
463 mdev = &mbox->dev[vf_idx];
464 req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
466 offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
468 for (id = 0; id < vf_mbox->num_msgs; id++) {
469 msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
472 if (msg->sig != OTX2_MBOX_REQ_SIG)
		/* Set VF's number in each of the msgs */
476 msg->pcifunc &= RVU_PFVF_FUNC_MASK;
477 msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
478 offset = msg->next_msgoff;
480 err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
487 otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
488 otx2_mbox_msg_send(mbox, vf_idx);
491 static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
493 struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
494 struct otx2_nic *pf = vf_mbox->pfvf;
495 struct otx2_mbox_dev *mdev;
496 int offset, id, vf_idx = 0;
497 struct mbox_hdr *rsp_hdr;
498 struct mbox_msghdr *msg;
499 struct otx2_mbox *mbox;
501 vf_idx = vf_mbox - pf->mbox_pfvf;
502 mbox = &pf->mbox_pfvf[0].mbox_up;
503 mdev = &mbox->dev[vf_idx];
505 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
506 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
508 for (id = 0; id < vf_mbox->up_num_msgs; id++) {
509 msg = mdev->mbase + offset;
511 if (msg->id >= MBOX_MSG_MAX) {
513 "Mbox msg with unknown ID 0x%x\n", msg->id);
517 if (msg->sig != OTX2_MBOX_RSP_SIG) {
519 "Mbox msg with wrong signature %x, ID 0x%x\n",
525 case MBOX_MSG_CGX_LINK_EVENT:
530 "Mbox msg response has err %d, ID 0x%x\n",
536 offset = mbox->rx_start + msg->next_msgoff;
537 if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
538 __otx2_mbox_reset(mbox, 0);
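/* PF <=> VF mailbox IRQ: one interrupt register word per 64 VFs, so
 * RVU_PF_VFPF_MBOX_INTX(0) covers VFs 0-63 and INTX(1) covers VFs 64-127;
 * pending bits are cleared by writing them back before the corresponding
 * work items are queued.
 */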
543 static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
545 struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
546 int vfs = pf->total_vfs;
550 mbox = pf->mbox_pfvf;
551 /* Handle VF interrupts */
553 intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
554 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
555 otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
560 intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
561 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
563 otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
565 trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
570 static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
572 void __iomem *hwbase;
580 pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
581 sizeof(struct mbox), GFP_KERNEL);
585 pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
586 WQ_UNBOUND | WQ_HIGHPRI |
588 if (!pf->mbox_pfvf_wq)
	/* On CN10K platform, PF <-> VF mailbox region follows after
	 * PF <-> AF mailbox region.
	 */
594 if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
595 base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
598 base = readq((void __iomem *)((u64)pf->reg_base +
599 RVU_PF_VF_BAR4_ADDR));
601 hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
607 mbox = &pf->mbox_pfvf[0];
608 err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
609 MBOX_DIR_PFVF, numvfs);
613 err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
614 MBOX_DIR_PFVF_UP, numvfs);
618 for (vf = 0; vf < numvfs; vf++) {
620 INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
621 INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
631 destroy_workqueue(pf->mbox_pfvf_wq);
635 static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
637 struct mbox *mbox = &pf->mbox_pfvf[0];
642 if (pf->mbox_pfvf_wq) {
643 destroy_workqueue(pf->mbox_pfvf_wq);
644 pf->mbox_pfvf_wq = NULL;
647 if (mbox->mbox.hwbase)
648 iounmap(mbox->mbox.hwbase);
650 otx2_mbox_destroy(&mbox->mbox);
653 static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
655 /* Clear PF <=> VF mailbox IRQ */
656 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
657 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
659 /* Enable PF <=> VF mailbox IRQ */
660 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
663 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
668 static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
672 /* Disable PF <=> VF mailbox IRQ */
673 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
674 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
676 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
677 vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
678 free_irq(vector, pf);
681 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
682 vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
683 free_irq(vector, pf);
687 static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
689 struct otx2_hw *hw = &pf->hw;
693 /* Register MBOX0 interrupt handler */
694 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
696 snprintf(irq_name, NAME_SIZE,
697 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
699 snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
700 err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
701 otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
704 "RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
709 /* Register MBOX1 interrupt handler */
710 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
712 snprintf(irq_name, NAME_SIZE,
713 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
715 snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
716 err = request_irq(pci_irq_vector(pf->pdev,
717 RVU_PF_INT_VEC_VFPF_MBOX1),
718 otx2_pfvf_mbox_intr_handler,
722 "RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
727 otx2_enable_pfvf_mbox_intr(pf, numvfs);
732 static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
733 struct mbox_msghdr *msg)
737 if (msg->id >= MBOX_MSG_MAX) {
739 "Mbox msg with unknown ID 0x%x\n", msg->id);
743 if (msg->sig != OTX2_MBOX_RSP_SIG) {
745 "Mbox msg with wrong signature %x, ID 0x%x\n",
	/* message response is headed to a VF */
751 devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
753 struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
754 struct delayed_work *dwork;
757 case MBOX_MSG_NIX_LF_START_RX:
758 config->intf_down = false;
759 dwork = &config->link_event_work;
760 schedule_delayed_work(dwork, msecs_to_jiffies(100));
762 case MBOX_MSG_NIX_LF_STOP_RX:
763 config->intf_down = true;
772 pf->pcifunc = msg->pcifunc;
774 case MBOX_MSG_MSIX_OFFSET:
775 mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
777 case MBOX_MSG_NPA_LF_ALLOC:
778 mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
780 case MBOX_MSG_NIX_LF_ALLOC:
781 mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
783 case MBOX_MSG_NIX_TXSCH_ALLOC:
784 mbox_handler_nix_txsch_alloc(pf,
785 (struct nix_txsch_alloc_rsp *)msg);
787 case MBOX_MSG_NIX_BP_ENABLE:
788 mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
790 case MBOX_MSG_CGX_STATS:
791 mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
793 case MBOX_MSG_CGX_FEC_STATS:
794 mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
799 "Mbox msg response has err %d, ID 0x%x\n",
805 static void otx2_pfaf_mbox_handler(struct work_struct *work)
807 struct otx2_mbox_dev *mdev;
808 struct mbox_hdr *rsp_hdr;
809 struct mbox_msghdr *msg;
810 struct otx2_mbox *mbox;
811 struct mbox *af_mbox;
815 af_mbox = container_of(work, struct mbox, mbox_wrk);
816 mbox = &af_mbox->mbox;
817 mdev = &mbox->dev[0];
818 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
820 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
823 for (id = 0; id < af_mbox->num_msgs; id++) {
824 msg = (struct mbox_msghdr *)(mdev->mbase + offset);
825 otx2_process_pfaf_mbox_msg(pf, msg);
826 offset = mbox->rx_start + msg->next_msgoff;
827 if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
828 __otx2_mbox_reset(mbox, 0);
834 static void otx2_handle_link_event(struct otx2_nic *pf)
836 struct cgx_link_user_info *linfo = &pf->linfo;
837 struct net_device *netdev = pf->netdev;
839 pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
840 linfo->link_up ? "UP" : "DOWN", linfo->speed,
841 linfo->full_duplex ? "Full" : "Half");
842 if (linfo->link_up) {
843 netif_carrier_on(netdev);
844 netif_tx_start_all_queues(netdev);
846 netif_tx_stop_all_queues(netdev);
847 netif_carrier_off(netdev);
851 int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
852 struct cgx_link_info_msg *msg,
857 /* Copy the link info sent by AF */
858 pf->linfo = msg->link_info;
860 /* notify VFs about link event */
861 for (i = 0; i < pci_num_vf(pf->pdev); i++) {
862 struct otx2_vf_config *config = &pf->vf_configs[i];
863 struct delayed_work *dwork = &config->link_event_work;
865 if (config->intf_down)
868 schedule_delayed_work(dwork, msecs_to_jiffies(100));
871 /* interface has not been fully configured yet */
872 if (pf->flags & OTX2_FLAG_INTF_DOWN)
875 otx2_handle_link_event(pf);
879 static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
880 struct mbox_msghdr *req)
	/* Check if valid; if not, reply with an invalid msg */
883 if (req->sig != OTX2_MBOX_REQ_SIG) {
884 otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
889 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \
891 struct _rsp_type *rsp; \
894 rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
895 &pf->mbox.mbox_up, 0, \
896 sizeof(struct _rsp_type)); \
901 rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
902 rsp->hdr.pcifunc = 0; \
905 err = otx2_mbox_up_handler_ ## _fn_name( \
906 pf, (struct _req_type *)req, rsp); \
913 otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
919 static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
921 struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
922 struct otx2_mbox *mbox = &af_mbox->mbox_up;
923 struct otx2_mbox_dev *mdev = &mbox->dev[0];
924 struct otx2_nic *pf = af_mbox->pfvf;
925 int offset, id, devid = 0;
926 struct mbox_hdr *rsp_hdr;
927 struct mbox_msghdr *msg;
929 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
931 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
933 for (id = 0; id < af_mbox->up_num_msgs; id++) {
934 msg = (struct mbox_msghdr *)(mdev->mbase + offset);
936 devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
937 /* Skip processing VF's messages */
939 otx2_process_mbox_msg_up(pf, msg);
940 offset = mbox->rx_start + msg->next_msgoff;
943 otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
944 MBOX_DIR_PFVF_UP, devid - 1,
945 af_mbox->up_num_msgs);
949 otx2_mbox_msg_send(mbox, 0);
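/* AF => PF mailbox IRQ: a single interrupt bit; clear it first, then hand
 * both the down ('mbox_wrk') and up ('mbox_up_wrk') directions to the
 * workqueue via otx2_queue_work().
 */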
952 static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
954 struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
958 otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
962 trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
964 otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
969 static void otx2_disable_mbox_intr(struct otx2_nic *pf)
971 int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
973 /* Disable AF => PF mailbox IRQ */
974 otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
975 free_irq(vector, pf);
978 static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
980 struct otx2_hw *hw = &pf->hw;
985 /* Register mailbox interrupt handler */
986 irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
987 snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
988 err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
989 otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
992 "RVUPF: IRQ registration failed for PFAF mbox irq\n");
	/* Enable mailbox interrupt for msgs coming from AF.
	 * First clear to avoid spurious interrupts, if any.
	 */
999 otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
1000 otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
1005 /* Check mailbox communication with AF */
1006 req = otx2_mbox_alloc_msg_ready(&pf->mbox);
1008 otx2_disable_mbox_intr(pf);
1011 err = otx2_sync_mbox_msg(&pf->mbox);
1014 "AF not responding to mailbox, deferring probe\n");
1015 otx2_disable_mbox_intr(pf);
1016 return -EPROBE_DEFER;
1022 static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
1024 struct mbox *mbox = &pf->mbox;
1027 destroy_workqueue(pf->mbox_wq);
1031 if (mbox->mbox.hwbase)
1032 iounmap((void __iomem *)mbox->mbox.hwbase);
1034 otx2_mbox_destroy(&mbox->mbox);
1035 otx2_mbox_destroy(&mbox->mbox_up);
1038 static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
1040 struct mbox *mbox = &pf->mbox;
1041 void __iomem *hwbase;
1045 pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
1046 WQ_UNBOUND | WQ_HIGHPRI |
	/* Mailbox is a reserved memory (in RAM) region shared between
	 * admin function (i.e. AF) and this PF; it shouldn't be mapped as
	 * device memory, to allow unaligned accesses.
	 */
1055 hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
1058 dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
1063 err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
1068 err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
1069 MBOX_DIR_PFAF_UP, 1);
1073 err = otx2_mbox_bbuf_init(mbox, pf->pdev);
1077 INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
1078 INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
1079 mutex_init(&mbox->lock);
1083 otx2_pfaf_mbox_destroy(pf);
1087 static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
1089 struct msg_req *msg;
1092 mutex_lock(&pf->mbox.lock);
1094 msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
1096 msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
1099 mutex_unlock(&pf->mbox.lock);
1103 err = otx2_sync_mbox_msg(&pf->mbox);
1104 mutex_unlock(&pf->mbox.lock);
1108 static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
1110 struct msg_req *msg;
1113 mutex_lock(&pf->mbox.lock);
1115 msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
1117 msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
1120 mutex_unlock(&pf->mbox.lock);
1124 err = otx2_sync_mbox_msg(&pf->mbox);
1125 mutex_unlock(&pf->mbox.lock);
1129 int otx2_set_real_num_queues(struct net_device *netdev,
1130 int tx_queues, int rx_queues)
1134 err = netif_set_real_num_tx_queues(netdev, tx_queues);
1137 "Failed to set no of Tx queues: %d\n", tx_queues);
1141 err = netif_set_real_num_rx_queues(netdev, rx_queues);
1144 "Failed to set no of Rx queues: %d\n", rx_queues);
1147 EXPORT_SYMBOL(otx2_set_real_num_queues);
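/* Queue-error (QINT) IRQ: for each CQ and SQ, the NIX_LF_*_OP_INT registers
 * are read with a 64-bit atomic add in which the queue index is encoded at
 * bit position 44; the returned low bits hold that queue's error status and
 * writing them back clears the condition. On any error the queues are
 * reinitialized via reset_task.
 */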
1149 static irqreturn_t otx2_q_intr_handler(int irq, void *data)
1151 struct otx2_nic *pf = data;
1156 for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
1157 ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
1158 val = otx2_atomic64_add((qidx << 44), ptr);
1160 otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
1161 (val & NIX_CQERRINT_BITS));
1162 if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
1165 if (val & BIT_ULL(42)) {
1166 netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
1167 qidx, otx2_read64(pf, NIX_LF_ERR_INT));
1169 if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
1170 netdev_err(pf->netdev, "CQ%lld: Doorbell error",
1172 if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
1173 netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
1177 schedule_work(&pf->reset_task);
1181 for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
1182 ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
1183 val = otx2_atomic64_add((qidx << 44), ptr);
1184 otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
1185 (val & NIX_SQINT_BITS));
1187 if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
1190 if (val & BIT_ULL(42)) {
1191 netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
1192 qidx, otx2_read64(pf, NIX_LF_ERR_INT));
1194 if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
1195 netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
1198 NIX_LF_SQ_OP_ERR_DBG));
1199 otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
1202 if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
			netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DBG:0x%llx\n",
1205 otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
1206 otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
1209 if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
1210 netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
1213 NIX_LF_SEND_ERR_DBG));
1214 otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
1217 if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
1218 netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
1222 schedule_work(&pf->reset_task);
1228 static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
1230 struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
1231 struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
1232 int qidx = cq_poll->cint_idx;
	/* Disable interrupts.
	 *
	 * Completion interrupts behave in a level-triggered interrupt
	 * fashion, and hence have to be cleared only after they are
	 * serviced.
	 */
1239 otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
1242 napi_schedule_irqoff(&cq_poll->napi);
1247 static void otx2_disable_napi(struct otx2_nic *pf)
1249 struct otx2_qset *qset = &pf->qset;
1250 struct otx2_cq_poll *cq_poll;
1253 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1254 cq_poll = &qset->napi[qidx];
1255 napi_disable(&cq_poll->napi);
1256 netif_napi_del(&cq_poll->napi);
1260 static void otx2_free_cq_res(struct otx2_nic *pf)
1262 struct otx2_qset *qset = &pf->qset;
1263 struct otx2_cq_queue *cq;
1267 otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
1268 for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
1269 cq = &qset->cq[qidx];
1270 qmem_free(pf->dev, cq->cqe);
1274 static void otx2_free_sq_res(struct otx2_nic *pf)
1276 struct otx2_qset *qset = &pf->qset;
1277 struct otx2_snd_queue *sq;
1281 otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
1282 /* Free SQB pointers */
1283 otx2_sq_free_sqbs(pf);
1284 for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
1285 sq = &qset->sq[qidx];
1286 qmem_free(pf->dev, sq->sqe);
1287 qmem_free(pf->dev, sq->tso_hdrs);
1289 kfree(sq->sqb_ptrs);
1293 static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
	/* The data transferred by NIX to memory consists of actual packet
	 * plus additional data which has timestamp and/or EDSA/HIGIG2
	 * headers if interface is configured in corresponding modes.
	 * NIX transfers entire data using 6 segments/buffers and writes
	 * a CQE_RX descriptor with those segment addresses. First segment
	 * has additional data prepended to packet. Also software reserves a
	 * headroom of 128 bytes and sizeof(struct skb_shared_info) in
	 * each segment. Hence the total size of memory needed
	 * to receive a packet with 'mtu' is:
	 * frame size = mtu + additional data;
	 * memory = frame_size + (headroom + struct skb_shared_info size) * 6;
	 * each receive buffer size = memory / 6;
	 */
1312 frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
1313 total_size = frame_size + (OTX2_HEAD_ROOM +
1314 OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) * 6;
1315 rbuf_size = total_size / 6;
1317 return ALIGN(rbuf_size, 2048);
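/* Worked example of the sizing above (the constant values are illustrative
 * and config/arch dependent, not taken from this file): with mtu = 1500, a
 * 22-byte OTX2_ETH_HLEN and an 8-byte timestamp, frame_size = 1530; with a
 * 128-byte headroom and ~320 bytes of skb_shared_info per segment,
 * memory = 1530 + (128 + 320) * 6 = 4218, so each receive buffer is
 * 4218 / 6 = 703 bytes, which ALIGN() then rounds up to 2048.
 */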
1320 static int otx2_init_hw_resources(struct otx2_nic *pf)
1322 struct nix_lf_free_req *free_req;
1323 struct mbox *mbox = &pf->mbox;
1324 struct otx2_hw *hw = &pf->hw;
1325 struct msg_req *req;
	/* Set required NPA LF's pool counts.
	 * Auras and Pools are used in a 1:1 mapping,
	 * so, aura count = pool count.
	 */
1332 hw->rqpool_cnt = hw->rx_queues;
1333 hw->sqpool_cnt = hw->tx_queues;
1334 hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
1336 pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
1338 pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
1340 mutex_lock(&mbox->lock);
1342 err = otx2_config_npa(pf);
1347 err = otx2_config_nix(pf);
1349 goto err_free_npa_lf;
1351 /* Enable backpressure */
1352 otx2_nix_config_bp(pf, true);
1354 /* Init Auras and pools used by NIX RQ, for free buffer ptrs */
1355 err = otx2_rq_aura_pool_init(pf);
1357 mutex_unlock(&mbox->lock);
1358 goto err_free_nix_lf;
1360 /* Init Auras and pools used by NIX SQ, for queueing SQEs */
1361 err = otx2_sq_aura_pool_init(pf);
1363 mutex_unlock(&mbox->lock);
1364 goto err_free_rq_ptrs;
1367 err = otx2_txsch_alloc(pf);
1369 mutex_unlock(&mbox->lock);
1370 goto err_free_sq_ptrs;
1373 err = otx2_config_nix_queues(pf);
1375 mutex_unlock(&mbox->lock);
1376 goto err_free_txsch;
1378 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1379 err = otx2_txschq_config(pf, lvl);
1381 mutex_unlock(&mbox->lock);
1382 goto err_free_nix_queues;
1385 mutex_unlock(&mbox->lock);
1388 err_free_nix_queues:
1389 otx2_free_sq_res(pf);
1390 otx2_free_cq_res(pf);
1391 otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
1393 if (otx2_txschq_stop(pf))
1394 dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__);
1396 otx2_sq_free_sqbs(pf);
1398 otx2_free_aura_ptr(pf, AURA_NIX_RQ);
1399 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
1400 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
1401 otx2_aura_pool_free(pf);
1403 mutex_lock(&mbox->lock);
1404 free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
1406 free_req->flags = NIX_LF_DISABLE_FLOWS;
1407 if (otx2_sync_mbox_msg(mbox))
1408 dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
1412 req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
1414 if (otx2_sync_mbox_msg(mbox))
1415 dev_err(pf->dev, "%s failed to free npalf\n", __func__);
1418 mutex_unlock(&mbox->lock);
1422 static void otx2_free_hw_resources(struct otx2_nic *pf)
1424 struct otx2_qset *qset = &pf->qset;
1425 struct nix_lf_free_req *free_req;
1426 struct mbox *mbox = &pf->mbox;
1427 struct otx2_cq_queue *cq;
1428 struct msg_req *req;
	/* Ensure all SQEs are processed */
1434 /* Stop transmission */
1435 err = otx2_txschq_stop(pf);
1437 dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
1439 mutex_lock(&mbox->lock);
1440 /* Disable backpressure */
1441 if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
1442 otx2_nix_config_bp(pf, false);
1443 mutex_unlock(&mbox->lock);
1446 otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
	/* Dequeue all CQEs */
1449 for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
1450 cq = &qset->cq[qidx];
1451 if (cq->cq_type == CQ_RX)
1452 otx2_cleanup_rx_cqes(pf, cq);
1454 otx2_cleanup_tx_cqes(pf, cq);
1457 otx2_free_sq_res(pf);
	/* Free RQ buffer pointers */
1460 otx2_free_aura_ptr(pf, AURA_NIX_RQ);
1462 otx2_free_cq_res(pf);
1464 /* Free all ingress bandwidth profiles allocated */
1465 cn10k_free_all_ipolicers(pf);
1467 mutex_lock(&mbox->lock);
1469 free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
1471 free_req->flags = NIX_LF_DISABLE_FLOWS;
1472 if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
1473 free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
1474 if (otx2_sync_mbox_msg(mbox))
1475 dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
1477 mutex_unlock(&mbox->lock);
1479 /* Disable NPA Pool and Aura hw context */
1480 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
1481 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
1482 otx2_aura_pool_free(pf);
1484 mutex_lock(&mbox->lock);
1486 req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
1488 if (otx2_sync_mbox_msg(mbox))
1489 dev_err(pf->dev, "%s failed to free npalf\n", __func__);
1491 mutex_unlock(&mbox->lock);
1494 int otx2_open(struct net_device *netdev)
1496 struct otx2_nic *pf = netdev_priv(netdev);
1497 struct otx2_cq_poll *cq_poll = NULL;
1498 struct otx2_qset *qset = &pf->qset;
1499 int err = 0, qidx, vec;
1502 netif_carrier_off(netdev);
1504 pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
	/* RQ and SQs are mapped to different CQs,
	 * so find out max CQ IRQs (i.e. CINTs) needed.
	 */
1508 pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
1509 qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
1514 qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
1516 qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
1519 qset->cq = kcalloc(pf->qset.cq_cnt,
1520 sizeof(struct otx2_cq_queue), GFP_KERNEL);
1524 qset->sq = kcalloc(pf->hw.tx_queues,
1525 sizeof(struct otx2_snd_queue), GFP_KERNEL);
1529 qset->rq = kcalloc(pf->hw.rx_queues,
1530 sizeof(struct otx2_rcv_queue), GFP_KERNEL);
1534 if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
1535 /* Reserve LMT lines for NPA AURA batch free */
1536 pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base;
1537 /* Reserve LMT lines for NIX TX */
1538 pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base +
1539 (NIX_LMTID_BASE * LMT_LINE_SIZE));
1542 err = otx2_init_hw_resources(pf);
1546 /* Register NAPI handler */
1547 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1548 cq_poll = &qset->napi[qidx];
1549 cq_poll->cint_idx = qidx;
		/* RQ0 & SQ0 are mapped to CINT0 and so on..
		 * 'cq_ids[0]' points to RQ's CQ and
		 * 'cq_ids[1]' points to SQ's CQ.
		 */
1554 cq_poll->cq_ids[CQ_RX] =
1555 (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
1556 cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
1557 qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
1558 cq_poll->dev = (void *)pf;
1559 netif_napi_add(netdev, &cq_poll->napi,
1560 otx2_napi_handler, NAPI_POLL_WEIGHT);
1561 napi_enable(&cq_poll->napi);
1564 /* Set maximum frame size allowed in HW */
1565 err = otx2_hw_set_mtu(pf, netdev->mtu);
1567 goto err_disable_napi;
	/* Set up segmentation algorithms; if that fails, clear the offload capability */
1570 otx2_setup_segmentation(pf);
1572 /* Initialize RSS */
1573 err = otx2_rss_init(pf);
1575 goto err_disable_napi;
1577 /* Register Queue IRQ handlers */
1578 vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
1579 irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
1581 snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);
1583 err = request_irq(pci_irq_vector(pf->pdev, vec),
1584 otx2_q_intr_handler, 0, irq_name, pf);
1587 "RVUPF%d: IRQ registration failed for QERR\n",
1588 rvu_get_pf(pf->pcifunc));
1589 goto err_disable_napi;
1592 /* Enable QINT IRQ */
1593 otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
1595 /* Register CQ IRQ handlers */
1596 vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
1597 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1598 irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
1600 snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
1603 err = request_irq(pci_irq_vector(pf->pdev, vec),
1604 otx2_cq_intr_handler, 0, irq_name,
1608 "RVUPF%d: IRQ registration failed for CQ%d\n",
1609 rvu_get_pf(pf->pcifunc), qidx);
1610 goto err_free_cints;
1614 otx2_config_irq_coalescing(pf, qidx);
1617 otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
1618 otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
1621 otx2_set_cints_affinity(pf);
1623 if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
1624 otx2_enable_rxvlan(pf, true);
	/* When reinitializing, re-enable timestamping if it was enabled before */
1627 if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
1628 pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
1629 otx2_config_hw_tx_tstamp(pf, true);
1631 if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
1632 pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
1633 otx2_config_hw_rx_tstamp(pf, true);
1636 pf->flags &= ~OTX2_FLAG_INTF_DOWN;
1637 /* 'intf_down' may be checked on any cpu */
1640 /* we have already received link status notification */
1641 if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
1642 otx2_handle_link_event(pf);
1644 /* Restore pause frame settings */
1645 otx2_config_pause_frm(pf);
1647 err = otx2_rxtx_enable(pf, true);
1649 goto err_tx_stop_queues;
1654 netif_tx_stop_all_queues(netdev);
1655 netif_carrier_off(netdev);
1657 otx2_free_cints(pf, qidx);
1658 vec = pci_irq_vector(pf->pdev,
1659 pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
1660 otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
1661 synchronize_irq(vec);
1664 otx2_disable_napi(pf);
1665 otx2_free_hw_resources(pf);
1673 EXPORT_SYMBOL(otx2_open);
1675 int otx2_stop(struct net_device *netdev)
1677 struct otx2_nic *pf = netdev_priv(netdev);
1678 struct otx2_cq_poll *cq_poll = NULL;
1679 struct otx2_qset *qset = &pf->qset;
1680 struct otx2_rss_info *rss;
1683 netif_carrier_off(netdev);
1684 netif_tx_stop_all_queues(netdev);
1686 pf->flags |= OTX2_FLAG_INTF_DOWN;
1687 /* 'intf_down' may be checked on any cpu */
1690 /* First stop packet Rx/Tx */
1691 otx2_rxtx_enable(pf, false);
1693 /* Clear RSS enable flag */
1694 rss = &pf->hw.rss_info;
1695 rss->enable = false;
1697 /* Cleanup Queue IRQ */
1698 vec = pci_irq_vector(pf->pdev,
1699 pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
1700 otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
1701 synchronize_irq(vec);
1704 /* Cleanup CQ NAPI and IRQ */
1705 vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
1706 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1707 /* Disable interrupt */
1708 otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
1710 synchronize_irq(pci_irq_vector(pf->pdev, vec));
1712 cq_poll = &qset->napi[qidx];
1713 napi_synchronize(&cq_poll->napi);
1717 netif_tx_disable(netdev);
1719 otx2_free_hw_resources(pf);
1720 otx2_free_cints(pf, pf->hw.cint_cnt);
1721 otx2_disable_napi(pf);
1723 for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
1724 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
1726 for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
1727 cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
1728 devm_kfree(pf->dev, pf->refill_wrk);
1734 /* Do not clear RQ/SQ ringsize settings */
1735 memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0,
1736 sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
1739 EXPORT_SYMBOL(otx2_stop);
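/* Transmit path: drop runts and oversized non-GSO frames, then append the
 * skb to the send queue. If no SQE space is left the queue is stopped, and
 * it is woken again once the hardware-maintained aura free count
 * (*sq->aura_fc_addr) shows enough free SQEs.
 */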
1741 static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
1743 struct otx2_nic *pf = netdev_priv(netdev);
1744 int qidx = skb_get_queue_mapping(skb);
1745 struct otx2_snd_queue *sq;
1746 struct netdev_queue *txq;
1748 /* Check for minimum and maximum packet length */
1749 if (skb->len <= ETH_HLEN ||
1750 (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
1752 return NETDEV_TX_OK;
1755 sq = &pf->qset.sq[qidx];
1756 txq = netdev_get_tx_queue(netdev, qidx);
1758 if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
1759 netif_tx_stop_queue(txq);
		/* Check again, in case SQBs got freed up */
		smp_mb();
		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
							> sq->sqe_thresh)
			netif_tx_wake_queue(txq);
1767 return NETDEV_TX_BUSY;
1770 return NETDEV_TX_OK;
1773 static netdev_features_t otx2_fix_features(struct net_device *dev,
1774 netdev_features_t features)
1776 /* check if n-tuple filters are ON */
1777 if ((features & NETIF_F_HW_TC) && (dev->features & NETIF_F_NTUPLE)) {
1778 netdev_info(dev, "Disabling n-tuple filters\n");
1779 features &= ~NETIF_F_NTUPLE;
1782 /* check if tc hw offload is ON */
1783 if ((features & NETIF_F_NTUPLE) && (dev->features & NETIF_F_HW_TC)) {
1784 netdev_info(dev, "Disabling TC hardware offload\n");
1785 features &= ~NETIF_F_HW_TC;
1791 static void otx2_set_rx_mode(struct net_device *netdev)
1793 struct otx2_nic *pf = netdev_priv(netdev);
1795 queue_work(pf->otx2_wq, &pf->rx_mode_work);
1798 static void otx2_do_set_rx_mode(struct work_struct *work)
1800 struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
1801 struct net_device *netdev = pf->netdev;
1802 struct nix_rx_mode *req;
1803 bool promisc = false;
1805 if (!(netdev->flags & IFF_UP))
1808 if ((netdev->flags & IFF_PROMISC) ||
1809 (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
1813 /* Write unicast address to mcam entries or del from mcam */
1814 if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
1815 __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
1817 mutex_lock(&pf->mbox.lock);
1818 req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
1820 mutex_unlock(&pf->mbox.lock);
1824 req->mode = NIX_RX_MODE_UCAST;
1827 req->mode |= NIX_RX_MODE_PROMISC;
1828 if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
1829 req->mode |= NIX_RX_MODE_ALLMULTI;
1831 req->mode |= NIX_RX_MODE_USE_MCE;
1833 otx2_sync_mbox_msg(&pf->mbox);
1834 mutex_unlock(&pf->mbox.lock);
1837 static int otx2_set_features(struct net_device *netdev,
1838 netdev_features_t features)
1840 netdev_features_t changed = features ^ netdev->features;
1841 bool ntuple = !!(features & NETIF_F_NTUPLE);
1842 struct otx2_nic *pf = netdev_priv(netdev);
1844 if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
1845 return otx2_cgx_config_loopback(pf,
1846 features & NETIF_F_LOOPBACK);
1848 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
1849 return otx2_enable_rxvlan(pf,
1850 features & NETIF_F_HW_VLAN_CTAG_RX);
1852 if ((changed & NETIF_F_NTUPLE) && !ntuple)
1853 otx2_destroy_ntuple_flows(pf);
1855 if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
1856 pf->tc_info.num_entries) {
1857 netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
1864 static void otx2_reset_task(struct work_struct *work)
1866 struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);
1868 if (!netif_running(pf->netdev))
1872 otx2_stop(pf->netdev);
1874 otx2_open(pf->netdev);
1875 netif_trans_update(pf->netdev);
1879 static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
1881 struct msg_req *req;
1884 if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
1887 mutex_lock(&pfvf->mbox.lock);
1889 req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
1891 req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
1893 mutex_unlock(&pfvf->mbox.lock);
1897 err = otx2_sync_mbox_msg(&pfvf->mbox);
1899 mutex_unlock(&pfvf->mbox.lock);
1903 mutex_unlock(&pfvf->mbox.lock);
1905 pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
1907 pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
1911 static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
1913 struct msg_req *req;
1916 if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
1919 mutex_lock(&pfvf->mbox.lock);
1921 req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
1923 req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
1925 mutex_unlock(&pfvf->mbox.lock);
1929 err = otx2_sync_mbox_msg(&pfvf->mbox);
1931 mutex_unlock(&pfvf->mbox.lock);
1935 mutex_unlock(&pfvf->mbox.lock);
1937 pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
1939 pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
1943 static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
1945 struct otx2_nic *pfvf = netdev_priv(netdev);
1946 struct hwtstamp_config config;
1951 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1954 /* reserved for future extensions */
1958 switch (config.tx_type) {
1959 case HWTSTAMP_TX_OFF:
1960 otx2_config_hw_tx_tstamp(pfvf, false);
1962 case HWTSTAMP_TX_ON:
1963 otx2_config_hw_tx_tstamp(pfvf, true);
1969 switch (config.rx_filter) {
1970 case HWTSTAMP_FILTER_NONE:
1971 otx2_config_hw_rx_tstamp(pfvf, false);
1973 case HWTSTAMP_FILTER_ALL:
1974 case HWTSTAMP_FILTER_SOME:
1975 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1976 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1977 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1978 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1979 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1980 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1981 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1982 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1983 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1984 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1985 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1986 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1987 otx2_config_hw_rx_tstamp(pfvf, true);
1988 config.rx_filter = HWTSTAMP_FILTER_ALL;
1994 memcpy(&pfvf->tstamp, &config, sizeof(config));
1996 return copy_to_user(ifr->ifr_data, &config,
1997 sizeof(config)) ? -EFAULT : 0;
2000 static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
2002 struct otx2_nic *pfvf = netdev_priv(netdev);
2003 struct hwtstamp_config *cfg = &pfvf->tstamp;
2007 return otx2_config_hwtstamp(netdev, req);
2009 return copy_to_user(req->ifr_data, cfg,
2010 sizeof(*cfg)) ? -EFAULT : 0;
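/* Install an MCAM rule matching the VF's unicast DMAC (exact match via a
 * broadcast mask) on the PF's Rx channel, so that traffic to this MAC is
 * steered to the VF by the default rule action.
 */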
2016 static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
2018 struct npc_install_flow_req *req;
2021 mutex_lock(&pf->mbox.lock);
2022 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2028 ether_addr_copy(req->packet.dmac, mac);
2029 eth_broadcast_addr((u8 *)&req->mask.dmac);
2030 req->features = BIT_ULL(NPC_DMAC);
2031 req->channel = pf->hw.rx_chan_base;
2032 req->intf = NIX_INTF_RX;
2033 req->default_rule = 1;
2036 req->op = NIX_RX_ACTION_DEFAULT;
2038 err = otx2_sync_mbox_msg(&pf->mbox);
2040 mutex_unlock(&pf->mbox.lock);
2044 static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
2046 struct otx2_nic *pf = netdev_priv(netdev);
2047 struct pci_dev *pdev = pf->pdev;
2048 struct otx2_vf_config *config;
2051 if (!netif_running(netdev))
2054 if (vf >= pf->total_vfs)
2057 if (!is_valid_ether_addr(mac))
2060 config = &pf->vf_configs[vf];
2061 ether_addr_copy(config->mac, mac);
2063 ret = otx2_do_set_vf_mac(pf, vf, mac);
2065 dev_info(&pdev->dev,
2066 "Load/Reload VF driver\n");
2071 static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
2074 struct otx2_flow_config *flow_cfg = pf->flow_cfg;
2075 struct nix_vtag_config_rsp *vtag_rsp;
2076 struct npc_delete_flow_req *del_req;
2077 struct nix_vtag_config *vtag_req;
2078 struct npc_install_flow_req *req;
2079 struct otx2_vf_config *config;
2083 config = &pf->vf_configs[vf];
2085 if (!vlan && !config->vlan)
2088 mutex_lock(&pf->mbox.lock);
2090 /* free old tx vtag entry */
2092 vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
2097 vtag_req->cfg_type = 0;
2098 vtag_req->tx.free_vtag0 = 1;
2099 vtag_req->tx.vtag0_idx = config->tx_vtag_idx;
2101 err = otx2_sync_mbox_msg(&pf->mbox);
2106 if (!vlan && config->vlan) {
2108 del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
2113 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
2115 flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2116 err = otx2_sync_mbox_msg(&pf->mbox);
2121 del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
2126 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
2128 flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2129 err = otx2_sync_mbox_msg(&pf->mbox);
2135 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2141 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
2142 req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2143 req->packet.vlan_tci = htons(vlan);
2144 req->mask.vlan_tci = htons(VLAN_VID_MASK);
	/* AF fills the destination MAC addr */
2146 eth_broadcast_addr((u8 *)&req->mask.dmac);
2147 req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
2148 req->channel = pf->hw.rx_chan_base;
2149 req->intf = NIX_INTF_RX;
2151 req->op = NIX_RX_ACTION_DEFAULT;
2152 req->vtag0_valid = true;
2153 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
2156 err = otx2_sync_mbox_msg(&pf->mbox);
2161 vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
2167 /* configure tx vtag params */
2168 vtag_req->vtag_size = VTAGSIZE_T4;
2169 vtag_req->cfg_type = 0; /* tx vlan cfg */
2170 vtag_req->tx.cfg_vtag0 = 1;
2171 vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan;
2173 err = otx2_sync_mbox_msg(&pf->mbox);
2177 vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
2178 (&pf->mbox.mbox, 0, &vtag_req->hdr);
2179 if (IS_ERR(vtag_rsp)) {
2180 err = PTR_ERR(vtag_rsp);
2183 config->tx_vtag_idx = vtag_rsp->vtag0_idx;
2185 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2191 eth_zero_addr((u8 *)&req->mask.dmac);
2192 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
2193 req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2194 req->features = BIT_ULL(NPC_DMAC);
2195 req->channel = pf->hw.tx_chan_base;
2196 req->intf = NIX_INTF_TX;
2198 req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
2199 req->vtag0_def = vtag_rsp->vtag0_idx;
2200 req->vtag0_op = VTAG_INSERT;
2203 err = otx2_sync_mbox_msg(&pf->mbox);
2205 config->vlan = vlan;
2206 mutex_unlock(&pf->mbox.lock);
2210 static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
2213 struct otx2_nic *pf = netdev_priv(netdev);
2214 struct pci_dev *pdev = pf->pdev;
2216 if (!netif_running(netdev))
2219 if (vf >= pci_num_vf(pdev))
2222 /* qos is currently unsupported */
2223 if (vlan >= VLAN_N_VID || qos)
2226 if (proto != htons(ETH_P_8021Q))
2227 return -EPROTONOSUPPORT;
2229 if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
2232 return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
2235 static int otx2_get_vf_config(struct net_device *netdev, int vf,
2236 struct ifla_vf_info *ivi)
2238 struct otx2_nic *pf = netdev_priv(netdev);
2239 struct pci_dev *pdev = pf->pdev;
2240 struct otx2_vf_config *config;
2242 if (!netif_running(netdev))
2245 if (vf >= pci_num_vf(pdev))
2248 config = &pf->vf_configs[vf];
2250 ether_addr_copy(ivi->mac, config->mac);
2251 ivi->vlan = config->vlan;
2252 ivi->trusted = config->trusted;
2257 static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
2260 struct set_vf_perm *req;
2263 mutex_lock(&pf->mbox.lock);
2264 req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
	/* Let AF reset VF permissions, as SR-IOV is disabled */
2271 if (req_perm == OTX2_RESET_VF_PERM) {
2272 req->flags |= RESET_VF_PERM;
2273 } else if (req_perm == OTX2_TRUSTED_VF) {
2274 if (pf->vf_configs[vf].trusted)
2275 req->flags |= VF_TRUSTED;
2279 rc = otx2_sync_mbox_msg(&pf->mbox);
2281 mutex_unlock(&pf->mbox.lock);
2285 static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
2288 struct otx2_nic *pf = netdev_priv(netdev);
2289 struct pci_dev *pdev = pf->pdev;
2292 if (vf >= pci_num_vf(pdev))
2295 if (pf->vf_configs[vf].trusted == enable)
2298 pf->vf_configs[vf].trusted = enable;
2299 rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
2302 pf->vf_configs[vf].trusted = !enable;
2304 netdev_info(pf->netdev, "VF %d is %strusted\n",
2305 vf, enable ? "" : "not ");
2309 static const struct net_device_ops otx2_netdev_ops = {
2310 .ndo_open = otx2_open,
2311 .ndo_stop = otx2_stop,
2312 .ndo_start_xmit = otx2_xmit,
2313 .ndo_fix_features = otx2_fix_features,
2314 .ndo_set_mac_address = otx2_set_mac_address,
2315 .ndo_change_mtu = otx2_change_mtu,
2316 .ndo_set_rx_mode = otx2_set_rx_mode,
2317 .ndo_set_features = otx2_set_features,
2318 .ndo_tx_timeout = otx2_tx_timeout,
2319 .ndo_get_stats64 = otx2_get_stats64,
2320 .ndo_do_ioctl = otx2_ioctl,
2321 .ndo_set_vf_mac = otx2_set_vf_mac,
2322 .ndo_set_vf_vlan = otx2_set_vf_vlan,
2323 .ndo_get_vf_config = otx2_get_vf_config,
2324 .ndo_setup_tc = otx2_setup_tc,
2325 .ndo_set_vf_trust = otx2_ndo_set_vf_trust,
2328 static int otx2_wq_init(struct otx2_nic *pf)
2330 pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
2334 INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode);
2335 INIT_WORK(&pf->reset_task, otx2_reset_task);
2339 static int otx2_check_pf_usable(struct otx2_nic *nic)
2343 rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
2344 rev = (rev >> 12) & 0xFF;
	/* Check if AF has set up the revision for the RVUM block;
	 * otherwise this driver's probe should be deferred until the
	 * AF driver comes up.
	 */
2351 "AF is not initialized, deferring probe\n");
2352 return -EPROBE_DEFER;
2357 static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
2359 struct otx2_hw *hw = &pf->hw;
	/* NPA interrupts are not registered, so alloc only
	 * up to NIX vector offset.
	 */
2365 num_vec = hw->nix_msixoff;
2366 num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
2368 otx2_disable_mbox_intr(pf);
2369 pci_free_irq_vectors(hw->pdev);
2370 err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
2372 dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
2377 return otx2_register_mbox_intr(pf, false);
2380 static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
2384 pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
2385 sizeof(struct otx2_vf_config),
2387 if (!pf->vf_configs)
2390 for (i = 0; i < pf->total_vfs; i++) {
2391 pf->vf_configs[i].pf = pf;
2392 pf->vf_configs[i].intf_down = true;
2393 pf->vf_configs[i].trusted = false;
2394 INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
2395 otx2_vf_link_event_task);
2401 static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
2405 if (!pf->vf_configs)
2408 for (i = 0; i < pf->total_vfs; i++) {
2409 cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
2410 otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
2414 static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2416 struct device *dev = &pdev->dev;
2417 struct net_device *netdev;
2418 struct otx2_nic *pf;
2423 err = pcim_enable_device(pdev);
2425 dev_err(dev, "Failed to enable PCI device\n");
2429 err = pci_request_regions(pdev, DRV_NAME);
2431 dev_err(dev, "PCI request regions failed 0x%x\n", err);
2435 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
2437 dev_err(dev, "DMA mask config failed, abort\n");
2438 goto err_release_regions;
2441 pci_set_master(pdev);
2443 /* Set number of queues */
2444 qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
2446 netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount);
2449 goto err_release_regions;
2452 pci_set_drvdata(pdev, netdev);
2453 SET_NETDEV_DEV(netdev, &pdev->dev);
2454 pf = netdev_priv(netdev);
2455 pf->netdev = netdev;
2458 pf->total_vfs = pci_sriov_get_totalvfs(pdev);
2459 pf->flags |= OTX2_FLAG_INTF_DOWN;
2463 hw->rx_queues = qcount;
2464 hw->tx_queues = qcount;
2465 hw->max_queues = qcount;
2467 num_vec = pci_msix_vec_count(pdev);
2468 hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
2470 if (!hw->irq_name) {
2472 goto err_free_netdev;
2475 hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
2476 sizeof(cpumask_var_t), GFP_KERNEL);
2477 if (!hw->affinity_mask) {
2479 goto err_free_netdev;
	/* Map CSRs */
	pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!pf->reg_base) {
		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = otx2_check_pf_usable(pf);
	if (err)
		goto err_free_netdev;
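	/* Only the vectors needed for the AF mailbox and VF FLR/ME
	 * interrupts are requested here; see otx2_realloc_msix_vectors()
	 * for the full allocation done after LF attach.
	 */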
	err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
			__func__, RVU_PF_INT_VEC_CNT);
		goto err_free_netdev;
	}
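	/* Set default interrupt moderation thresholds and per-silicon
	 * capability flags / errata workarounds before the data path is
	 * brought up.
	 */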
	otx2_setup_dev_hw_settings(pf);

	/* Init PF <=> AF mailbox stuff */
	err = otx2_pfaf_mbox_init(pf);
	if (err)
		goto err_free_irq_vectors;

	/* Register mailbox interrupt */
	err = otx2_register_mbox_intr(pf, true);
	if (err)
		goto err_mbox_destroy;

	/* Request AF to attach NPA and NIX LFs to this PF.
	 * NIX and NPA LFs are needed for this PF to function as a NIC.
	 */
	err = otx2_attach_npa_nix(pf);
	if (err)
		goto err_disable_mbox_intr;

	err = otx2_realloc_msix_vectors(pf);
	if (err)
		goto err_detach_rsrc;

	err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
	if (err)
		goto err_detach_rsrc;
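	/* Map the LMT region used for LMTST-based instruction submission;
	 * the layout differs between OcteonTx2 and CN10K silicon.
	 */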
	err = cn10k_pf_lmtst_init(pf);
	if (err)
		goto err_detach_rsrc;

	/* Assign default mac address */
	otx2_get_mac_from_af(netdev);

	/* Don't check for error; proceed without PTP */
	otx2_ptp_init(pf);
	/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
	 * HW allocates buffer pointer from stack and uses it for DMA'ing
	 * ingress packet. In some scenarios HW can free back allocated buffer
	 * pointers to pool. This makes it impossible for SW to maintain a
	 * parallel list where physical addresses of buffer pointers (IOVAs)
	 * given to HW can be saved for later reference.
	 *
	 * So the only way to convert Rx packet's buffer address is to use
	 * IOMMU's iova_to_phys() handler which translates the address by
	 * walking through the translation tables.
	 */
	pf->iommu_domain = iommu_get_domain_for_dev(dev);
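	/* The Rx path then translates each buffer IOVA roughly as the
	 * otx2_iova_to_phys() helper does:
	 *
	 *	if (likely(iommu_domain))
	 *		return iommu_iova_to_phys(iommu_domain, dma_addr);
	 *	return dma_addr;  (no IOMMU: the IOVA is already physical)
	 */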
	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
			       NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
			       NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
			       NETIF_F_GSO_UDP_L4);
	netdev->features |= netdev->hw_features;

	netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;

	err = otx2_mcam_flow_init(pf);
	if (err)
		goto err_ptp_destroy;

	if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
		netdev->hw_features |= NETIF_F_NTUPLE;

	if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
		netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Support TSO on tag interface */
	netdev->vlan_features |= netdev->features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX;
	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_STAG_RX;
	netdev->features |= netdev->hw_features;

	/* HW supports tc offload, but it is mutually exclusive with
	 * n-tuple filters.
	 */
	if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
		netdev->hw_features |= NETIF_F_HW_TC;

	netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;

	netdev->netdev_ops = &otx2_netdev_ops;

	/* MTU range: 64 - 9190 */
	netdev->min_mtu = OTX2_MIN_MTU;
	netdev->max_mtu = otx2_get_max_mtu(pf);
	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_del_mcam_entries;
	}

	err = otx2_wq_init(pf);
	if (err)
		goto err_unreg_netdev;

	otx2_set_ethtool_ops(netdev);

	err = otx2_init_tc(pf);
	if (err)
		goto err_mcam_flow_del;

	/* Initialize SR-IOV resources */
	err = otx2_sriov_vfcfg_init(pf);
	if (err)
		goto err_pf_sriov_init;

	/* Enable link notifications */
	otx2_cgx_config_linkevents(pf, true);

	/* Enable pause frames by default */
	pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
	pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;

	return 0;
err_pf_sriov_init:
	otx2_shutdown_tc(pf);
err_mcam_flow_del:
	otx2_mcam_flow_del(pf);
err_unreg_netdev:
	unregister_netdev(netdev);
err_del_mcam_entries:
	otx2_mcam_flow_del(pf);
err_ptp_destroy:
	otx2_ptp_destroy(pf);
err_detach_rsrc:
	if (hw->lmt_base)
		iounmap(hw->lmt_base);
	otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
	otx2_disable_mbox_intr(pf);
err_mbox_destroy:
	otx2_pfaf_mbox_destroy(pf);
err_free_irq_vectors:
	pci_free_irq_vectors(hw->pdev);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
	return err;
}
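/* Forward the PF's current link state to one VF over the PF-VF mailbox.
 * Scheduled as delayed work when AF notifies the PF of a CGX link state
 * change, so each VF learns the new state.
 */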
static void otx2_vf_link_event_task(struct work_struct *work)
{
	struct otx2_vf_config *config;
	struct cgx_link_info_msg *req;
	struct mbox_msghdr *msghdr;
	struct otx2_nic *pf;
	int vf_idx;

	config = container_of(work, struct otx2_vf_config,
			      link_event_work.work);
	vf_idx = config - config->pf->vf_configs;
	pf = config->pf;

	msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
					 sizeof(*req), sizeof(struct msg_rsp));
	if (!msghdr) {
		dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
		return;
	}

	req = (struct cgx_link_info_msg *)msghdr;
	req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));

	otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
}
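/* SR-IOV enable: the PF-VF mailbox, FLR workqueue and their interrupts
 * must all be operational before pci_enable_sriov() creates the VFs,
 * since a VF driver may probe and start mailbox traffic immediately.
 */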
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf = netdev_priv(netdev);
	int ret;

	/* Init PF <=> VF mailbox stuff */
	ret = otx2_pfvf_mbox_init(pf, numvfs);
	if (ret)
		return ret;

	ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
	if (ret)
		goto free_mbox;

	ret = otx2_pf_flr_init(pf, numvfs);
	if (ret)
		goto free_intr;

	ret = otx2_register_flr_me_intr(pf, numvfs);
	if (ret)
		goto free_flr;

	ret = pci_enable_sriov(pdev, numvfs);
	if (ret)
		goto free_flr_intr;

	return numvfs;
free_flr_intr:
	otx2_disable_flr_me_intr(pf);
free_flr:
	otx2_flr_wq_destroy(pf);
free_intr:
	otx2_disable_pfvf_mbox_intr(pf, numvfs);
free_mbox:
	otx2_pfvf_mbox_destroy(pf);
	return ret;
}
static int otx2_sriov_disable(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf = netdev_priv(netdev);
	int numvfs = pci_num_vf(pdev);

	if (!numvfs)
		return 0;

	pci_disable_sriov(pdev);

	otx2_disable_flr_me_intr(pf);
	otx2_flr_wq_destroy(pf);
	otx2_disable_pfvf_mbox_intr(pf, numvfs);
	otx2_pfvf_mbox_destroy(pf);

	return 0;
}
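/* PCI sysfs entry point: 'echo N > sriov_numvfs' lands here; zero tears
 * SR-IOV down, anything else enables that many VFs.
 */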
static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	if (numvfs == 0)
		return otx2_sriov_disable(pdev);
	else
		return otx2_sriov_enable(pdev, numvfs);
}
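/* Teardown mirrors probe in reverse: quiesce timestamping and link
 * notifications, detach netdev and SR-IOV state, then release LFs,
 * mailbox, IRQ vectors and PCI resources.
 */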
static void otx2_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf;

	if (!netdev)
		return;

	pf = netdev_priv(netdev);

	pf->flags |= OTX2_FLAG_PF_SHUTDOWN;

	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
		otx2_config_hw_tx_tstamp(pf, false);
	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
		otx2_config_hw_rx_tstamp(pf, false);

	cancel_work_sync(&pf->reset_task);
	/* Disable link notifications */
	otx2_cgx_config_linkevents(pf, false);

	unregister_netdev(netdev);
	otx2_sriov_disable(pf->pdev);
	otx2_sriov_vfcfg_cleanup(pf);
	if (pf->otx2_wq)
		destroy_workqueue(pf->otx2_wq);
	otx2_ptp_destroy(pf);
	otx2_mcam_flow_del(pf);
	otx2_shutdown_tc(pf);
	otx2_detach_resources(&pf->mbox);
	if (pf->hw.lmt_base)
		iounmap(pf->hw.lmt_base);

	otx2_disable_mbox_intr(pf);
	otx2_pfaf_mbox_destroy(pf);
	pci_free_irq_vectors(pf->pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	pci_release_regions(pdev);
}
static struct pci_driver otx2_pf_driver = {
	.name = DRV_NAME,
	.id_table = otx2_pf_id_table,
	.probe = otx2_probe,
	.shutdown = otx2_remove,
	.remove = otx2_remove,
	.sriov_configure = otx2_sriov_configure
};
static int __init otx2_rvupf_init_module(void)
{
	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	return pci_register_driver(&otx2_pf_driver);
}

static void __exit otx2_rvupf_cleanup_module(void)
{
	pci_unregister_driver(&otx2_pf_driver);
}

module_init(otx2_rvupf_init_module);
module_exit(otx2_rvupf_cleanup_module);