// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"

/* queuetype support level */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 0,   /* 0 = Base version with CQ+SG support */
	[IONIC_QTYPE_TXQ]     = 1,   /* 0 = Base version with CQ+SG support
				      * 1 =   ... with Tx SG version 1
				      */
};
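
/* Each entry is the highest queue-type version this driver knows how to
 * drive; it is compared against what the firmware reports in
 * ionic_lif_queue_identify() so host and device settle on a common
 * descriptor format.
 */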

static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
static void ionic_link_status_check(struct ionic_lif *lif);
static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);

static void ionic_txrx_deinit(struct ionic_lif *lif);
static int ionic_txrx_init(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);
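
/* Runs from the dim workqueue: translate the moderation profile that the
 * net_dim library selected into this device's coalesce units and stage it
 * in intr.dim_coal_hw for the interrupt path to apply.
 */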
static void ionic_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder cur_moder;
	struct ionic_qcq *qcq;
	u32 new_coal;

	cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	qcq = container_of(dim, struct ionic_qcq, dim);
	new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
	qcq->intr.dim_coal_hw = new_coal ? new_coal : 1;
	dim->state = DIM_START_MEASURE;
}
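
/* Deferred-work plumbing: contexts that cannot sleep queue a small
 * ionic_deferred_work item on a spinlock-protected list; this worker
 * drains the list in process context, where adminq commands may wait.
 */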
static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	do {
		spin_lock_bh(&def->lock);
		if (!list_empty(&def->list)) {
			w = list_first_entry(&def->list,
					     struct ionic_deferred_work, list);
			list_del(&w->list);
		}
		spin_unlock_bh(&def->lock);

		if (!w)
			break;

		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif, w->rx_mode);
			break;
		case IONIC_DW_TYPE_RX_ADDR_ADD:
			ionic_lif_addr_add(lif, w->addr);
			break;
		case IONIC_DW_TYPE_RX_ADDR_DEL:
			ionic_lif_addr_del(lif, w->addr);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		case IONIC_DW_TYPE_LIF_RESET:
			if (w->fw_status)
				ionic_lif_handle_fw_up(lif);
			else
				ionic_lif_handle_fw_down(lif);
			break;
		default:
			break;
		}
		kfree(w);
		w = NULL;
	} while (true);
}

void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
				struct ionic_deferred_work *work)
{
	spin_lock_bh(&def->lock);
	list_add_tail(&work->list, &def->list);
	spin_unlock_bh(&def->lock);
	schedule_work(&def->work);
}
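
/* Example (a sketch of the pattern used by atomic-context callers such as
 * ionic_link_status_check_request() below):
 *
 *	work = kzalloc(sizeof(*work), GFP_ATOMIC);
 *	if (work) {
 *		work->type = IONIC_DW_TYPE_LINK_STATUS;
 *		ionic_lif_deferred_enqueue(&lif->deferred, work);
 *	}
 */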
static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			u32 link_speed;

			ionic_port_identify(lif->ionic);
			link_speed = le32_to_cpu(lif->info->status.link_speed);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    link_speed / 1000);
			netif_carrier_on(netdev);
		}

		if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_start_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_stop_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}

void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
			return;
		}

		work->type = IONIC_DW_TYPE_LINK_STATUS;
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_link_status_check(lif);
	}
}

static irqreturn_t ionic_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_intr_info *intr = &qcq->intr;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	const char *name;

	if (lif->registered)
		name = lif->netdev->name;
	else
		name = dev_name(dev);

	snprintf(intr->name, sizeof(intr->name),
		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);

	return devm_request_irq(dev, intr->vector, ionic_isr,
				0, intr->name, &qcq->napi);
}

static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index;

	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs) {
		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
			    __func__, index, ionic->nintrs);
		return -ENOSPC;
	}

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	return 0;
}

static void ionic_intr_free(struct ionic *ionic, int index)
{
	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
		clear_bit(index, ionic->intrs);
}
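
/* Bring-up order matters here: the affinity hint and napi context are set
 * up and the interrupt unmasked before the firmware is told to enable the
 * queue, so no completion can arrive while the host side is still unready.
 */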
static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		irq_set_affinity_hint(qcq->intr.vector,
				      &qcq->intr.affinity_mask);
		napi_enable(&qcq->napi);
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
{
	struct ionic_queue *q;
	struct ionic_lif *lif;
	int err = 0;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.oper = IONIC_Q_DISABLE,
		},
	};

	if (!qcq)
		return -ENXIO;

	q = &qcq->q;
	lif = q->lif;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		struct ionic_dev *idev = &lif->ionic->idev;

		cancel_work_sync(&qcq->dim.work);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
	}

	if (send_to_hw) {
		ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
		ctx.cmd.q_control.type = q->type;
		ctx.cmd.q_control.index = cpu_to_le32(q->index);
		dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
			ctx.cmd.q_control.index, ctx.cmd.q_control.type);

		err = ionic_adminq_post_wait(lif, &ctx);
	}

	return err;
}
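
/* Teardown mirrors ionic_qcq_enable() in reverse.  Callers pass
 * send_to_hw=false once an earlier adminq command has timed out (see the
 * -ETIMEDOUT checks in ionic_txrx_disable()), to avoid queueing more
 * commands at firmware that has stopped answering.
 */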
static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	if (!qcq)
		return;

	if (!(qcq->flags & IONIC_QCQ_F_INITED))
		return;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		netif_napi_del(&qcq->napi);
	}

	qcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
		return;

	irq_set_affinity_hint(qcq->intr.vector, NULL);
	devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
	qcq->intr.vector = 0;
	ionic_intr_free(lif->ionic, qcq->intr.index);
	qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}

static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	if (qcq->q_base) {
		dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
		qcq->q_base = NULL;
		qcq->q_base_pa = 0;
	}

	if (qcq->cq_base) {
		dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
		qcq->cq_base = NULL;
		qcq->cq_base_pa = 0;
	}

	if (qcq->sg_base) {
		dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
		qcq->sg_base = NULL;
		qcq->sg_base_pa = 0;
	}

	ionic_qcq_intr_free(lif, qcq);

	if (qcq->cq.info) {
		devm_kfree(dev, qcq->cq.info);
		qcq->cq.info = NULL;
	}
	if (qcq->q.info) {
		devm_kfree(dev, qcq->q.info);
		qcq->q.info = NULL;
	}
}

static void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		devm_kfree(dev, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		ionic_qcq_free(lif, lif->adminqcq);
		devm_kfree(dev, lif->adminqcq);
		lif->adminqcq = NULL;
	}

	if (lif->rxqcqs) {
		devm_kfree(dev, lif->rxqstats);
		lif->rxqstats = NULL;
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		devm_kfree(dev, lif->txqstats);
		lif->txqstats = NULL;
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}

static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
		ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
	}

	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
}

static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	int err;

	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
		qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	err = ionic_intr_alloc(lif, &qcq->intr);
	if (err) {
		netdev_warn(lif->netdev, "no intr for %s: %d\n",
			    qcq->q.name, err);
		goto err_out;
	}

	err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
	if (err < 0) {
		netdev_warn(lif->netdev, "no vector for %s: %d\n",
			    qcq->q.name, err);
		goto err_out_free_intr;
	}
	qcq->intr.vector = err;
	ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
			       IONIC_INTR_MASK_SET);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "irq request failed %d\n", err);
		goto err_out_free_intr;
	}

	/* try to get the irq on the local numa node first */
	qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
					     dev_to_node(lif->ionic->dev));
	if (qcq->intr.cpu != -1)
		cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);

	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
	return 0;

err_out_free_intr:
	ionic_intr_free(lif->ionic, qcq->intr.index);
err_out:
	return err;
}
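
/* A qcq bundles one device queue with its completion queue and, when
 * IONIC_QCQ_F_INTR is set, a dedicated interrupt and napi context.  Each
 * DMA region below is over-allocated by one page so the ring itself can
 * be aligned up to a page boundary with PTR_ALIGN()/ALIGN().
 */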
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
			   unsigned int index,
			   const char *name, unsigned int flags,
			   unsigned int num_descs, unsigned int desc_size,
			   unsigned int cq_desc_size,
			   unsigned int sg_desc_size,
			   unsigned int pid, struct ionic_qcq **qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	void *q_base, *cq_base, *sg_base;
	dma_addr_t cq_base_pa = 0;
	dma_addr_t sg_base_pa = 0;
	dma_addr_t q_base_pa = 0;
	struct ionic_qcq *new;
	int err;

	*qcq = NULL;

	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
	if (!new) {
		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->flags = flags;

	new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
				   GFP_KERNEL);
	if (!new->q.info) {
		netdev_err(lif->netdev, "Cannot allocate queue info\n");
		err = -ENOMEM;
		goto err_out_free_qcq;
	}

	new->q.type = type;

	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
			   desc_size, sg_desc_size, pid);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize queue\n");
		goto err_out_free_q_info;
	}

	err = ionic_alloc_qcq_interrupt(lif, new);
	if (err)
		goto err_out;

	new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
				    GFP_KERNEL);
	if (!new->cq.info) {
		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
		goto err_out_free_cq_info;
	}

	if (flags & IONIC_QCQ_F_NOTIFYQ) {
		int q_size, cq_size;

		/* q & cq need to be contiguous in case of notifyq */
		q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
		cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);

		new->q_size = PAGE_SIZE + q_size + cq_size;
		new->q_base = dma_alloc_coherent(dev, new->q_size,
						 &new->q_base_pa, GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
		ionic_q_map(&new->q, q_base, q_base_pa);

		cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
		cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
		ionic_cq_bind(&new->cq, &new->q);
	} else {
		new->q_size = PAGE_SIZE + (num_descs * desc_size);
		new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
						 GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
		ionic_q_map(&new->q, q_base, q_base_pa);

		new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
		new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
						  GFP_KERNEL);
		if (!new->cq_base) {
			netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_q;
		}
		cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
		cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
		ionic_cq_bind(&new->cq, &new->q);
	}

	if (flags & IONIC_QCQ_F_SG) {
		new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
		new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
						  GFP_KERNEL);
		if (!new->sg_base) {
			netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq;
		}
		sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
		sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	INIT_WORK(&new->dim.work, ionic_dim_work);
	new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;

	*qcq = new;

	return 0;

err_out_free_cq:
	dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
err_out_free_q:
	dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
	devm_kfree(dev, new->cq.info);
err_out_free_irq:
	if (flags & IONIC_QCQ_F_INTR) {
		devm_free_irq(dev, new->intr.vector, &new->napi);
		ionic_intr_free(lif->ionic, new->intr.index);
	}
err_out_free_q_info:
	devm_kfree(dev, new->q.info);
err_out_free_qcq:
	devm_kfree(dev, new);
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int flags;
	int err;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0, lif->kern_pid, &lif->adminqcq);
	if (err)
		return err;
	ionic_debugfs_add_qcq(lif, lif->adminqcq);

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0, lif->kern_pid, &lif->notifyqcq);
		if (err)
			goto err_out;
		ionic_debugfs_add_qcq(lif, lif->notifyqcq);

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	err = -ENOMEM;
	lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				   sizeof(struct ionic_qcq *), GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out;
	lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				   sizeof(struct ionic_qcq *), GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out;

	lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				     sizeof(struct ionic_tx_stats), GFP_KERNEL);
	if (!lif->txqstats)
		goto err_out;
	lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				     sizeof(struct ionic_rx_stats), GFP_KERNEL);
	if (!lif->rxqstats)
		goto err_out;

	return 0;

err_out:
	ionic_qcqs_free(lif);
	return err;
}

static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
{
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;
	memset(qcq->q_base, 0, qcq->q_size);
	memset(qcq->cq_base, 0, qcq->cq_size);
	memset(qcq->sg_base, 0, qcq->sg_size);
}
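
/* Queue init is an adminq transaction: the host posts IONIC_CMD_Q_INIT
 * with the ring geometry and DMA addresses, and the completion hands back
 * the hardware type and index used to build the doorbell value.
 */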
static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
		},
	};
	unsigned int intr_index;
	int err;

	if (qcq->flags & IONIC_QCQ_F_INTR)
		intr_index = qcq->intr.index;
	else
		intr_index = lif->rxqcqs[q->index]->intr.index;
	ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
			       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
		},
	};
	int err;

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
			       NAPI_POLL_WEIGHT);
	else
		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
			       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

static bool ionic_notifyq_service(struct ionic_cq *cq,
				  struct ionic_cq_info *cq_info)
{
	union ionic_notifyq_comp *comp = cq_info->cq_desc;
	struct ionic_deferred_work *work;
	struct net_device *netdev;
	struct ionic_queue *q;
	struct ionic_lif *lif;
	u64 eid;

	q = cq->bound_q;
	lif = q->info[0].cb_arg;
	netdev = lif->netdev;
	eid = le64_to_cpu(comp->event.eid);

	/* Have we run out of new completions to process? */
	if ((s64)(eid - lif->last_eid) <= 0)
		return false;

	lif->last_eid = eid;

	dev_dbg(lif->ionic->dev, "notifyq event:\n");
	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	switch (le16_to_cpu(comp->event.ecode)) {
	case IONIC_EVENT_LINK_CHANGE:
		ionic_link_status_check_request(lif, false);
		break;
	case IONIC_EVENT_RESET:
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
		} else {
			work->type = IONIC_DW_TYPE_LIF_RESET;
			ionic_lif_deferred_enqueue(&lif->deferred, work);
		}
		break;
	default:
		netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
			    comp->event.ecode, eid);
		break;
	}

	return true;
}

static bool ionic_adminq_service(struct ionic_cq *cq,
				 struct ionic_cq_info *cq_info)
{
	struct ionic_admin_comp *comp = cq_info->cq_desc;

	if (!color_match(comp->color, cq->done_color))
		return false;

	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));

	return true;
}
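
/* The adminq napi also polls the notifyq, which has no interrupt of its
 * own and rides on the adminq vector (see ionic_qcqs_alloc() above), so
 * credits for both queues are returned on the one interrupt.
 */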
static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	struct ionic_dev *idev = &lif->ionic->idev;
	unsigned int flags = 0;
	int n_work = 0;
	int a_work = 0;
	int work_done;

	if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
		n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
					  ionic_notifyq_service, NULL, NULL);

	if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
		a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
					  ionic_adminq_service, NULL, NULL);

	work_done = max(n_work, a_work);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		lif->adminqcq->cq.bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   intr->index,
				   n_work + a_work, flags);
	}

	return work_done;
}

void ionic_get_stats64(struct net_device *netdev,
		       struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}

static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	/* don't bother if we already have it */
	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	spin_unlock_bh(&lif->rx_filters.lock);
	if (f)
		return 0;

	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);

	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
		   addr, f->filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	return 0;
}

static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
			  bool can_sleep)
{
	struct ionic_deferred_work *work;
	unsigned int nmfilters;
	unsigned int nufilters;

	if (add) {
		/* Do we have space for this filter?  We test the counters
		 * here before checking the need for deferral so that we
		 * can return an overflow error to the stack.
		 */
		nmfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
		nufilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);

		if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
			lif->nmcast++;
		else if (!is_multicast_ether_addr(addr) &&
			 lif->nucast < nufilters)
			lif->nucast++;
		else
			return -ENOSPC;
	} else {
		if (is_multicast_ether_addr(addr) && lif->nmcast)
			lif->nmcast--;
		else if (!is_multicast_ether_addr(addr) && lif->nucast)
			lif->nucast--;
	}

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
			return -ENOMEM;
		}
		work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
				   IONIC_DW_TYPE_RX_ADDR_DEL;
		memcpy(work->addr, addr, ETH_ALEN);
		netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		if (add)
			return ionic_lif_addr_add(lif, addr);
		else
			return ionic_lif_addr_del(lif, addr);
	}

	return 0;
}

static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, true, true);
}

static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, true, false);
}

static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, false, true);
}

static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, false, false);
}

static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_mode_set = {
			.opcode = IONIC_CMD_RX_MODE_SET,
			.lif_index = cpu_to_le16(lif->index),
			.rx_mode = cpu_to_le16(rx_mode),
		},
	};
	char buf[128];
	int err;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		      lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
			    rx_mode, err);
	else
		lif->rx_mode = rx_mode;
}

static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode,
			       bool from_ndo)
{
	struct ionic_deferred_work *work;

	if (from_ndo) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
			return;
		}
		work->type = IONIC_DW_TYPE_RX_MODE;
		work->rx_mode = rx_mode;
		netdev_dbg(lif->netdev, "deferred: rx_mode\n");
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_lif_rx_mode(lif, rx_mode);
	}
}

static void ionic_dev_uc_sync(struct net_device *netdev, bool from_ndo)
{
	if (from_ndo)
		__dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
	else
		__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
}
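
/* Multicast counterpart to ionic_dev_uc_sync(), added here so the mc list
 * below is synced with __dev_mc_sync() rather than re-running the uc sync;
 * a minimal sketch assuming __dev_mc_sync() takes the same add/del
 * callbacks as __dev_uc_sync().
 */
static void ionic_dev_mc_sync(struct net_device *netdev, bool from_ndo)
{
	if (from_ndo)
		__dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
	else
		__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
}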

static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	unsigned int nfilters;
	unsigned int rx_mode;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync unicast addresses
	 * next check to see if we're in an overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 *    else if the overflow is set and not needed
	 *       we remove our overflow flag and check the netdev flags
	 *       to see if we can disable NIC PROMISC
	 */
	ionic_dev_uc_sync(netdev, from_ndo);
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
	if (netdev_uc_count(netdev) + 1 > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		lif->uc_overflow = true;
	} else if (lif->uc_overflow) {
		lif->uc_overflow = false;
		if (!(netdev->flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
	}

	/* same for multicast */
	ionic_dev_mc_sync(netdev, from_ndo);
	nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
	if (netdev_mc_count(netdev) > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
		lif->mc_overflow = true;
	} else if (lif->mc_overflow) {
		lif->mc_overflow = false;
		if (!(netdev->flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	if (lif->rx_mode != rx_mode)
		_ionic_lif_rx_mode(lif, rx_mode, from_ndo);
}

static void ionic_ndo_set_rx_mode(struct net_device *netdev)
{
	ionic_set_rx_mode(netdev, true);
}

static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}

static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & features) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");

	return 0;
}

static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_RXHASH |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	return 0;
}

static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_addr_del(netdev, netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_addr_add(netdev, mac);
}

static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
	/* Stop and clean the queues before reconfiguration */
	mutex_lock(&lif->queue_lock);
	netif_device_detach(lif->netdev);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
}

static int ionic_start_queues_reconfig(struct ionic_lif *lif)
{
	int err;

	/* Re-init the queues after reconfiguration */

	/* The only way txrx_init can fail here is if communication
	 * with FW is suddenly broken.  There's not much we can do
	 * at this point - error messages have already been printed,
	 * so we can continue on and the user can eventually do a
	 * DOWN and UP to try to reset and clear the issue.
	 */
	err = ionic_txrx_init(lif);
	mutex_unlock(&lif->queue_lock);
	ionic_link_status_check_request(lif, true);
	netif_device_attach(lif->netdev);

	return err;
}
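
/* Note that lif->queue_lock is taken in ionic_stop_queues_reconfig() and
 * released here, so the two calls bracket a critical section that the
 * callers below (MTU change, timeout recovery, queue reconfig) rely on.
 */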
static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	netdev->mtu = new_mtu;
	/* if we're not running, nothing more to do */
	if (!netif_running(netdev))
		return 0;

	ionic_stop_queues_reconfig(lif);
	return ionic_start_queues_reconfig(lif);
}

static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);

	netdev_info(lif->netdev, "Tx Timeout recovery\n");

	/* if we were stopped before this scheduled job was launched,
	 * don't bother the queues as they are already stopped.
	 */
	if (!netif_running(lif->netdev))
		return;

	ionic_stop_queues_reconfig(lif);
	ionic_start_queues_reconfig(lif);
}

static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	schedule_work(&lif->tx_timeout_work);
}

static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
			.vlan.vlan = cpu_to_le16(vid),
		},
	};
	int err;

	netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_vlan(lif, vid);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
		   vid, f->filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	return ionic_adminq_post_wait(lif, &ctx);
}

int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
		lif->rss_types = types;
		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
	}

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_lif_rss_init(struct ionic_lif *lif)
{
	unsigned int tbl_sz;
	unsigned int i;

	lif->rss_types = IONIC_RSS_TYPE_IPV4 |
			 IONIC_RSS_TYPE_IPV4_TCP |
			 IONIC_RSS_TYPE_IPV4_UDP |
			 IONIC_RSS_TYPE_IPV6 |
			 IONIC_RSS_TYPE_IPV6_TCP |
			 IONIC_RSS_TYPE_IPV6_UDP;

	/* Fill indirection table with 'default' values */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	for (i = 0; i < tbl_sz; i++)
		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);

	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
}

static void ionic_lif_rss_deinit(struct ionic_lif *lif)
{
	int tbl_sz;

	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	memset(lif->rss_ind_tbl, 0, tbl_sz);
	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}

static void ionic_txrx_disable(struct ionic_lif *lif)
{
	unsigned int i;
	int err = 0;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
	}
}

static void ionic_txrx_deinit(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			ionic_tx_flush(&lif->txqcqs[i]->cq);
			ionic_tx_empty(&lif->txqcqs[i]->q);
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
			ionic_rx_empty(&lif->rxqcqs[i]->q);
		}
	}
	lif->rx_mode = 0;
}

static void ionic_txrx_free(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->txqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
			lif->txqcqs[i] = NULL;
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->rxqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
			lif->rxqcqs[i] = NULL;
		}
	}
}

static int ionic_txrx_alloc(struct ionic_lif *lif)
{
	unsigned int sg_desc_sz;
	unsigned int flags;
	unsigned int i;
	int err = 0;

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
					  sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		flags |= IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
				      lif->ntxq_descs,
				      sizeof(struct ionic_txq_desc),
				      sizeof(struct ionic_txq_comp),
				      sg_desc_sz,
				      lif->kern_pid, &lif->txqcqs[i]);
		if (err)
			goto err_out;

		if (flags & IONIC_QCQ_F_INTR) {
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->txqcqs[i]->intr.index,
					     lif->tx_coalesce_hw);
			if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
				lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
		}

		ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
	}

	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
				      lif->nrxq_descs,
				      sizeof(struct ionic_rxq_desc),
				      sizeof(struct ionic_rxq_comp),
				      sizeof(struct ionic_rxq_sg_desc),
				      lif->kern_pid, &lif->rxqcqs[i]);
		if (err)
			goto err_out;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[i]->intr.index,
				     lif->rx_coalesce_hw);
		if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
			lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;

		if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
			ionic_link_qcq_interrupts(lif->rxqcqs[i],
						  lif->txqcqs[i]);

		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
	}

	return 0;

err_out:
	ionic_txrx_free(lif);

	return err;
}

static int ionic_txrx_init(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
		if (err) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			goto err_out;
		}
	}

	if (lif->netdev->features & NETIF_F_RXHASH)
		ionic_lif_rss_init(lif);

	ionic_set_rx_mode(lif->netdev, false);

	return 0;

err_out:
	while (i--) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
	}

	return err;
}

static int ionic_txrx_enable(struct ionic_lif *lif)
{
	int derr = 0;
	int i, err;

	for (i = 0; i < lif->nxqs; i++) {
		if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
			dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
			err = -ENXIO;
			goto err_out;
		}

		ionic_rx_fill(&lif->rxqcqs[i]->q);
		err = ionic_qcq_enable(lif->rxqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_qcq_enable(lif->txqcqs[i]);
		if (err) {
			derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
			goto err_out;
		}
	}

	return 0;

err_out:
	while (i--) {
		derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
		derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT));
	}

	return err;
}

static int ionic_start_queues(struct ionic_lif *lif)
{
	int err;

	if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
		return 0;

	err = ionic_txrx_enable(lif);
	if (err) {
		clear_bit(IONIC_LIF_F_UP, lif->state);
		return err;
	}
	netif_tx_wake_all_queues(lif->netdev);

	return 0;
}

static int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_txrx_alloc(lif);
	if (err)
		return err;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_txrx_free;

	err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	/* don't start the queues until we have link */
	if (netif_carrier_ok(netdev)) {
		err = ionic_start_queues(lif);
		if (err)
			goto err_txrx_deinit;
	}

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_txrx_free:
	ionic_txrx_free(lif);
	return err;
}

static void ionic_stop_queues(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
		return;

	netif_tx_disable(lif->netdev);
	ionic_txrx_disable(lif);
}

static int ionic_stop(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return 0;

	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
	ionic_txrx_free(lif);

	return 0;
}

static int ionic_get_vf_config(struct net_device *netdev,
			       int vf, struct ifla_vf_info *ivf)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ivf->vf           = vf;
		ivf->vlan         = le16_to_cpu(ionic->vfs[vf].vlanid);
		ivf->qos	  = 0;
		ivf->spoofchk     = ionic->vfs[vf].spoofchk;
		ivf->linkstate    = ionic->vfs[vf].linkstate;
		ivf->max_tx_rate  = le32_to_cpu(ionic->vfs[vf].maxrate);
		ivf->trusted      = ionic->vfs[vf].trusted;
		ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}

static int ionic_get_vf_stats(struct net_device *netdev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	struct ionic_lif_stats *vs;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		memset(vf_stats, 0, sizeof(*vf_stats));
		vs = &ionic->vfs[vf].stats;

		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
		vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
		vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
		vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
		vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
				       le64_to_cpu(vs->rx_mcast_drop_packets) +
				       le64_to_cpu(vs->rx_bcast_drop_packets);
		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
				       le64_to_cpu(vs->tx_mcast_drop_packets) +
				       le64_to_cpu(vs->tx_bcast_drop_packets);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
		if (!ret)
			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
			     u8 qos, __be16 proto)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* until someday when we support qos */
	if (qos)
		return -EINVAL;

	if (vlan > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
		if (!ret)
			ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_rate(struct net_device *netdev, int vf,
			     int tx_min, int tx_max)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* setting the min just seems silly */
	if (tx_min)
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
		if (!ret)
			lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data = set;  /* convert to u8 for config */
	int ret;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_SPOOFCHK, &data);
		if (!ret)
			ionic->vfs[vf].spoofchk = data;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data = set;  /* convert to u8 for config */
	int ret;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_TRUST, &data);
		if (!ret)
			ionic->vfs[vf].trusted = data;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data;
	int ret;

	switch (set) {
	case IFLA_VF_LINK_STATE_ENABLE:
		data = IONIC_VF_LINK_STATUS_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		data = IONIC_VF_LINK_STATUS_DOWN;
		break;
	case IFLA_VF_LINK_STATE_AUTO:
		data = IONIC_VF_LINK_STATUS_AUTO;
		break;
	default:
		return -EINVAL;
	}

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_LINKSTATE, &data);
		if (!ret)
			ionic->vfs[vf].linkstate = set;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static const struct net_device_ops ionic_netdev_ops = {
	.ndo_open               = ionic_open,
	.ndo_stop               = ionic_stop,
	.ndo_start_xmit		= ionic_start_xmit,
	.ndo_get_stats64	= ionic_get_stats64,
	.ndo_set_rx_mode	= ionic_ndo_set_rx_mode,
	.ndo_set_features	= ionic_set_features,
	.ndo_set_mac_address	= ionic_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout         = ionic_tx_timeout,
	.ndo_change_mtu         = ionic_change_mtu,
	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= ionic_set_vf_vlan,
	.ndo_set_vf_trust	= ionic_set_vf_trust,
	.ndo_set_vf_mac		= ionic_set_vf_mac,
	.ndo_set_vf_rate	= ionic_set_vf_rate,
	.ndo_set_vf_spoofchk	= ionic_set_vf_spoofchk,
	.ndo_get_vf_config	= ionic_get_vf_config,
	.ndo_set_vf_link_state	= ionic_set_vf_link_state,
	.ndo_get_vf_stats       = ionic_get_vf_stats,
};

static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
	/* only swapping the queues, not the napi, flags, or other stuff */
	swap(a->q.num_descs, b->q.num_descs);
	swap(a->q.base,      b->q.base);
	swap(a->q.base_pa,   b->q.base_pa);
	swap(a->q.info,      b->q.info);
	swap(a->q_base,      b->q_base);
	swap(a->q_base_pa,   b->q_base_pa);
	swap(a->q_size,      b->q_size);

	swap(a->q.sg_base,   b->q.sg_base);
	swap(a->q.sg_base_pa, b->q.sg_base_pa);
	swap(a->sg_base,     b->sg_base);
	swap(a->sg_base_pa,  b->sg_base_pa);
	swap(a->sg_size,     b->sg_size);

	swap(a->cq.num_descs, b->cq.num_descs);
	swap(a->cq.base,     b->cq.base);
	swap(a->cq.base_pa,  b->cq.base_pa);
	swap(a->cq.info,     b->cq.info);
	swap(a->cq_base,     b->cq_base);
	swap(a->cq_base_pa,  b->cq_base_pa);
	swap(a->cq_size,     b->cq_size);
}
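
/* Ring resize strategy: allocate the new rings first so a failure leaves
 * the running queues untouched, quiesce, swap the ring memory into the
 * existing qcq shells with ionic_swap_queues(), then restart and free the
 * old rings.
 */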
int ionic_reconfigure_queues(struct ionic_lif *lif,
			     struct ionic_queue_params *qparam)
{
	struct ionic_qcq **tx_qcqs = NULL;
	struct ionic_qcq **rx_qcqs = NULL;
	unsigned int sg_desc_sz;
	unsigned int flags;
	int err = -ENOMEM;
	unsigned int i;

	/* allocate temporary qcq arrays to hold new queue structs */
	if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
		tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!tx_qcqs)
			goto err_out;
	}
	if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs) {
		rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!rx_qcqs)
			goto err_out;
	}

	/* allocate new desc_info and rings, but leave the interrupt setup
	 * until later so as to not mess with the still-running queues
	 */
	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
					  sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	if (tx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
					      qparam->ntxq_descs,
					      sizeof(struct ionic_txq_desc),
					      sizeof(struct ionic_txq_comp),
					      sg_desc_sz,
					      lif->kern_pid, &tx_qcqs[i]);
			if (err)
				goto err_out;
		}
	}

	if (rx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
					      qparam->nrxq_descs,
					      sizeof(struct ionic_rxq_desc),
					      sizeof(struct ionic_rxq_comp),
					      sizeof(struct ionic_rxq_sg_desc),
					      lif->kern_pid, &rx_qcqs[i]);
			if (err)
				goto err_out;
		}
	}

	/* stop and clean the queues */
	ionic_stop_queues_reconfig(lif);

	if (qparam->nxqs != lif->nxqs) {
		err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
		if (err)
			goto err_out_reinit_unlock;
		err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
		if (err) {
			netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
			goto err_out_reinit_unlock;
		}
	}

	/* swap new desc_info and rings, keeping existing interrupt config */
	if (tx_qcqs) {
		lif->ntxq_descs = qparam->ntxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
	}

	if (rx_qcqs) {
		lif->nrxq_descs = qparam->nrxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
	}

	/* if we need to change the interrupt layout, this is the time */
	if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
	    qparam->nxqs != lif->nxqs) {
		if (qparam->intr_split) {
			set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
		} else {
			clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
			lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
			lif->tx_coalesce_hw = lif->rx_coalesce_hw;
		}

		/* clear existing interrupt assignments */
		for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
			ionic_qcq_intr_free(lif, lif->txqcqs[i]);
			ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
		}

		/* re-assign the interrupts */
		for (i = 0; i < qparam->nxqs; i++) {
			lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
			err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->rxqcqs[i]->intr.index,
					     lif->rx_coalesce_hw);

			if (qparam->intr_split) {
				lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
				err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
				ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
						     lif->txqcqs[i]->intr.index,
						     lif->tx_coalesce_hw);
				if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
					lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
			} else {
				lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
				ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
			}
		}
	}

	/* now we can rework the debugfs mappings */
	if (tx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			ionic_debugfs_del_qcq(lif->txqcqs[i]);
			ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
		}
	}

	if (rx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			ionic_debugfs_del_qcq(lif->rxqcqs[i]);
			ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
		}
	}

	swap(lif->nxqs, qparam->nxqs);

err_out_reinit_unlock:
	/* re-init the queues, but don't lose an error code */
	if (err)
		ionic_start_queues_reconfig(lif);
	else
		err = ionic_start_queues_reconfig(lif);

err_out:
	/* free old allocs without cleaning intr */
	for (i = 0; i < qparam->nxqs; i++) {
		if (tx_qcqs && tx_qcqs[i]) {
			tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, tx_qcqs[i]);
			devm_kfree(lif->ionic->dev, tx_qcqs[i]);
			tx_qcqs[i] = NULL;
		}
		if (rx_qcqs && rx_qcqs[i]) {
			rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, rx_qcqs[i]);
			devm_kfree(lif->ionic->dev, rx_qcqs[i]);
			rx_qcqs[i] = NULL;
		}
	}

	/* free q arrays */
	if (rx_qcqs) {
		devm_kfree(lif->ionic->dev, rx_qcqs);
		rx_qcqs = NULL;
	}
	if (tx_qcqs) {
		devm_kfree(lif->ionic->dev, tx_qcqs);
		tx_qcqs = NULL;
	}

	/* clean the unused dma and info allocations when new set is smaller
	 * than the full array, but leave the qcq shells in place
	 */
	for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
		lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
		ionic_qcq_free(lif, lif->txqcqs[i]);

		lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
		ionic_qcq_free(lif, lif->rxqcqs[i]);
	}

	return err;
}

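/* Allocate the netdev with its embedded LIF private struct, identify
 * the LIF against the device, and set up the supporting allocations:
 * the lif info DMA block, the control and txrx qcq arrays, and the
 * RSS indirection table.  Bringing the LIF up against the hardware is
 * left to ionic_lif_init().
 */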
int ionic_lif_alloc(struct ionic *ionic)
{
	struct device *dev = ionic->dev;
	union ionic_lif_identity *lid;
	struct net_device *netdev;
	struct ionic_lif *lif;
	int tbl_sz;
	int err;

	lid = kzalloc(sizeof(*lid), GFP_KERNEL);
	if (!lid)
		return -ENOMEM;

	netdev = alloc_etherdev_mqs(sizeof(*lif),
				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
	if (!netdev) {
		dev_err(dev, "Cannot allocate netdev, aborting\n");
		err = -ENOMEM;
		goto err_out_free_lid;
	}

	SET_NETDEV_DEV(netdev, dev);

	lif = netdev_priv(netdev);
	lif->netdev = netdev;
	ionic->lif = lif;
	netdev->netdev_ops = &ionic_netdev_ops;
	ionic_ethtool_set_ops(netdev);

	netdev->watchdog_timeo = 2 * HZ;
	netif_carrier_off(netdev);

	lif->identity = lid;
	lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
	err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
	if (err) {
		dev_err(ionic->dev, "Cannot identify type %d: %d\n",
			lif->lif_type, err);
		goto err_out_free_netdev;
	}
	lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
				     le32_to_cpu(lif->identity->eth.min_frame_size));
	lif->netdev->max_mtu =
		le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;

	lif->neqs = ionic->neqs_per_lif;
	lif->nxqs = ionic->ntxqs_per_lif;

	lif->ionic = ionic;
	lif->index = 0;
	lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
	lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
	lif->tx_budget = IONIC_TX_BUDGET_DEFAULT;

	/* Convert the default coalesce value to actual hw resolution */
	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
						    lif->rx_coalesce_usecs);
	lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
	lif->tx_coalesce_hw = lif->rx_coalesce_hw;
	set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
	set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);

	snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);

	spin_lock_init(&lif->adminq_lock);

	spin_lock_init(&lif->deferred.lock);
	INIT_LIST_HEAD(&lif->deferred.list);
	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);

	/* allocate lif info */
	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
	lif->info = dma_alloc_coherent(dev, lif->info_sz,
				       &lif->info_pa, GFP_KERNEL);
	if (!lif->info) {
		dev_err(dev, "Failed to allocate lif info, aborting\n");
		err = -ENOMEM;
		goto err_out_free_netdev;
	}

	ionic_debugfs_add_lif(lif);

	/* allocate control queues and txrx queue arrays */
	ionic_lif_queue_identify(lif);
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out_free_lif_info;

	/* allocate rss indirection table */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
					      &lif->rss_ind_tbl_pa,
					      GFP_KERNEL);
	if (!lif->rss_ind_tbl) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
		goto err_out_free_qcqs;
	}
	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);

	return 0;

err_out_free_qcqs:
	ionic_qcqs_free(lif);
err_out_free_lif_info:
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;
err_out_free_netdev:
	free_netdev(lif->netdev);
	lif = NULL;
err_out_free_lid:
	kfree(lid);

	return err;
}

static void ionic_lif_reset(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_reset(idev, lif->index);
	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
}

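/* Tear down LIF resources in response to a firmware stop or reset:
 * detach the netdev so the stack stops calling into us, stop any
 * running queues, then free everything that will need to be rebuilt
 * when the firmware comes back up.
 */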
static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;

	if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Down: Stopping LIFs\n");

	netif_device_detach(lif->netdev);

	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
		dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
		mutex_lock(&lif->queue_lock);
		ionic_stop_queues(lif);
		mutex_unlock(&lif->queue_lock);
	}

	if (netif_running(lif->netdev)) {
		ionic_txrx_deinit(lif);
		ionic_txrx_free(lif);
	}
	ionic_lif_deinit(lif);
	ionic_reset(ionic);
	ionic_qcqs_free(lif);

	dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}

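/* Rebuild what ionic_lif_handle_fw_down() tore down, in dependency
 * order: device and port identify/init, qcq allocation, LIF init,
 * rx filter replay, then txrx realloc and init if the interface was
 * running when the firmware went away.
 */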
static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;
	int err;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Up: restarting LIFs\n");

	ionic_init_devinfo(ionic);
	err = ionic_identify(ionic);
	if (err)
		goto err_out;
	err = ionic_port_identify(ionic);
	if (err)
		goto err_out;
	err = ionic_port_init(ionic);
	if (err)
		goto err_out;
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out;

	err = ionic_lif_init(lif);
	if (err)
		goto err_qcqs_free;

	if (lif->registered)
		ionic_lif_set_netdev_info(lif);

	ionic_rx_filter_replay(lif);

	if (netif_running(lif->netdev)) {
		err = ionic_txrx_alloc(lif);
		if (err)
			goto err_lifs_deinit;
		err = ionic_txrx_init(lif);
		if (err)
			goto err_txrx_free;
	}

	clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
	ionic_link_status_check_request(lif, true);
	netif_device_attach(lif->netdev);
	dev_info(ionic->dev, "FW Up: LIFs restarted\n");

	return;

err_txrx_free:
	ionic_txrx_free(lif);
err_lifs_deinit:
	ionic_lif_deinit(lif);
err_qcqs_free:
	ionic_qcqs_free(lif);
err_out:
	dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}

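/* Final teardown of LIF resources, the reverse of ionic_lif_alloc().
 * The LIF reset dev_cmd is skipped when the firmware is already in
 * reset, since the device may not be able to service the command.
 */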
void ionic_lif_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;

	/* free rss indirection table */
	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
			  lif->rss_ind_tbl_pa);
	lif->rss_ind_tbl = NULL;
	lif->rss_ind_tbl_pa = 0;

	/* free queues */
	ionic_qcqs_free(lif);
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		ionic_lif_reset(lif);

	/* free lif info */
	kfree(lif->identity);
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;

	/* unmap doorbell page */
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	/* free netdev & lif */
	ionic_debugfs_del_lif(lif);
	free_netdev(lif->netdev);
}

void ionic_lif_deinit(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
		return;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		cancel_work_sync(&lif->deferred.work);
		cancel_work_sync(&lif->tx_timeout_work);
		ionic_rx_filters_deinit(lif);
		if (lif->netdev->features & NETIF_F_RXHASH)
			ionic_lif_rss_deinit(lif);
	}

	napi_disable(&lif->adminqcq->napi);
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
	ionic_lif_qcq_deinit(lif, lif->adminqcq);

	mutex_destroy(&lif->queue_lock);
	ionic_lif_reset(lif);
}

static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_q_init_comp comp;
	struct ionic_dev *idev;
	struct ionic_qcq *qcq;
	struct ionic_queue *q;
	int err;

	idev = &lif->ionic->idev;
	qcq = lif->adminqcq;
	q = &qcq->q;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err) {
		netdev_err(lif->netdev, "adminq init failed %d\n", err);
		return err;
	}

	q->hw_type = comp.hw_type;
	q->hw_index = le32_to_cpu(comp.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
		       NAPI_POLL_WEIGHT);

	napi_enable(&qcq->napi);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

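/* The NotifyQ carries asynchronous event notifications from the
 * firmware.  It is initialized with an AdminQ command and shares the
 * AdminQ's interrupt, so it can only come up after
 * ionic_lif_adminq_init() has succeeded.
 */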
static int ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_qcq *qcq = lif->notifyqcq;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	int err;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_ENA),
			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
		}
	};

	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	lif->last_eid = 0;
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);

	/* preset the callback info */
	q->info[0].cb_arg = lif;

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

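/* Read the device's station MAC over the AdminQ and reconcile it with
 * the netdev address: adopt the device MAC if the netdev has none,
 * otherwise make sure the existing netdev MAC is in our filter list,
 * e.g. when arriving here again after a fw-upgrade reset.
 */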
static int ionic_station_set(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	struct sockaddr addr;
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;
	netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
		   ctx.comp.lif_getattr.mac);
	if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
		return 0;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		/* If the netdev mac is non-zero and doesn't match the default
		 * device address, it was set by something earlier and we're
		 * likely here again after a fw-upgrade reset. We need to be
		 * sure the netdev mac is in our filter list.
		 */
		if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
				      netdev->dev_addr))
			ionic_lif_addr(lif, netdev->dev_addr, true, true);
	} else {
		/* Update the netdev mac with the device's mac */
		memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
		addr.sa_family = AF_INET;
		err = eth_prepare_mac_addr_change(netdev, &addr);
		if (err) {
			netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
				    addr.sa_data, err);
			return 0;
		}

		eth_commit_mac_addr_change(netdev, &addr);
	}

	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
		   netdev->dev_addr);
	ionic_lif_addr(lif, netdev->dev_addr, true, true);

	return 0;
}

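/* Bring the LIF to an operational state: the lif_init dev_cmd, the
 * kernel doorbell page mapping, AdminQ and NotifyQ init, NIC feature
 * setup, rx filter init, and station MAC setup.
 */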
int ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	struct ionic_lif_init_comp comp;
	int dbpage_num;
	int err;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err)
		return err;

	lif->hw_index = le16_to_cpu(comp.hw_index);
	mutex_init(&lif->queue_lock);

	/* now that we have the hw_index we can figure out our doorbell page */
	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
	if (!lif->dbid_count) {
		dev_err(dev, "No doorbell pages, aborting\n");
		return -EINVAL;
	}

	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
	if (!lif->dbid_inuse) {
		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
		return -ENOMEM;
	}

	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
	set_bit(0, lif->dbid_inuse);
	lif->kern_pid = 0;

	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
	if (!lif->kern_dbpage) {
		dev_err(dev, "Cannot map dbpage, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dbid;
	}

	err = ionic_lif_adminq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	if (lif->ionic->nnqs_per_lif) {
		err = ionic_lif_notifyq_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_init_nic_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		err = ionic_rx_filters_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_station_set(lif);
	if (err)
		goto err_out_notifyq_deinit;

	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;

	set_bit(IONIC_LIF_F_INITED, lif->state);

	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);

	return 0;

err_out_notifyq_deinit:
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
err_out_adminq_deinit:
	ionic_lif_qcq_deinit(lif, lif->adminqcq);
	ionic_lif_reset(lif);
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
err_out_free_dbid:
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	return err;
}

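/* Empty placeholder for the netdev notifier's work struct; the
 * name-change event this driver cares about is handled synchronously
 * in ionic_lif_notify() below.
 */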
static void ionic_lif_notify_work(struct work_struct *ws)
{
}

static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
		sizeof(ctx.cmd.lif_setattr.name));

	ionic_adminq_post_wait(lif, &ctx);
}

static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
{
	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
		return NULL;

	return netdev_priv(netdev);
}

static int ionic_lif_notify(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(info);
	struct ionic *ionic = container_of(nb, struct ionic, nb);
	struct ionic_lif *lif = ionic_netdev_lif(ndev);

	if (!lif || lif->ionic != ionic)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGENAME:
		ionic_lif_set_netdev_info(lif);
		break;
	}

	return NOTIFY_DONE;
}

int ionic_lif_register(struct ionic_lif *lif)
{
	int err;

	INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);

	lif->ionic->nb.notifier_call = ionic_lif_notify;

	err = register_netdevice_notifier(&lif->ionic->nb);
	if (err)
		lif->ionic->nb.notifier_call = NULL;

	/* only register LIF0 for now */
	err = register_netdev(lif->netdev);
	if (err) {
		dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
		return err;
	}
	lif->registered = true;
	ionic_lif_set_netdev_info(lif);

	return 0;
}

void ionic_lif_unregister(struct ionic_lif *lif)
{
	if (lif->ionic->nb.notifier_call) {
		unregister_netdevice_notifier(&lif->ionic->nb);
		cancel_work_sync(&lif->ionic->nb_work);
		lif->ionic->nb.notifier_call = NULL;
	}

	if (lif->netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(lif->netdev);
	lif->registered = false;
}

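/* Query the firmware for the identity of each queue type we intend to
 * use, advertising the highest version we support from
 * ionic_qtype_versions.  The results gate optional features such as
 * the v1 Tx SG descriptor format used in ionic_reconfigure_queues().
 */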
static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
	union ionic_q_identity __iomem *q_ident;
	struct ionic *ionic = lif->ionic;
	struct ionic_dev *idev;
	int qtype;
	int err;

	idev = &lif->ionic->idev;
	q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;

	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];

		/* filter out the ones we know about */
		switch (qtype) {
		case IONIC_QTYPE_ADMINQ:
		case IONIC_QTYPE_NOTIFYQ:
		case IONIC_QTYPE_RXQ:
		case IONIC_QTYPE_TXQ:
			break;
		default:
			continue;
		}

		memset(qti, 0, sizeof(*qti));

		mutex_lock(&ionic->dev_cmd_lock);
		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
					     ionic_qtype_versions[qtype]);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
		if (!err) {
			qti->version = readb(&q_ident->version);
			qti->supported = readb(&q_ident->supported);
			qti->features = readq(&q_ident->features);
			qti->desc_sz = readw(&q_ident->desc_sz);
			qti->comp_sz = readw(&q_ident->comp_sz);
			qti->sg_desc_sz = readw(&q_ident->sg_desc_sz);
			qti->max_sg_elems = readw(&q_ident->max_sg_elems);
			qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
		}
		mutex_unlock(&ionic->dev_cmd_lock);

		if (err == -EINVAL) {
			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
			continue;
		} else if (err == -EIO) {
			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
			return;
		} else if (err) {
			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
				qtype, err);
			return;
		}

		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
			qtype, qti->version);
		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
			qtype, qti->supported);
		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
			qtype, qti->features);
		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
			qtype, qti->desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
			qtype, qti->comp_sz);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
			qtype, qti->sg_desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
			qtype, qti->max_sg_elems);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
			qtype, qti->sg_desc_stride);
	}
}

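/* Fetch the LIF identity block from the firmware via dev_cmd; its
 * contents (filter counts, queue counts, frame size limits) drive the
 * sizing decisions in ionic_lif_size() and ionic_lif_alloc().
 */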
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
		       union ionic_lif_identity *lid)
{
	struct ionic_dev *idev = &ionic->idev;
	size_t sz;
	int err;

	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err)
		return err;

	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
		le64_to_cpu(lid->capabilities));

	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
		le32_to_cpu(lid->eth.max_ucast_filters));
	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
		le32_to_cpu(lid->eth.max_mcast_filters));
	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
		le64_to_cpu(lid->eth.config.features));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
		le32_to_cpu(lid->eth.config.mtu));

	return 0;
}

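/* Size the driver's queue and interrupt usage against what the device
 * and the OS can provide: one interrupt for adminq/notifyq plus one
 * per TxRx queue pair, halving the notifyq, EQ, and queue-pair counts
 * in turn until the vector allocation succeeds.
 */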
int ionic_lif_size(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	unsigned int nintrs, dev_nintrs;
	union ionic_lif_config *lc;
	unsigned int ntxqs_per_lif;
	unsigned int nrxqs_per_lif;
	unsigned int neqs_per_lif;
	unsigned int nnqs_per_lif;
	unsigned int nxqs, neqs;
	unsigned int min_intrs;
	int err;

	lc = &ident->lif.eth.config;
	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);

	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
	nxqs = min(nxqs, num_online_cpus());
	neqs = min(neqs_per_lif, num_online_cpus());

try_again:
	/* interrupt usage:
	 *    1 for master lif adminq/notifyq
	 *    1 for each CPU for master lif TxRx queue pairs
	 *    whatever's left is for RDMA queues
	 */
	nintrs = 1 + nxqs + neqs;
	min_intrs = 2;		/* adminq + 1 TxRx queue pair */

	if (nintrs > dev_nintrs)
		goto try_fewer;

	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
	if (err < 0 && err != -ENOSPC) {
		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
		return err;
	}
	if (err == -ENOSPC)
		goto try_fewer;

	if (err != nintrs) {
		ionic_bus_free_irq_vectors(ionic);
		goto try_fewer;
	}

	ionic->nnqs_per_lif = nnqs_per_lif;
	ionic->neqs_per_lif = neqs;
	ionic->ntxqs_per_lif = nxqs;
	ionic->nrxqs_per_lif = nxqs;
	ionic->nintrs = nintrs;

	ionic_debugfs_add_sizes(ionic);

	return 0;

try_fewer:
	if (nnqs_per_lif > 1) {
		nnqs_per_lif >>= 1;
		goto try_again;
	}
	if (neqs > 1) {
		neqs >>= 1;
		goto try_again;
	}
	if (nxqs > 1) {
		nxqs >>= 1;
		goto try_again;
	}
	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
	return -ENOSPC;
}