1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
4 * Copyright (C) 2018 Marvell International Ltd.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #include <linux/module.h>
12 #include <linux/pci.h>
14 #include "rvu_struct.h"
19 #include "lmac_common.h"
21 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
22 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
23 int type, int chan_id);
24 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
51 enum nix_makr_fmt_indexes {
52 NIX_MARK_CFG_IP_DSCP_RED,
53 NIX_MARK_CFG_IP_DSCP_YELLOW,
54 NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
55 NIX_MARK_CFG_IP_ECN_RED,
56 NIX_MARK_CFG_IP_ECN_YELLOW,
57 NIX_MARK_CFG_IP_ECN_YELLOW_RED,
58 NIX_MARK_CFG_VLAN_DEI_RED,
59 NIX_MARK_CFG_VLAN_DEI_YELLOW,
60 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
64 /* For now considering MC resources needed for broadcast
65 * pkt replication only, i.e. 256 HWVFs + 12 PFs.
67 #define MC_TBL_SIZE MC_TBL_SZ_512
68 #define MC_BUF_CNT MC_BUF_CNT_128
71 struct hlist_node node;
75 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
79 /* If blkaddr is 0, return the first NIX block address */
81 return rvu->nix_blkaddr[blkaddr];
83 while (i + 1 < MAX_NIX_BLKS) {
84 if (rvu->nix_blkaddr[i] == blkaddr)
85 return rvu->nix_blkaddr[i + 1];
92 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
94 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
97 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
98 if (!pfvf->nixlf || blkaddr < 0)
103 int rvu_get_nixlf_count(struct rvu *rvu)
105 int blkaddr = 0, max = 0;
106 struct rvu_block *block;
108 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
110 block = &rvu->hw->block[blkaddr];
111 max += block->lf.max;
112 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
117 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
119 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
120 struct rvu_hwinfo *hw = rvu->hw;
123 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
124 if (!pfvf->nixlf || blkaddr < 0)
125 return NIX_AF_ERR_AF_LF_INVALID;
127 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
129 return NIX_AF_ERR_AF_LF_INVALID;
132 *nix_blkaddr = blkaddr;
137 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
138 struct nix_hw **nix_hw, int *blkaddr)
140 struct rvu_pfvf *pfvf;
142 pfvf = rvu_get_pfvf(rvu, pcifunc);
143 *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
144 if (!pfvf->nixlf || *blkaddr < 0)
145 return NIX_AF_ERR_AF_LF_INVALID;
147 *nix_hw = get_nix_hw(rvu->hw, *blkaddr);
149 return NIX_AF_ERR_INVALID_NIXBLK;
153 static void nix_mce_list_init(struct nix_mce_list *list, int max)
155 INIT_HLIST_HEAD(&list->head);
160 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
167 idx = mcast->next_free_mce;
168 mcast->next_free_mce += count;
172 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
174 int nix_blkaddr = 0, i = 0;
175 struct rvu *rvu = hw->rvu;
177 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
178 while (nix_blkaddr) {
179 if (blkaddr == nix_blkaddr && hw->nix)
181 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
187 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
191 /* Sync all in-flight RX packets to LLC/DRAM */
192 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
193 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
195 dev_err(rvu->dev, "NIX RX software sync failed\n");
198 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
199 int lvl, u16 pcifunc, u16 schq)
201 struct rvu_hwinfo *hw = rvu->hw;
202 struct nix_txsch *txsch;
203 struct nix_hw *nix_hw;
206 nix_hw = get_nix_hw(rvu->hw, blkaddr);
210 txsch = &nix_hw->txsch[lvl];
211 /* Check out of bounds */
212 if (schq >= txsch->schq.max)
215 mutex_lock(&rvu->rsrc_lock);
216 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
217 mutex_unlock(&rvu->rsrc_lock);
219 /* TLs aggregating traffic are shared across PF and VFs */
220 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
221 if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
227 if (map_func != pcifunc)
233 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
235 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
236 struct mac_ops *mac_ops;
237 int pkind, pf, vf, lbkid;
241 pf = rvu_get_pf(pcifunc);
242 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
246 case NIX_INTF_TYPE_CGX:
247 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
248 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
250 pkind = rvu_npc_get_pkind(rvu, pf);
253 "PF_Func 0x%x: Invalid pkind\n", pcifunc);
256 pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
257 pfvf->tx_chan_base = pfvf->rx_chan_base;
258 pfvf->rx_chan_cnt = 1;
259 pfvf->tx_chan_cnt = 1;
260 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
261 rvu_npc_set_pkind(rvu, pkind, pfvf);
263 mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
264 /* By default we enable pause frames */
265 if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
266 mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
268 lmac_id, true, true);
270 case NIX_INTF_TYPE_LBK:
271 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
273 /* If NIX1 block is present on the silicon then NIXes are
274 * assigned alternately for lbk interfaces. NIX0 should
275 * send packets on lbk link 1 channels and NIX1 should send
276 * on lbk link 0 channels for the communication between
280 if (rvu->hw->lbk_links > 1)
281 lbkid = vf & 0x1 ? 0 : 1;
283 /* Note that AF's VFs work in pairs and talk over consecutive
284 * loopback channels. Therefore if an odd number of AF VFs is
285 * enabled then the last VF remains with no pair.
287 pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
288 pfvf->tx_chan_base = vf & 0x1 ?
289 rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
290 rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
291 pfvf->rx_chan_cnt = 1;
292 pfvf->tx_chan_cnt = 1;
293 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
299 /* Add a UCAST forwarding rule in MCAM with this NIXLF attached
300 * RVU PF/VF's MAC address.
302 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
303 pfvf->rx_chan_base, pfvf->mac_addr);
305 /* Add this PF_FUNC to bcast pkt replication list */
306 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
309 "Bcast list, failed to enable PF_FUNC 0x%x\n",
313 /* Install MCAM rule matching Ethernet broadcast mac address */
314 rvu_npc_install_bcast_match_entry(rvu, pcifunc,
315 nixlf, pfvf->rx_chan_base);
317 pfvf->maxlen = NIC_HW_MIN_FRS;
318 pfvf->minlen = NIC_HW_MIN_FRS;
323 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
325 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
331 /* Remove this PF_FUNC from bcast pkt replication list */
332 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
335 "Bcast list, failed to disable PF_FUNC 0x%x\n",
339 /* Free and disable any MCAM entries used by this NIX LF */
340 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
343 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
344 struct nix_bp_cfg_req *req,
347 u16 pcifunc = req->hdr.pcifunc;
348 struct rvu_pfvf *pfvf;
349 int blkaddr, pf, type;
353 pf = rvu_get_pf(pcifunc);
354 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
355 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
358 pfvf = rvu_get_pfvf(rvu, pcifunc);
359 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
361 chan_base = pfvf->rx_chan_base + req->chan_base;
362 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
363 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
364 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
370 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
371 int type, int chan_id)
373 int bpid, blkaddr, lmac_chan_cnt;
374 struct rvu_hwinfo *hw = rvu->hw;
375 u16 cgx_bpid_cnt, lbk_bpid_cnt;
376 struct rvu_pfvf *pfvf;
380 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
381 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
382 lmac_chan_cnt = cfg & 0xFF;
384 cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
385 lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
387 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
389 /* Backpressure IDs range division
390 * CGX channels are mapped to (0 - 191) BPIDs
391 * LBK channels are mapped to (192 - 255) BPIDs
392 * SDP channels are mapped to (256 - 511) BPIDs
394 * Lmac channels and bpids are mapped as follows
395 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
396 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
397 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
400 case NIX_INTF_TYPE_CGX:
401 if ((req->chan_base + req->chan_cnt) > 15)
403 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
404 /* Assign bpid based on cgx, lmac and chan id */
405 bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
406 (lmac_id * lmac_chan_cnt) + req->chan_base;
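/* Worked example (illustrative, assuming 4 LMACs per CGX and 16
 * channels per LMAC as in the mapping above):
 * cgx(1), lmac(2), chan_base(3) => bpid = 1*4*16 + 2*16 + 3 = 99
 */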
408 if (req->bpid_per_chan)
410 if (bpid > cgx_bpid_cnt)
414 case NIX_INTF_TYPE_LBK:
415 if ((req->chan_base + req->chan_cnt) > 63)
417 bpid = cgx_bpid_cnt + req->chan_base;
418 if (req->bpid_per_chan)
420 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
429 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
430 struct nix_bp_cfg_req *req,
431 struct nix_bp_cfg_rsp *rsp)
433 int blkaddr, pf, type, chan_id = 0;
434 u16 pcifunc = req->hdr.pcifunc;
435 struct rvu_pfvf *pfvf;
440 pf = rvu_get_pf(pcifunc);
441 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
443 /* Enable backpressure only for CGX mapped PFs and LBK interface */
444 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
447 pfvf = rvu_get_pfvf(rvu, pcifunc);
448 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
450 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
451 chan_base = pfvf->rx_chan_base + req->chan_base;
454 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
456 dev_warn(rvu->dev, "Fail to enable backpressure\n");
460 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
461 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
462 cfg | (bpid & 0xFF) | BIT_ULL(16));
464 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
467 for (chan = 0; chan < req->chan_cnt; chan++) {
468 /* Map channel and the bpid assigned to it */
469 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
471 if (req->bpid_per_chan)
474 rsp->chan_cnt = req->chan_cnt;
479 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
480 u64 format, bool v4, u64 *fidx)
482 struct nix_lso_format field = {0};
484 /* IP's Length field */
485 field.layer = NIX_TXLAYER_OL3;
486 /* In IPv4, the length field is at offset 2 bytes, for IPv6 it's 4 */
487 field.offset = v4 ? 2 : 4;
488 field.sizem1 = 1; /* i.e. 2 bytes */
489 field.alg = NIX_LSOALG_ADD_PAYLEN;
490 rvu_write64(rvu, blkaddr,
491 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
494 /* No ID field in IPv6 header */
499 field.layer = NIX_TXLAYER_OL3;
501 field.sizem1 = 1; /* i.e. 2 bytes */
502 field.alg = NIX_LSOALG_ADD_SEGNUM;
503 rvu_write64(rvu, blkaddr,
504 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
508 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
509 u64 format, u64 *fidx)
511 struct nix_lso_format field = {0};
513 /* TCP's sequence number field */
514 field.layer = NIX_TXLAYER_OL4;
516 field.sizem1 = 3; /* i.e 4 bytes */
517 field.alg = NIX_LSOALG_ADD_OFFSET;
518 rvu_write64(rvu, blkaddr,
519 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
522 /* TCP's flags field */
523 field.layer = NIX_TXLAYER_OL4;
525 field.sizem1 = 1; /* 2 bytes */
526 field.alg = NIX_LSOALG_TCP_FLAGS;
527 rvu_write64(rvu, blkaddr,
528 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
532 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
534 u64 cfg, idx, fidx = 0;
536 /* Get max HW supported format indices */
537 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
538 nix_hw->lso.total = cfg;
541 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
542 /* For TSO, set first and middle segment flags to
543 * mask out PSH, RST & FIN flags in TCP packet
545 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
546 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
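/* 0xFFF2 keeps all TCP flag bits except FIN (bit 0), RST (bit 2) and
 * PSH (bit 3), i.e. exactly those three are masked out as described above.
 */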
547 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
549 /* Setup default static LSO formats
551 * Configure format fields for TCPv4 segmentation offload
553 idx = NIX_LSO_FORMAT_IDX_TSOV4;
554 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
555 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
557 /* Set rest of the fields to NOP */
558 for (; fidx < 8; fidx++) {
559 rvu_write64(rvu, blkaddr,
560 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
562 nix_hw->lso.in_use++;
564 /* Configure format fields for TCPv6 segmentation offload */
565 idx = NIX_LSO_FORMAT_IDX_TSOV6;
567 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
568 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
570 /* Set rest of the fields to NOP */
571 for (; fidx < 8; fidx++) {
572 rvu_write64(rvu, blkaddr,
573 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
575 nix_hw->lso.in_use++;
578 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
580 kfree(pfvf->rq_bmap);
581 kfree(pfvf->sq_bmap);
582 kfree(pfvf->cq_bmap);
584 qmem_free(rvu->dev, pfvf->rq_ctx);
586 qmem_free(rvu->dev, pfvf->sq_ctx);
588 qmem_free(rvu->dev, pfvf->cq_ctx);
590 qmem_free(rvu->dev, pfvf->rss_ctx);
591 if (pfvf->nix_qints_ctx)
592 qmem_free(rvu->dev, pfvf->nix_qints_ctx);
593 if (pfvf->cq_ints_ctx)
594 qmem_free(rvu->dev, pfvf->cq_ints_ctx);
596 pfvf->rq_bmap = NULL;
597 pfvf->cq_bmap = NULL;
598 pfvf->sq_bmap = NULL;
602 pfvf->rss_ctx = NULL;
603 pfvf->nix_qints_ctx = NULL;
604 pfvf->cq_ints_ctx = NULL;
607 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
608 struct rvu_pfvf *pfvf, int nixlf,
609 int rss_sz, int rss_grps, int hwctx_size,
612 int err, grp, num_indices;
614 /* RSS is not requested for this NIXLF */
617 num_indices = rss_sz * rss_grps;
619 /* Alloc NIX RSS HW context memory and config the base */
620 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
624 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
625 (u64)pfvf->rss_ctx->iova);
627 /* Config full RSS table size, enable RSS and caching */
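/* Assumed bit layout, consistent with the checks done in
 * rvu_nix_blk_aq_enq_inst(): BIT(4) is the RSS enable bit, BIT(36)
 * enables context caching, and the low 4 bits encode the table size
 * (read back there as 256UL << (cfg & 0xF)).
 */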
628 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
629 BIT_ULL(36) | BIT_ULL(4) |
630 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
632 /* Config RSS group offset and sizes */
633 for (grp = 0; grp < rss_grps; grp++)
634 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
635 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
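/* Illustrative example: with rss_sz = 256, the size field written
 * starting at bit 16 is ilog2(256) - 1 = 7 and group 'grp' begins at
 * indirection table index 256 * grp.
 */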
639 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
640 struct nix_aq_inst_s *inst)
642 struct admin_queue *aq = block->aq;
643 struct nix_aq_res_s *result;
647 result = (struct nix_aq_res_s *)aq->res->base;
649 /* Get the current head pointer, where this instruction will be appended */
650 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
651 head = (reg >> 4) & AQ_PTR_MASK;
653 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
654 (void *)inst, aq->inst->entry_sz);
655 memset(result, 0, sizeof(*result));
656 /* sync into memory */
659 /* Ring the doorbell and wait for result */
660 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
661 while (result->compcode == NIX_AQ_COMP_NOTDONE) {
669 if (result->compcode != NIX_AQ_COMP_GOOD)
670 /* TODO: Replace this with some error code */
676 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
677 struct nix_aq_enq_req *req,
678 struct nix_aq_enq_rsp *rsp)
680 struct rvu_hwinfo *hw = rvu->hw;
681 u16 pcifunc = req->hdr.pcifunc;
682 int nixlf, blkaddr, rc = 0;
683 struct nix_aq_inst_s inst;
684 struct rvu_block *block;
685 struct admin_queue *aq;
686 struct rvu_pfvf *pfvf;
691 blkaddr = nix_hw->blkaddr;
692 block = &hw->block[blkaddr];
695 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
696 return NIX_AF_ERR_AQ_ENQUEUE;
699 pfvf = rvu_get_pfvf(rvu, pcifunc);
700 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
702 /* Skip NIXLF check for broadcast MCE entry init */
703 if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
704 if (!pfvf->nixlf || nixlf < 0)
705 return NIX_AF_ERR_AF_LF_INVALID;
708 switch (req->ctype) {
709 case NIX_AQ_CTYPE_RQ:
710 /* Check if index exceeds max number of queues */
711 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
712 rc = NIX_AF_ERR_AQ_ENQUEUE;
714 case NIX_AQ_CTYPE_SQ:
715 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
716 rc = NIX_AF_ERR_AQ_ENQUEUE;
718 case NIX_AQ_CTYPE_CQ:
719 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
720 rc = NIX_AF_ERR_AQ_ENQUEUE;
722 case NIX_AQ_CTYPE_RSS:
723 /* Check if RSS is enabled and qidx is within range */
724 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
725 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
726 (req->qidx >= (256UL << (cfg & 0xF))))
727 rc = NIX_AF_ERR_AQ_ENQUEUE;
729 case NIX_AQ_CTYPE_MCE:
730 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
732 /* Check if index exceeds MCE list length */
733 if (!nix_hw->mcast.mce_ctx ||
734 (req->qidx >= (256UL << (cfg & 0xF))))
735 rc = NIX_AF_ERR_AQ_ENQUEUE;
737 /* Adding multicast lists for requests from PF/VFs is not
738 * yet supported, so ignore this.
741 rc = NIX_AF_ERR_AQ_ENQUEUE;
744 rc = NIX_AF_ERR_AQ_ENQUEUE;
750 /* Check if the SMQ pointed to by the SQ belongs to this PF/VF or not */
751 if (req->ctype == NIX_AQ_CTYPE_SQ &&
752 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
753 (req->op == NIX_AQ_INSTOP_WRITE &&
754 req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
755 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
756 pcifunc, req->sq.smq))
757 return NIX_AF_ERR_AQ_ENQUEUE;
760 memset(&inst, 0, sizeof(struct nix_aq_inst_s));
762 inst.cindex = req->qidx;
763 inst.ctype = req->ctype;
765 /* Currently we do not support enqueuing multiple instructions,
766 * so always choose the first entry in result memory.
768 inst.res_addr = (u64)aq->res->iova;
770 /* Hardware uses the same aq->res->base for updating the result of
771 * the previous instruction, hence wait here till it is done.
773 spin_lock(&aq->lock);
775 /* Clean result + context memory */
776 memset(aq->res->base, 0, aq->res->entry_sz);
777 /* Context needs to be written at RES_ADDR + 128 */
778 ctx = aq->res->base + 128;
779 /* Mask needs to be written at RES_ADDR + 256 */
780 mask = aq->res->base + 256;
783 case NIX_AQ_INSTOP_WRITE:
784 if (req->ctype == NIX_AQ_CTYPE_RQ)
785 memcpy(mask, &req->rq_mask,
786 sizeof(struct nix_rq_ctx_s));
787 else if (req->ctype == NIX_AQ_CTYPE_SQ)
788 memcpy(mask, &req->sq_mask,
789 sizeof(struct nix_sq_ctx_s));
790 else if (req->ctype == NIX_AQ_CTYPE_CQ)
791 memcpy(mask, &req->cq_mask,
792 sizeof(struct nix_cq_ctx_s));
793 else if (req->ctype == NIX_AQ_CTYPE_RSS)
794 memcpy(mask, &req->rss_mask,
795 sizeof(struct nix_rsse_s));
796 else if (req->ctype == NIX_AQ_CTYPE_MCE)
797 memcpy(mask, &req->mce_mask,
798 sizeof(struct nix_rx_mce_s));
800 case NIX_AQ_INSTOP_INIT:
801 if (req->ctype == NIX_AQ_CTYPE_RQ)
802 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
803 else if (req->ctype == NIX_AQ_CTYPE_SQ)
804 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
805 else if (req->ctype == NIX_AQ_CTYPE_CQ)
806 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
807 else if (req->ctype == NIX_AQ_CTYPE_RSS)
808 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
809 else if (req->ctype == NIX_AQ_CTYPE_MCE)
810 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
812 case NIX_AQ_INSTOP_NOP:
813 case NIX_AQ_INSTOP_READ:
814 case NIX_AQ_INSTOP_LOCK:
815 case NIX_AQ_INSTOP_UNLOCK:
818 rc = NIX_AF_ERR_AQ_ENQUEUE;
819 spin_unlock(&aq->lock);
823 /* Submit the instruction to AQ */
824 rc = nix_aq_enqueue_wait(rvu, block, &inst);
826 spin_unlock(&aq->lock);
830 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
831 if (req->op == NIX_AQ_INSTOP_INIT) {
832 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
833 __set_bit(req->qidx, pfvf->rq_bmap);
834 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
835 __set_bit(req->qidx, pfvf->sq_bmap);
836 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
837 __set_bit(req->qidx, pfvf->cq_bmap);
840 if (req->op == NIX_AQ_INSTOP_WRITE) {
841 if (req->ctype == NIX_AQ_CTYPE_RQ) {
842 ena = (req->rq.ena & req->rq_mask.ena) |
843 (test_bit(req->qidx, pfvf->rq_bmap) &
846 __set_bit(req->qidx, pfvf->rq_bmap);
848 __clear_bit(req->qidx, pfvf->rq_bmap);
850 if (req->ctype == NIX_AQ_CTYPE_SQ) {
851 ena = (req->rq.ena & req->sq_mask.ena) |
852 (test_bit(req->qidx, pfvf->sq_bmap) &
855 __set_bit(req->qidx, pfvf->sq_bmap);
857 __clear_bit(req->qidx, pfvf->sq_bmap);
859 if (req->ctype == NIX_AQ_CTYPE_CQ) {
860 ena = (req->rq.ena & req->cq_mask.ena) |
861 (test_bit(req->qidx, pfvf->cq_bmap) &
864 __set_bit(req->qidx, pfvf->cq_bmap);
866 __clear_bit(req->qidx, pfvf->cq_bmap);
871 /* Copy read context into mailbox */
872 if (req->op == NIX_AQ_INSTOP_READ) {
873 if (req->ctype == NIX_AQ_CTYPE_RQ)
874 memcpy(&rsp->rq, ctx,
875 sizeof(struct nix_rq_ctx_s));
876 else if (req->ctype == NIX_AQ_CTYPE_SQ)
877 memcpy(&rsp->sq, ctx,
878 sizeof(struct nix_sq_ctx_s));
879 else if (req->ctype == NIX_AQ_CTYPE_CQ)
880 memcpy(&rsp->cq, ctx,
881 sizeof(struct nix_cq_ctx_s));
882 else if (req->ctype == NIX_AQ_CTYPE_RSS)
883 memcpy(&rsp->rss, ctx,
884 sizeof(struct nix_rsse_s));
885 else if (req->ctype == NIX_AQ_CTYPE_MCE)
886 memcpy(&rsp->mce, ctx,
887 sizeof(struct nix_rx_mce_s));
891 spin_unlock(&aq->lock);
895 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
896 struct nix_aq_enq_rsp *rsp)
898 struct nix_hw *nix_hw;
901 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
903 return NIX_AF_ERR_AF_LF_INVALID;
905 nix_hw = get_nix_hw(rvu->hw, blkaddr);
909 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
912 static const char *nix_get_ctx_name(int ctype)
915 case NIX_AQ_CTYPE_CQ:
917 case NIX_AQ_CTYPE_SQ:
919 case NIX_AQ_CTYPE_RQ:
921 case NIX_AQ_CTYPE_RSS:
927 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
929 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
930 struct nix_aq_enq_req aq_req;
935 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
936 return NIX_AF_ERR_AQ_ENQUEUE;
938 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
939 aq_req.hdr.pcifunc = req->hdr.pcifunc;
941 if (req->ctype == NIX_AQ_CTYPE_CQ) {
943 aq_req.cq_mask.ena = 1;
944 aq_req.cq.bp_ena = 0;
945 aq_req.cq_mask.bp_ena = 1;
946 q_cnt = pfvf->cq_ctx->qsize;
947 bmap = pfvf->cq_bmap;
949 if (req->ctype == NIX_AQ_CTYPE_SQ) {
951 aq_req.sq_mask.ena = 1;
952 q_cnt = pfvf->sq_ctx->qsize;
953 bmap = pfvf->sq_bmap;
955 if (req->ctype == NIX_AQ_CTYPE_RQ) {
957 aq_req.rq_mask.ena = 1;
958 q_cnt = pfvf->rq_ctx->qsize;
959 bmap = pfvf->rq_bmap;
962 aq_req.ctype = req->ctype;
963 aq_req.op = NIX_AQ_INSTOP_WRITE;
965 for (qidx = 0; qidx < q_cnt; qidx++) {
966 if (!test_bit(qidx, bmap))
969 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
972 dev_err(rvu->dev, "Failed to disable %s:%d context\n",
973 nix_get_ctx_name(req->ctype), qidx);
980 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
981 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
983 struct nix_aq_enq_req lock_ctx_req;
986 if (req->op != NIX_AQ_INSTOP_INIT)
989 if (req->ctype == NIX_AQ_CTYPE_MCE ||
990 req->ctype == NIX_AQ_CTYPE_DYNO)
993 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
994 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
995 lock_ctx_req.ctype = req->ctype;
996 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
997 lock_ctx_req.qidx = req->qidx;
998 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
1001 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1003 nix_get_ctx_name(req->ctype), req->qidx);
1007 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1008 struct nix_aq_enq_req *req,
1009 struct nix_aq_enq_rsp *rsp)
1013 err = rvu_nix_aq_enq_inst(rvu, req, rsp);
1015 err = nix_lf_hwctx_lockdown(rvu, req);
1020 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1021 struct nix_aq_enq_req *req,
1022 struct nix_aq_enq_rsp *rsp)
1024 return rvu_nix_aq_enq_inst(rvu, req, rsp);
1027 /* CN10K mbox handler */
1028 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1029 struct nix_cn10k_aq_enq_req *req,
1030 struct nix_cn10k_aq_enq_rsp *rsp)
1032 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1033 (struct nix_aq_enq_rsp *)rsp);
1036 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1037 struct hwctx_disable_req *req,
1038 struct msg_rsp *rsp)
1040 return nix_lf_hwctx_disable(rvu, req);
1043 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1044 struct nix_lf_alloc_req *req,
1045 struct nix_lf_alloc_rsp *rsp)
1047 int nixlf, qints, hwctx_size, intf, err, rc = 0;
1048 struct rvu_hwinfo *hw = rvu->hw;
1049 u16 pcifunc = req->hdr.pcifunc;
1050 struct rvu_block *block;
1051 struct rvu_pfvf *pfvf;
1055 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1056 return NIX_AF_ERR_PARAM;
1059 req->way_mask &= 0xFFFF;
1061 pfvf = rvu_get_pfvf(rvu, pcifunc);
1062 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1063 if (!pfvf->nixlf || blkaddr < 0)
1064 return NIX_AF_ERR_AF_LF_INVALID;
1066 block = &hw->block[blkaddr];
1067 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1069 return NIX_AF_ERR_AF_LF_INVALID;
1071 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1072 if (req->npa_func) {
1073 /* If default, use 'this' NIXLF's PFFUNC */
1074 if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1075 req->npa_func = pcifunc;
1076 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1077 return NIX_AF_INVAL_NPA_PF_FUNC;
1080 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1081 if (req->sso_func) {
1082 /* If default, use 'this' NIXLF's PFFUNC */
1083 if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1084 req->sso_func = pcifunc;
1085 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1086 return NIX_AF_INVAL_SSO_PF_FUNC;
1089 /* If RSS is being enabled, check if requested config is valid.
1090 * RSS table size should be a power of two, otherwise
1091 * RSS_GRP::OFFSET + adder might go beyond that group or
1092 * the entire table won't be usable.
1094 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1095 !is_power_of_2(req->rss_sz)))
1096 return NIX_AF_ERR_RSS_SIZE_INVALID;
1099 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1100 return NIX_AF_ERR_RSS_GRPS_INVALID;
1102 /* Reset this NIX LF */
1103 err = rvu_lf_reset(rvu, block, nixlf);
1105 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1106 block->addr - BLKADDR_NIX0, nixlf);
1107 return NIX_AF_ERR_LF_RESET;
1110 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1112 /* Alloc NIX RQ HW context memory and config the base */
1113 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1114 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1118 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1122 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1123 (u64)pfvf->rq_ctx->iova);
1125 /* Set caching and queue count in HW */
1126 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1127 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1129 /* Alloc NIX SQ HW context memory and config the base */
1130 hwctx_size = 1UL << (ctx_cfg & 0xF);
1131 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1135 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1139 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1140 (u64)pfvf->sq_ctx->iova);
1142 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1143 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1145 /* Alloc NIX CQ HW context memory and config the base */
1146 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1147 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1151 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1155 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1156 (u64)pfvf->cq_ctx->iova);
1158 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1159 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1161 /* Initialize receive side scaling (RSS) */
1162 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1163 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1164 req->rss_grps, hwctx_size, req->way_mask);
1168 /* Alloc memory for CQINT's HW contexts */
1169 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1170 qints = (cfg >> 24) & 0xFFF;
1171 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1172 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1176 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1177 (u64)pfvf->cq_ints_ctx->iova);
1179 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1180 BIT_ULL(36) | req->way_mask << 20);
1182 /* Alloc memory for QINT's HW contexts */
1183 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1184 qints = (cfg >> 12) & 0xFFF;
1185 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1186 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1190 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1191 (u64)pfvf->nix_qints_ctx->iova);
1192 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1193 BIT_ULL(36) | req->way_mask << 20);
1195 /* Setup VLANX TPIDs.
1196 * Use VLAN1 for 802.1Q
1197 * and VLAN0 for 802.1AD.
1199 cfg = (0x8100ULL << 16) | 0x88A8ULL;
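/* i.e. VLAN1 TPID = 0x8100 (802.1Q) in bits 31:16 and
 * VLAN0 TPID = 0x88A8 (802.1AD) in bits 15:0, per the comment above.
 */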
1200 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1202 /* Enable LMTST for this NIX LF */
1203 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1205 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1207 cfg = req->npa_func;
1209 cfg |= (u64)req->sso_func << 16;
1211 cfg |= (u64)req->xqe_sz << 33;
1212 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1214 /* Config Rx pkt length, csum checks and apad enable / disable */
1215 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1217 /* Configure pkind for TX parse config */
1218 cfg = NPC_TX_DEF_PKIND;
1219 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1221 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1222 err = nix_interface_init(rvu, pcifunc, intf, nixlf);
1226 /* Disable NPC entries as NIXLF's contexts are not initialized yet */
1227 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1229 /* Configure RX VTAG Type 7 (strip) for vf vlan */
1230 rvu_write64(rvu, blkaddr,
1231 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1232 VTAGSIZE_T4 | VTAG_STRIP);
1237 nix_ctx_free(rvu, pfvf);
1241 /* Set macaddr of this PF/VF */
1242 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1244 /* set SQB size info */
1245 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1246 rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1247 rsp->rx_chan_base = pfvf->rx_chan_base;
1248 rsp->tx_chan_base = pfvf->tx_chan_base;
1249 rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1250 rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1251 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1252 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1253 /* Get HW supported stat count */
1254 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1255 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1256 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1257 /* Get count of CQ IRQs and error IRQs supported per LF */
1258 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1259 rsp->qints = ((cfg >> 12) & 0xFFF);
1260 rsp->cints = ((cfg >> 24) & 0xFFF);
1261 rsp->cgx_links = hw->cgx_links;
1262 rsp->lbk_links = hw->lbk_links;
1263 rsp->sdp_links = hw->sdp_links;
1268 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1269 struct msg_rsp *rsp)
1271 struct rvu_hwinfo *hw = rvu->hw;
1272 u16 pcifunc = req->hdr.pcifunc;
1273 struct rvu_block *block;
1274 int blkaddr, nixlf, err;
1275 struct rvu_pfvf *pfvf;
1277 pfvf = rvu_get_pfvf(rvu, pcifunc);
1278 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1279 if (!pfvf->nixlf || blkaddr < 0)
1280 return NIX_AF_ERR_AF_LF_INVALID;
1282 block = &hw->block[blkaddr];
1283 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1285 return NIX_AF_ERR_AF_LF_INVALID;
1287 if (req->flags & NIX_LF_DISABLE_FLOWS)
1288 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1290 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1292 /* Free any tx vtag def entries used by this NIX LF */
1293 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1294 nix_free_tx_vtag_entries(rvu, pcifunc);
1296 nix_interface_deinit(rvu, pcifunc, nixlf);
1298 /* Reset this NIX LF */
1299 err = rvu_lf_reset(rvu, block, nixlf);
1301 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1302 block->addr - BLKADDR_NIX0, nixlf);
1303 return NIX_AF_ERR_LF_RESET;
1306 nix_ctx_free(rvu, pfvf);
1311 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1312 struct nix_mark_format_cfg *req,
1313 struct nix_mark_format_cfg_rsp *rsp)
1315 u16 pcifunc = req->hdr.pcifunc;
1316 struct nix_hw *nix_hw;
1317 struct rvu_pfvf *pfvf;
1321 pfvf = rvu_get_pfvf(rvu, pcifunc);
1322 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1323 if (!pfvf->nixlf || blkaddr < 0)
1324 return NIX_AF_ERR_AF_LF_INVALID;
1326 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1330 cfg = (((u32)req->offset & 0x7) << 16) |
1331 (((u32)req->y_mask & 0xF) << 12) |
1332 (((u32)req->y_val & 0xF) << 8) |
1333 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1335 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1337 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1338 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1339 return NIX_AF_ERR_MARK_CFG_FAIL;
1342 rsp->mark_format_idx = rc;
1346 /* Disable shaping of pkts by a scheduler queue
1347 * at a given scheduler level.
1349 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1352 u64 cir_reg = 0, pir_reg = 0;
1356 case NIX_TXSCH_LVL_TL1:
1357 cir_reg = NIX_AF_TL1X_CIR(schq);
1358 pir_reg = 0; /* PIR not available at TL1 */
1360 case NIX_TXSCH_LVL_TL2:
1361 cir_reg = NIX_AF_TL2X_CIR(schq);
1362 pir_reg = NIX_AF_TL2X_PIR(schq);
1364 case NIX_TXSCH_LVL_TL3:
1365 cir_reg = NIX_AF_TL3X_CIR(schq);
1366 pir_reg = NIX_AF_TL3X_PIR(schq);
1368 case NIX_TXSCH_LVL_TL4:
1369 cir_reg = NIX_AF_TL4X_CIR(schq);
1370 pir_reg = NIX_AF_TL4X_PIR(schq);
1376 cfg = rvu_read64(rvu, blkaddr, cir_reg);
1377 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1381 cfg = rvu_read64(rvu, blkaddr, pir_reg);
1382 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1385 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1388 struct rvu_hwinfo *hw = rvu->hw;
1391 if (lvl >= hw->cap.nix_tx_aggr_lvl)
1394 /* Reset TL4's SDP link config */
1395 if (lvl == NIX_TXSCH_LVL_TL4)
1396 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1398 if (lvl != NIX_TXSCH_LVL_TL2)
1401 /* Reset TL2's CGX or LBK link config */
1402 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1403 rvu_write64(rvu, blkaddr,
1404 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1407 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1409 struct rvu_hwinfo *hw = rvu->hw;
1410 int pf = rvu_get_pf(pcifunc);
1411 u8 cgx_id = 0, lmac_id = 0;
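/* Link index layout assumed here (derived from the returns below and
 * from nix_get_txschq_range()): CGX LMAC links come first, LBK links
 * start at hw->cgx_links, and the SDP link follows at
 * hw->cgx_links + hw->lbk_links.
 */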
1413 if (is_afvf(pcifunc)) { /* LBK links */
1414 return hw->cgx_links;
1415 } else if (is_pf_cgxmapped(rvu, pf)) {
1416 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1417 return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1421 return hw->cgx_links + hw->lbk_links;
1424 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1425 int link, int *start, int *end)
1427 struct rvu_hwinfo *hw = rvu->hw;
1428 int pf = rvu_get_pf(pcifunc);
1430 if (is_afvf(pcifunc)) { /* LBK links */
1431 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1432 *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1433 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1434 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1435 *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1436 } else { /* SDP link */
1437 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1438 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1439 *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1443 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1444 struct nix_hw *nix_hw,
1445 struct nix_txsch_alloc_req *req)
1447 struct rvu_hwinfo *hw = rvu->hw;
1448 int schq, req_schq, free_cnt;
1449 struct nix_txsch *txsch;
1450 int link, start, end;
1452 txsch = &nix_hw->txsch[lvl];
1453 req_schq = req->schq_contig[lvl] + req->schq[lvl];
1458 link = nix_get_tx_link(rvu, pcifunc);
1460 /* For traffic aggregating scheduler level, one queue is enough */
1461 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1463 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1467 /* Get free SCHQ count and check if request can be accommodated */
1468 if (hw->cap.nix_fixed_txschq_mapping) {
1469 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1470 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1471 if (end <= txsch->schq.max && schq < end &&
1472 !test_bit(schq, txsch->schq.bmap))
1477 free_cnt = rvu_rsrc_free_count(&txsch->schq);
1480 if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1481 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1483 /* If contiguous queues are needed, check for availability */
1484 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1485 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1486 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1491 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1492 struct nix_txsch_alloc_rsp *rsp,
1493 int lvl, int start, int end)
1495 struct rvu_hwinfo *hw = rvu->hw;
1496 u16 pcifunc = rsp->hdr.pcifunc;
1499 /* For traffic aggregating levels, queue alloc is based
1500 * on the transmit link to which the PF_FUNC is mapped.
1502 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1503 /* A single TL queue is allocated */
1504 if (rsp->schq_contig[lvl]) {
1505 rsp->schq_contig[lvl] = 1;
1506 rsp->schq_contig_list[lvl][0] = start;
1509 /* Both contig and non-contig reqs don't make sense here */
1510 if (rsp->schq_contig[lvl])
1513 if (rsp->schq[lvl]) {
1515 rsp->schq_list[lvl][0] = start;
1520 /* Adjust the queue request count if HW supports
1521 * only one queue per level configuration.
1523 if (hw->cap.nix_fixed_txschq_mapping) {
1524 idx = pcifunc & RVU_PFVF_FUNC_MASK;
1526 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1527 rsp->schq_contig[lvl] = 0;
1532 if (rsp->schq_contig[lvl]) {
1533 rsp->schq_contig[lvl] = 1;
1534 set_bit(schq, txsch->schq.bmap);
1535 rsp->schq_contig_list[lvl][0] = schq;
1537 } else if (rsp->schq[lvl]) {
1539 set_bit(schq, txsch->schq.bmap);
1540 rsp->schq_list[lvl][0] = schq;
1545 /* Allocate contiguous queue indices requested first */
1546 if (rsp->schq_contig[lvl]) {
1547 schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1548 txsch->schq.max, start,
1549 rsp->schq_contig[lvl], 0);
1551 rsp->schq_contig[lvl] = 0;
1552 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1553 set_bit(schq, txsch->schq.bmap);
1554 rsp->schq_contig_list[lvl][idx] = schq;
1559 /* Allocate non-contiguous queue indices */
1560 if (rsp->schq[lvl]) {
1562 for (schq = start; schq < end; schq++) {
1563 if (!test_bit(schq, txsch->schq.bmap)) {
1564 set_bit(schq, txsch->schq.bmap);
1565 rsp->schq_list[lvl][idx++] = schq;
1567 if (idx == rsp->schq[lvl])
1570 /* Update how many were allocated */
1571 rsp->schq[lvl] = idx;
1575 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1576 struct nix_txsch_alloc_req *req,
1577 struct nix_txsch_alloc_rsp *rsp)
1579 struct rvu_hwinfo *hw = rvu->hw;
1580 u16 pcifunc = req->hdr.pcifunc;
1581 int link, blkaddr, rc = 0;
1582 int lvl, idx, start, end;
1583 struct nix_txsch *txsch;
1584 struct rvu_pfvf *pfvf;
1585 struct nix_hw *nix_hw;
1589 pfvf = rvu_get_pfvf(rvu, pcifunc);
1590 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1591 if (!pfvf->nixlf || blkaddr < 0)
1592 return NIX_AF_ERR_AF_LF_INVALID;
1594 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1598 mutex_lock(&rvu->rsrc_lock);
1600 /* Check if request is valid as per HW capabilities
1601 * and can be accommodated.
1603 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1604 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1609 /* Allocate requested Tx scheduler queues */
1610 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1611 txsch = &nix_hw->txsch[lvl];
1612 pfvf_map = txsch->pfvf_map;
1614 if (!req->schq[lvl] && !req->schq_contig[lvl])
1617 rsp->schq[lvl] = req->schq[lvl];
1618 rsp->schq_contig[lvl] = req->schq_contig[lvl];
1620 link = nix_get_tx_link(rvu, pcifunc);
1622 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1625 } else if (hw->cap.nix_fixed_txschq_mapping) {
1626 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1629 end = txsch->schq.max;
1632 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1634 /* Reset queue config */
1635 for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1636 schq = rsp->schq_contig_list[lvl][idx];
1637 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1638 NIX_TXSCHQ_CFG_DONE))
1639 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1640 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1641 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1644 for (idx = 0; idx < req->schq[lvl]; idx++) {
1645 schq = rsp->schq_list[lvl][idx];
1646 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1647 NIX_TXSCHQ_CFG_DONE))
1648 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1649 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1650 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1654 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1655 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1656 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1657 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1658 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1661 rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1663 mutex_unlock(&rvu->rsrc_lock);
1667 static void nix_smq_flush(struct rvu *rvu, int blkaddr,
1668 int smq, u16 pcifunc, int nixlf)
1670 int pf = rvu_get_pf(pcifunc);
1671 u8 cgx_id = 0, lmac_id = 0;
1672 int err, restore_tx_en = 0;
1675 /* enable cgx tx if disabled */
1676 if (is_pf_cgxmapped(rvu, pf)) {
1677 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1678 restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1682 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1683 /* Do SMQ flush and set enqueue xoff */
1684 cfg |= BIT_ULL(50) | BIT_ULL(49);
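/* Assumed bit roles, from the poll on BIT(49) below: BIT(49) is the
 * flush trigger that hardware clears on completion, BIT(50) sets
 * enqueue xoff so no new packets enter the SMQ while it drains.
 */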
1685 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
1687 /* Disable backpressure from physical link,
1688 * otherwise SMQ flush may stall.
1690 rvu_cgx_enadis_rx_bp(rvu, pf, false);
1692 /* Wait for flush to complete */
1693 err = rvu_poll_reg(rvu, blkaddr,
1694 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1697 "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1699 rvu_cgx_enadis_rx_bp(rvu, pf, true);
1700 /* restore cgx tx state */
1702 cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1705 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1707 int blkaddr, nixlf, lvl, schq, err;
1708 struct rvu_hwinfo *hw = rvu->hw;
1709 struct nix_txsch *txsch;
1710 struct nix_hw *nix_hw;
1712 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1714 return NIX_AF_ERR_AF_LF_INVALID;
1716 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1720 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1722 return NIX_AF_ERR_AF_LF_INVALID;
1724 /* Disable TL2/3 queue links before SMQ flush */
1725 mutex_lock(&rvu->rsrc_lock);
1726 for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1727 if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1730 txsch = &nix_hw->txsch[lvl];
1731 for (schq = 0; schq < txsch->schq.max; schq++) {
1732 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1734 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1739 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1740 for (schq = 0; schq < txsch->schq.max; schq++) {
1741 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1743 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1746 /* Now free scheduler queues to free pool */
1747 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1748 /* TLs above aggregation level are shared across the PF
1749 * and all its VFs, hence skip freeing them.
1751 if (lvl >= hw->cap.nix_tx_aggr_lvl)
1754 txsch = &nix_hw->txsch[lvl];
1755 for (schq = 0; schq < txsch->schq.max; schq++) {
1756 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1758 rvu_free_rsrc(&txsch->schq, schq);
1759 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1762 mutex_unlock(&rvu->rsrc_lock);
1764 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1765 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1766 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1768 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1773 static int nix_txschq_free_one(struct rvu *rvu,
1774 struct nix_txsch_free_req *req)
1776 struct rvu_hwinfo *hw = rvu->hw;
1777 u16 pcifunc = req->hdr.pcifunc;
1778 int lvl, schq, nixlf, blkaddr;
1779 struct nix_txsch *txsch;
1780 struct nix_hw *nix_hw;
1783 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1785 return NIX_AF_ERR_AF_LF_INVALID;
1787 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1791 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1793 return NIX_AF_ERR_AF_LF_INVALID;
1795 lvl = req->schq_lvl;
1797 txsch = &nix_hw->txsch[lvl];
1799 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1802 pfvf_map = txsch->pfvf_map;
1803 mutex_lock(&rvu->rsrc_lock);
1805 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1806 mutex_unlock(&rvu->rsrc_lock);
1810 /* Flush if it is an SMQ. The onus of disabling
1811 * TL2/3 queue links before SMQ flush is on the user
1813 if (lvl == NIX_TXSCH_LVL_SMQ)
1814 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1816 /* Free the resource */
1817 rvu_free_rsrc(&txsch->schq, schq);
1818 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1819 mutex_unlock(&rvu->rsrc_lock);
1822 return NIX_AF_ERR_TLX_INVALID;
1825 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1826 struct nix_txsch_free_req *req,
1827 struct msg_rsp *rsp)
1829 if (req->flags & TXSCHQ_FREE_ALL)
1830 return nix_txschq_free(rvu, req->hdr.pcifunc);
1832 return nix_txschq_free_one(rvu, req);
1835 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1836 int lvl, u64 reg, u64 regval)
1838 u64 regbase = reg & 0xFFFF;
1841 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1844 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1845 /* Check if this schq belongs to this PF/VF or not */
1846 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1849 parent = (regval >> 16) & 0x1FF;
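/* The parent scheduler queue index is taken from bits 24:16 of the
 * *_PARENT register value being written.
 */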
1850 /* Validate MDQ's TL4 parent */
1851 if (regbase == NIX_AF_MDQX_PARENT(0) &&
1852 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1855 /* Validate TL4's TL3 parent */
1856 if (regbase == NIX_AF_TL4X_PARENT(0) &&
1857 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1860 /* Validate TL3's TL2 parent */
1861 if (regbase == NIX_AF_TL3X_PARENT(0) &&
1862 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1865 /* Validate TL2's TL1 parent */
1866 if (regbase == NIX_AF_TL2X_PARENT(0) &&
1867 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1873 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1877 if (hw->cap.nix_shaping)
1880 /* If shaping and coloring are not supported, then
1881 * *_CIR and *_PIR registers should not be configured.
1883 regbase = reg & 0xFFFF;
1886 case NIX_TXSCH_LVL_TL1:
1887 if (regbase == NIX_AF_TL1X_CIR(0))
1890 case NIX_TXSCH_LVL_TL2:
1891 if (regbase == NIX_AF_TL2X_CIR(0) ||
1892 regbase == NIX_AF_TL2X_PIR(0))
1895 case NIX_TXSCH_LVL_TL3:
1896 if (regbase == NIX_AF_TL3X_CIR(0) ||
1897 regbase == NIX_AF_TL3X_PIR(0))
1900 case NIX_TXSCH_LVL_TL4:
1901 if (regbase == NIX_AF_TL4X_CIR(0) ||
1902 regbase == NIX_AF_TL4X_PIR(0))
1909 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1910 u16 pcifunc, int blkaddr)
1915 schq = nix_get_tx_link(rvu, pcifunc);
1916 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1917 /* Skip if PF has already done the config */
1918 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1920 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1921 (TXSCH_TL1_DFLT_RR_PRIO << 1));
1922 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1923 TXSCH_TL1_DFLT_RR_QTM);
1924 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1925 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1928 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1929 struct nix_txschq_config *req,
1930 struct msg_rsp *rsp)
1932 struct rvu_hwinfo *hw = rvu->hw;
1933 u16 pcifunc = req->hdr.pcifunc;
1934 u64 reg, regval, schq_regbase;
1935 struct nix_txsch *txsch;
1936 struct nix_hw *nix_hw;
1937 int blkaddr, idx, err;
1941 if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1942 req->num_regs > MAX_REGS_PER_MBOX_MSG)
1943 return NIX_AF_INVAL_TXSCHQ_CFG;
1945 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1949 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1953 txsch = &nix_hw->txsch[req->lvl];
1954 pfvf_map = txsch->pfvf_map;
1956 if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
1957 pcifunc & RVU_PFVF_FUNC_MASK) {
1958 mutex_lock(&rvu->rsrc_lock);
1959 if (req->lvl == NIX_TXSCH_LVL_TL1)
1960 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
1961 mutex_unlock(&rvu->rsrc_lock);
1965 for (idx = 0; idx < req->num_regs; idx++) {
1966 reg = req->reg[idx];
1967 regval = req->regval[idx];
1968 schq_regbase = reg & 0xFFFF;
1970 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
1971 txsch->lvl, reg, regval))
1972 return NIX_AF_INVAL_TXSCHQ_CFG;
1974 /* Check if shaping and coloring are supported */
1975 if (!is_txschq_shaping_valid(hw, req->lvl, reg))
1978 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1979 if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1980 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1982 regval &= ~(0x7FULL << 24);
1983 regval |= ((u64)nixlf << 24);
1986 /* Clear 'BP_ENA' config, if it's not allowed */
1987 if (!hw->cap.nix_tx_link_bp) {
1988 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
1989 (schq_regbase & 0xFF00) ==
1990 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
1991 regval &= ~BIT_ULL(13);
1994 /* Mark config as done for TL1 by PF */
1995 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
1996 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
1997 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1998 mutex_lock(&rvu->rsrc_lock);
1999 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2000 NIX_TXSCHQ_CFG_DONE);
2001 mutex_unlock(&rvu->rsrc_lock);
2004 /* SMQ flush is special, hence split the register write such
2005 * that the flush happens first and the rest of the bits are written later.
2007 if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2008 (regval & BIT_ULL(49))) {
2009 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2010 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2011 regval &= ~BIT_ULL(49);
2013 rvu_write64(rvu, blkaddr, reg, regval);
2019 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2020 struct nix_vtag_config *req)
2022 u64 regval = req->vtag_size;
2024 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2025 req->vtag_size > VTAGSIZE_T8)
2028 /* RX VTAG Type 7 reserved for vf vlan */
2029 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2030 return NIX_AF_ERR_RX_VTAG_INUSE;
2032 if (req->rx.capture_vtag)
2033 regval |= BIT_ULL(5);
2034 if (req->rx.strip_vtag)
2035 regval |= BIT_ULL(4);
2037 rvu_write64(rvu, blkaddr,
2038 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2042 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2043 u16 pcifunc, int index)
2045 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2046 struct nix_txvlan *vlan = &nix_hw->txvlan;
2048 if (vlan->entry2pfvf_map[index] != pcifunc)
2049 return NIX_AF_ERR_PARAM;
2051 rvu_write64(rvu, blkaddr,
2052 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2053 rvu_write64(rvu, blkaddr,
2054 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2056 vlan->entry2pfvf_map[index] = 0;
2057 rvu_free_rsrc(&vlan->rsrc, index);
2062 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2064 struct nix_txvlan *vlan;
2065 struct nix_hw *nix_hw;
2068 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2072 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2073 vlan = &nix_hw->txvlan;
2075 mutex_lock(&vlan->rsrc_lock);
2076 /* Scan all the entries and free the ones mapped to 'pcifunc' */
2077 for (index = 0; index < vlan->rsrc.max; index++) {
2078 if (vlan->entry2pfvf_map[index] == pcifunc)
2079 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2081 mutex_unlock(&vlan->rsrc_lock);
2084 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2087 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2088 struct nix_txvlan *vlan = &nix_hw->txvlan;
2092 mutex_lock(&vlan->rsrc_lock);
2094 index = rvu_alloc_rsrc(&vlan->rsrc);
2096 mutex_unlock(&vlan->rsrc_lock);
2100 mutex_unlock(&vlan->rsrc_lock);
2102 regval = size ? vtag : vtag << 32;
2104 rvu_write64(rvu, blkaddr,
2105 NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2106 rvu_write64(rvu, blkaddr,
2107 NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2112 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2113 struct nix_vtag_config *req)
2115 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2116 struct nix_txvlan *vlan = &nix_hw->txvlan;
2117 u16 pcifunc = req->hdr.pcifunc;
2118 int idx0 = req->tx.vtag0_idx;
2119 int idx1 = req->tx.vtag1_idx;
2122 if (req->tx.free_vtag0 && req->tx.free_vtag1)
2123 if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2124 vlan->entry2pfvf_map[idx1] != pcifunc)
2125 return NIX_AF_ERR_PARAM;
2127 mutex_lock(&vlan->rsrc_lock);
2129 if (req->tx.free_vtag0) {
2130 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2135 if (req->tx.free_vtag1)
2136 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2139 mutex_unlock(&vlan->rsrc_lock);
2143 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2144 struct nix_vtag_config *req,
2145 struct nix_vtag_config_rsp *rsp)
2147 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2148 struct nix_txvlan *vlan = &nix_hw->txvlan;
2149 u16 pcifunc = req->hdr.pcifunc;
2151 if (req->tx.cfg_vtag0) {
2153 nix_tx_vtag_alloc(rvu, blkaddr,
2154 req->tx.vtag0, req->vtag_size);
2156 if (rsp->vtag0_idx < 0)
2157 return NIX_AF_ERR_TX_VTAG_NOSPC;
2159 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2162 if (req->tx.cfg_vtag1) {
2164 nix_tx_vtag_alloc(rvu, blkaddr,
2165 req->tx.vtag1, req->vtag_size);
2167 if (rsp->vtag1_idx < 0)
2170 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2176 if (req->tx.cfg_vtag0)
2177 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2179 return NIX_AF_ERR_TX_VTAG_NOSPC;
2182 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2183 struct nix_vtag_config *req,
2184 struct nix_vtag_config_rsp *rsp)
2186 u16 pcifunc = req->hdr.pcifunc;
2187 int blkaddr, nixlf, err;
2189 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2193 if (req->cfg_type) {
2194 /* rx vtag configuration */
2195 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2197 return NIX_AF_ERR_PARAM;
2199 /* tx vtag configuration */
2200 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2201 (req->tx.free_vtag0 || req->tx.free_vtag1))
2202 return NIX_AF_ERR_PARAM;
2204 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2205 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2207 if (req->tx.free_vtag0 || req->tx.free_vtag1)
2208 return nix_tx_vtag_decfg(rvu, blkaddr, req);
2214 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2215 int mce, u8 op, u16 pcifunc, int next, bool eol)
2217 struct nix_aq_enq_req aq_req;
2220 aq_req.hdr.pcifunc = 0;
2221 aq_req.ctype = NIX_AQ_CTYPE_MCE;
2225 /* Use RSS with RSS index 0 */
2227 aq_req.mce.index = 0;
2228 aq_req.mce.eol = eol;
2229 aq_req.mce.pf_func = pcifunc;
2230 aq_req.mce.next = next;
2232 /* All fields valid */
2233 *(u64 *)(&aq_req.mce_mask) = ~0ULL;
2235 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2237 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2238 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
2244 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
2245 u16 pcifunc, bool add)
2247 struct mce *mce, *tail = NULL;
2248 bool delete = false;
2250 /* Scan through the current list */
2251 hlist_for_each_entry(mce, &mce_list->head, node) {
2252 /* If already exists, then delete */
2253 if (mce->pcifunc == pcifunc && !add) {
2256 } else if (mce->pcifunc == pcifunc && add) {
2257 /* entry already exists */
2264 hlist_del(&mce->node);
2273 /* Add a new one to the list, at the tail */
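/* If no tail was found the list is empty and the new entry becomes
 * the head; otherwise it is linked after the current tail so list
 * order is preserved.
 */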
2274 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2277 mce->pcifunc = pcifunc;
2279 hlist_add_head(&mce->node, &mce_list->head);
2281 hlist_add_behind(&mce->node, &tail->node);
2286 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
2287 struct nix_mce_list *mce_list,
2288 int mce_idx, int mcam_index, bool add)
2290 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
2291 struct npc_mcam *mcam = &rvu->hw->mcam;
2292 struct nix_mcast *mcast;
2293 struct nix_hw *nix_hw;
2299 /* Get this PF/VF func's MCE index */
2300 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
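/* Reader note: the FUNC bits of pcifunc (0 for the PF itself, 1..numvfs
 * for its VFs) are used directly as the offset into this PF's MCE range,
 * matching the per-PF layout created in nix_setup_mce_tables().
 */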
2302 if (idx > (mce_idx + mce_list->max)) {
2304 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2305 __func__, idx, mce_list->max,
2306 pcifunc >> RVU_PFVF_PF_SHIFT);
2310 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
2314 mcast = &nix_hw->mcast;
2315 mutex_lock(&mcast->mce_lock);
2317 err = nix_update_mce_list_entry(mce_list, pcifunc, add);
2321 /* Disable MCAM entry in NPC */
2322 if (!mce_list->count) {
2323 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2324 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
2328 /* Dump the updated list to HW */
2330 last_idx = idx + mce_list->count - 1;
2331 hlist_for_each_entry(mce, &mce_list->head, node) {
2336 /* EOL should be set in last MCE */
2337 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
2338 mce->pcifunc, next_idx,
2339 (next_idx > last_idx) ? true : false);
2346 mutex_unlock(&mcast->mce_lock);
2350 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
2351 struct nix_mce_list **mce_list, int *mce_idx)
2353 struct rvu_hwinfo *hw = rvu->hw;
2354 struct rvu_pfvf *pfvf;
2356 if (!hw->cap.nix_rx_multicast ||
2357 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
2363 /* Get this PF/VF func's MCE index */
2364 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2366 if (type == NIXLF_BCAST_ENTRY) {
2367 *mce_list = &pfvf->bcast_mce_list;
2368 *mce_idx = pfvf->bcast_mce_idx;
2369 } else if (type == NIXLF_ALLMULTI_ENTRY) {
2370 *mce_list = &pfvf->mcast_mce_list;
2371 *mce_idx = pfvf->mcast_mce_idx;
2372 } else if (type == NIXLF_PROMISC_ENTRY) {
2373 *mce_list = &pfvf->promisc_mce_list;
2374 *mce_idx = pfvf->promisc_mce_idx;
2381 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
2384 int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
2385 struct npc_mcam *mcam = &rvu->hw->mcam;
2386 struct rvu_hwinfo *hw = rvu->hw;
2387 struct nix_mce_list *mce_list;
2389 /* skip multicast pkt replication for AF's VFs */
2390 if (is_afvf(pcifunc))
2393 if (!hw->cap.nix_rx_multicast)
2396 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2400 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2404 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
2406 mcam_index = npc_get_nixlf_mcam_index(mcam,
2407 pcifunc & ~RVU_PFVF_FUNC_MASK,
2409 err = nix_update_mce_list(rvu, pcifunc, mce_list,
2410 mce_idx, mcam_index, add);
2414 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2416 struct nix_mcast *mcast = &nix_hw->mcast;
2417 int err, pf, numvfs, idx;
2418 struct rvu_pfvf *pfvf;
2422 /* Skip PF0 (i.e. AF) */
2423 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2424 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2425 /* If PF is not enabled, nothing to do */
2426 if (!((cfg >> 20) & 0x01))
2428 /* Get numVFs attached to this PF */
2429 numvfs = (cfg >> 12) & 0xFF;
2431 pfvf = &rvu->pf[pf];
2433 /* Is this NIX0/1 block mapped to this PF? */
2434 if (pfvf->nix_blkaddr != nix_hw->blkaddr)
2437 /* save start idx of broadcast mce list */
2438 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2439 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2441 /* save start idx of multicast mce list */
2442 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2443 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
2445 /* save the start idx of promisc mce list */
2446 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2447 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
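/* Illustrative example of the resulting layout: a CGX mapped PF with
 * 4 VFs gets three contiguous MCE ranges of (numvfs + 1) = 5 entries
 * each, e.g. bcast_mce_idx..bcast_mce_idx + 4 for broadcast,
 * mcast_mce_idx..mcast_mce_idx + 4 for allmulti and
 * promisc_mce_idx..promisc_mce_idx + 4 for promisc replication.
 * Entry 0 of each range is the PF itself, entries 1..numvfs its VFs.
 */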
2449 for (idx = 0; idx < (numvfs + 1); idx++) {
2450 /* idx-0 is for PF, followed by VFs */
2451 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2453 /* Add dummy entries now, so that we don't have to check
2454 * for whether AQ_OP should be INIT/WRITE later on.
2455 * Will be updated when a NIXLF is attached/detached to
2456 * these PF/VFs.
2457 */
2458 err = nix_blk_setup_mce(rvu, nix_hw,
2459 pfvf->bcast_mce_idx + idx,
2465 /* add dummy entries to multicast mce list */
2466 err = nix_blk_setup_mce(rvu, nix_hw,
2467 pfvf->mcast_mce_idx + idx,
2473 /* add dummy entries to promisc mce list */
2474 err = nix_blk_setup_mce(rvu, nix_hw,
2475 pfvf->promisc_mce_idx + idx,
2485 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2487 struct nix_mcast *mcast = &nix_hw->mcast;
2488 struct rvu_hwinfo *hw = rvu->hw;
2491 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2492 size = (1ULL << size);
2494 /* Alloc memory for multicast/mirror replication entries */
2495 err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2496 (256UL << MC_TBL_SIZE), size);
2500 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2501 (u64)mcast->mce_ctx->iova);
2503 /* Set max list length equal to max no of VFs per PF + PF itself */
2504 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2505 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2507 /* Alloc memory for multicast replication buffers */
2508 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2509 err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2510 (8UL << MC_BUF_CNT), size);
2514 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2515 (u64)mcast->mcast_buf->iova);
2517 /* Alloc pkind for NIX internal RX multicast/mirror replay */
2518 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2520 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2521 BIT_ULL(63) | (mcast->replay_pkind << 24) |
2522 BIT_ULL(20) | MC_BUF_CNT);
2524 mutex_init(&mcast->mce_lock);
2526 return nix_setup_mce_tables(rvu, nix_hw);
2529 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
2531 struct nix_txvlan *vlan = &nix_hw->txvlan;
2534 /* Allocate resource bitmap for tx vtag def registers */
2535 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
2536 err = rvu_alloc_bitmap(&vlan->rsrc);
2540 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
2541 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
2542 sizeof(u16), GFP_KERNEL);
2543 if (!vlan->entry2pfvf_map)
2546 mutex_init(&vlan->rsrc_lock);
2550 kfree(vlan->rsrc.bmap);
2554 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2556 struct nix_txsch *txsch;
2560 /* Get scheduler queue count of each type and alloc
2561 * bitmap for each for alloc/free/attach operations.
2563 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2564 txsch = &nix_hw->txsch[lvl];
2567 case NIX_TXSCH_LVL_SMQ:
2568 reg = NIX_AF_MDQ_CONST;
2570 case NIX_TXSCH_LVL_TL4:
2571 reg = NIX_AF_TL4_CONST;
2573 case NIX_TXSCH_LVL_TL3:
2574 reg = NIX_AF_TL3_CONST;
2576 case NIX_TXSCH_LVL_TL2:
2577 reg = NIX_AF_TL2_CONST;
2579 case NIX_TXSCH_LVL_TL1:
2580 reg = NIX_AF_TL1_CONST;
2583 cfg = rvu_read64(rvu, blkaddr, reg);
2584 txsch->schq.max = cfg & 0xFFFF;
2585 err = rvu_alloc_bitmap(&txsch->schq);
2589 /* Allocate memory for scheduler queues to
2590 * PF/VF pcifunc mapping info.
2592 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2593 sizeof(u32), GFP_KERNEL);
2594 if (!txsch->pfvf_map)
2596 for (schq = 0; schq < txsch->schq.max; schq++)
2597 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
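/* Reader note: TXSCH_MAP() is assumed to pack the owning pcifunc and
 * the scheduler queue state flags into a single u32 map entry, so
 * initialising every entry to TXSCH_MAP(0, NIX_TXSCHQ_FREE) marks all
 * queues of this level as unowned and free.
 */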
2602 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2603 int blkaddr, u32 cfg)
2607 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2608 if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2611 if (fmt_idx >= nix_hw->mark_format.total)
2614 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2615 nix_hw->mark_format.cfg[fmt_idx] = cfg;
2616 nix_hw->mark_format.in_use++;
2620 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2624 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
2625 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
2626 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
2627 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
2628 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
2629 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
2630 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
2631 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
2632 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2637 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2638 nix_hw->mark_format.total = (u8)total;
2639 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2641 if (!nix_hw->mark_format.cfg)
2643 for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2644 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2646 dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2653 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2655 /* CN10K supports LBK FIFO size 72 KB */
2656 if (rvu->hw->lbk_bufsize == 0x12000)
2657 *max_mtu = CN10K_LBK_LINK_MAX_FRS;
2659 *max_mtu = NIC_HW_MAX_FRS;
2662 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2664 /* RPM supports FIFO len 128 KB */
2665 if (rvu_cgx_get_fifolen(rvu) == 0x20000)
2666 *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
2668 *max_mtu = NIC_HW_MAX_FRS;
2671 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
2672 struct nix_hw_info *rsp)
2674 u16 pcifunc = req->hdr.pcifunc;
2677 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2679 return NIX_AF_ERR_AF_LF_INVALID;
2681 if (is_afvf(pcifunc))
2682 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
2684 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
2686 rsp->min_mtu = NIC_HW_MIN_FRS;
2690 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2691 struct msg_rsp *rsp)
2693 u16 pcifunc = req->hdr.pcifunc;
2694 int i, nixlf, blkaddr, err;
2697 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2701 /* Get stats count supported by HW */
2702 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2704 /* Reset tx stats */
2705 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2706 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2708 /* Reset rx stats */
2709 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2710 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2715 /* Returns the ALG index to be set into NPC_RX_ACTION */
2716 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2720 /* Scan over existing algo entries to find a match */
2721 for (i = 0; i < nix_hw->flowkey.in_use; i++)
2722 if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2728 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2730 int idx, nr_field, key_off, field_marker, keyoff_marker;
2731 int max_key_off, max_bit_pos, group_member;
2732 struct nix_rx_flowkey_alg *field;
2733 struct nix_rx_flowkey_alg tmp;
2734 u32 key_type, valid_key;
2735 int l4_key_offset = 0;
2740 #define FIELDS_PER_ALG 5
2741 #define MAX_KEY_OFF 40
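/* In other words, one RSS flow key algorithm can extract at most
 * FIELDS_PER_ALG (5) protocol fields, and the extracted bytes must fit
 * within a MAX_KEY_OFF (40) byte hash key.
 */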
2742 /* Clear all fields */
2743 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2745 /* Each of the 32 possible flow key algorithm definitions should
2746 * fall into above incremental config (except ALG0). Otherwise a
2747 * single NPC MCAM entry is not sufficient for supporting RSS.
2749 * If a different definition or combination is needed then the NPC MCAM
2750 * has to be programmed to filter such pkts and its action should
2751 * point to this definition to calculate flowtag or hash.
2753 * The `for loop` goes over _all_ protocol fields and the following
2754 * variables depict the state machine's forward progress logic.
2756 * keyoff_marker - Enabled when hash byte length needs to be accounted
2757 * in field->key_offset update.
2758 * field_marker - Enabled when a new field needs to be selected.
2759 * group_member - Enabled when protocol is part of a group.
2760 */
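/* Illustrative walk-through (assuming the NIX_FLOW_KEY_TYPE_* bit order
 * used by the switch cases below, with IPV4 at a lower bit than TCP):
 * for flow_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_TCP the
 * loop emits two fields into the 40 byte hash key:
 *   alg[0]: NPC_LID_LC/NPC_LT_LC_IP, hdr_offset 12, 8 bytes (SIP + DIP), key_offset 0
 *   alg[1]: NPC_LID_LD/NPC_LT_LD_TCP, hdr_offset 0, 4 bytes (sport + dport), key_offset 8
 * i.e. the RSS hash is computed over the IPv4 addresses and TCP ports.
 */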
2762 keyoff_marker = 0; max_key_off = 0; group_member = 0;
2763 nr_field = 0; key_off = 0; field_marker = 1;
2764 field = &tmp; max_bit_pos = fls(flow_cfg);
2766 idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2767 key_off < MAX_KEY_OFF; idx++) {
2768 key_type = BIT(idx);
2769 valid_key = flow_cfg & key_type;
2770 /* Found a field marker, reset the field values */
2772 memset(&tmp, 0, sizeof(tmp));
2774 field_marker = true;
2775 keyoff_marker = true;
2777 case NIX_FLOW_KEY_TYPE_PORT:
2778 field->sel_chan = true;
2779 /* This should be set to 1, when SEL_CHAN is set */
2782 case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
2783 field->lid = NPC_LID_LC;
2784 field->hdr_offset = 9; /* offset */
2785 field->bytesm1 = 0; /* 1 byte */
2786 field->ltype_match = NPC_LT_LC_IP;
2787 field->ltype_mask = 0xF;
2789 case NIX_FLOW_KEY_TYPE_IPV4:
2790 case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2791 field->lid = NPC_LID_LC;
2792 field->ltype_match = NPC_LT_LC_IP;
2793 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2794 field->lid = NPC_LID_LG;
2795 field->ltype_match = NPC_LT_LG_TU_IP;
2797 field->hdr_offset = 12; /* SIP offset */
2798 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2799 field->ltype_mask = 0xF; /* Match only IPv4 */
2800 keyoff_marker = false;
2802 case NIX_FLOW_KEY_TYPE_IPV6:
2803 case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2804 field->lid = NPC_LID_LC;
2805 field->ltype_match = NPC_LT_LC_IP6;
2806 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2807 field->lid = NPC_LID_LG;
2808 field->ltype_match = NPC_LT_LG_TU_IP6;
2810 field->hdr_offset = 8; /* SIP offset */
2811 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2812 field->ltype_mask = 0xF; /* Match only IPv6 */
2814 case NIX_FLOW_KEY_TYPE_TCP:
2815 case NIX_FLOW_KEY_TYPE_UDP:
2816 case NIX_FLOW_KEY_TYPE_SCTP:
2817 case NIX_FLOW_KEY_TYPE_INNR_TCP:
2818 case NIX_FLOW_KEY_TYPE_INNR_UDP:
2819 case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2820 field->lid = NPC_LID_LD;
2821 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2822 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2823 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2824 field->lid = NPC_LID_LH;
2825 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2827 /* Enum values for NPC_LID_LD and NPC_LID_LG are same,
2828 * so no need to change the ltype_match, just change
2829 * the lid for inner protocols
2831 BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2832 (int)NPC_LT_LH_TU_TCP);
2833 BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2834 (int)NPC_LT_LH_TU_UDP);
2835 BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2836 (int)NPC_LT_LH_TU_SCTP);
2838 if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2839 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2841 field->ltype_match |= NPC_LT_LD_TCP;
2842 group_member = true;
2843 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2844 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2846 field->ltype_match |= NPC_LT_LD_UDP;
2847 group_member = true;
2848 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2849 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2851 field->ltype_match |= NPC_LT_LD_SCTP;
2852 group_member = true;
2854 field->ltype_mask = ~field->ltype_match;
2855 if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2856 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2857 /* Handle the case where any of the group items
2858 * is enabled in the group but not the final one
2862 group_member = false;
2865 field_marker = false;
2866 keyoff_marker = false;
2869 /* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
2870 * remember the TCP key offset within the 40 byte hash key.
2872 if (key_type == NIX_FLOW_KEY_TYPE_TCP)
2873 l4_key_offset = key_off;
2875 case NIX_FLOW_KEY_TYPE_NVGRE:
2876 field->lid = NPC_LID_LD;
2877 field->hdr_offset = 4; /* VSID offset */
2879 field->ltype_match = NPC_LT_LD_NVGRE;
2880 field->ltype_mask = 0xF;
2882 case NIX_FLOW_KEY_TYPE_VXLAN:
2883 case NIX_FLOW_KEY_TYPE_GENEVE:
2884 field->lid = NPC_LID_LE;
2886 field->hdr_offset = 4;
2887 field->ltype_mask = 0xF;
2888 field_marker = false;
2889 keyoff_marker = false;
2891 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2892 field->ltype_match |= NPC_LT_LE_VXLAN;
2893 group_member = true;
2896 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2897 field->ltype_match |= NPC_LT_LE_GENEVE;
2898 group_member = true;
2901 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2903 field->ltype_mask = ~field->ltype_match;
2904 field_marker = true;
2905 keyoff_marker = true;
2907 group_member = false;
2911 case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2912 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2913 field->lid = NPC_LID_LA;
2914 field->ltype_match = NPC_LT_LA_ETHER;
2915 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2916 field->lid = NPC_LID_LF;
2917 field->ltype_match = NPC_LT_LF_TU_ETHER;
2919 field->hdr_offset = 0;
2920 field->bytesm1 = 5; /* DMAC 6 Byte */
2921 field->ltype_mask = 0xF;
2923 case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2924 field->lid = NPC_LID_LC;
2925 field->hdr_offset = 40; /* IPV6 hdr */
2926 field->bytesm1 = 0; /* 1 Byte ext hdr */
2927 field->ltype_match = NPC_LT_LC_IP6_EXT;
2928 field->ltype_mask = 0xF;
2930 case NIX_FLOW_KEY_TYPE_GTPU:
2931 field->lid = NPC_LID_LE;
2932 field->hdr_offset = 4;
2933 field->bytesm1 = 3; /* 4 bytes TID */
2934 field->ltype_match = NPC_LT_LE_GTPU;
2935 field->ltype_mask = 0xF;
2937 case NIX_FLOW_KEY_TYPE_VLAN:
2938 field->lid = NPC_LID_LB;
2939 field->hdr_offset = 2; /* Skip TPID (2-bytes) */
2940 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
2941 field->ltype_match = NPC_LT_LB_CTAG;
2942 field->ltype_mask = 0xF;
2943 field->fn_mask = 1; /* Mask out the first nibble */
2945 case NIX_FLOW_KEY_TYPE_AH:
2946 case NIX_FLOW_KEY_TYPE_ESP:
2947 field->hdr_offset = 0;
2948 field->bytesm1 = 7; /* SPI + sequence number */
2949 field->ltype_mask = 0xF;
2950 field->lid = NPC_LID_LE;
2951 field->ltype_match = NPC_LT_LE_ESP;
2952 if (key_type == NIX_FLOW_KEY_TYPE_AH) {
2953 field->lid = NPC_LID_LD;
2954 field->ltype_match = NPC_LT_LD_AH;
2955 field->hdr_offset = 4;
2956 keyoff_marker = false;
2962 /* Found a valid flow key type */
2964 /* Use the key offset of TCP/UDP/SCTP fields
2965 * for ESP/AH fields.
2967 if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
2968 key_type == NIX_FLOW_KEY_TYPE_AH)
2969 key_off = l4_key_offset;
2970 field->key_offset = key_off;
2971 memcpy(&alg[nr_field], field, sizeof(*field));
2972 max_key_off = max(max_key_off, field->bytesm1 + 1);
2974 /* Found a field marker, get the next field */
2979 /* Found a keyoff marker, update the new key_off */
2980 if (keyoff_marker) {
2981 key_off += max_key_off;
2985 /* Processed all the flow key types */
2986 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
2989 return NIX_AF_ERR_RSS_NOSPC_FIELD;
2992 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
2994 u64 field[FIELDS_PER_ALG];
2998 hw = get_nix_hw(rvu->hw, blkaddr);
3002 /* No room to add a new flow hash algorithm */
3003 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
3004 return NIX_AF_ERR_RSS_NOSPC_ALGO;
3006 /* Generate algo fields for the given flow_cfg */
3007 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
3011 /* Update ALGX_FIELDX register with generated fields */
3012 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3013 rvu_write64(rvu, blkaddr,
3014 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
3017 /* Store the flow_cfg for further lookup */
3018 rc = hw->flowkey.in_use;
3019 hw->flowkey.flowkey[rc] = flow_cfg;
3020 hw->flowkey.in_use++;
3025 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
3026 struct nix_rss_flowkey_cfg *req,
3027 struct nix_rss_flowkey_cfg_rsp *rsp)
3029 u16 pcifunc = req->hdr.pcifunc;
3030 int alg_idx, nixlf, blkaddr;
3031 struct nix_hw *nix_hw;
3034 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3038 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3042 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
3043 /* Failed to get algo index from the existing list, reserve a new one */
3045 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
3050 rsp->alg_idx = alg_idx;
3051 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
3052 alg_idx, req->mcam_index);
3056 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
3058 u32 flowkey_cfg, minkey_cfg;
3061 /* Disable all flow key algx fieldx */
3062 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
3063 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3064 rvu_write64(rvu, blkaddr,
3065 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
3069 /* IPv4/IPv6 SIP/DIPs */
3070 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
3071 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3075 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3076 minkey_cfg = flowkey_cfg;
3077 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
3078 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3082 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3083 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
3084 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3088 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3089 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
3090 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3094 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
3095 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3096 NIX_FLOW_KEY_TYPE_UDP;
3097 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3101 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3102 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3103 NIX_FLOW_KEY_TYPE_SCTP;
3104 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3108 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3109 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3110 NIX_FLOW_KEY_TYPE_SCTP;
3111 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3115 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3116 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3117 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3118 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
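/* Summary: after this init sequence the default flow key algorithms
 * occupy algorithm indices 0..7 in the order reserved above:
 *   0 - IP 2-tuple, 1 - +TCP, 2 - +UDP, 3 - +SCTP, 4 - +TCP/UDP,
 *   5 - +TCP/SCTP, 6 - +UDP/SCTP, 7 - +TCP/UDP/SCTP.
 */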
3125 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3126 struct nix_set_mac_addr *req,
3127 struct msg_rsp *rsp)
3129 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3130 u16 pcifunc = req->hdr.pcifunc;
3131 int blkaddr, nixlf, err;
3132 struct rvu_pfvf *pfvf;
3134 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3138 pfvf = rvu_get_pfvf(rvu, pcifunc);
3140 /* untrusted VF can't overwrite admin(PF) changes */
3141 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3142 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
3144 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
3148 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3150 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
3151 pfvf->rx_chan_base, req->mac_addr);
3153 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
3154 ether_addr_copy(pfvf->default_mac, req->mac_addr);
3159 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
3160 struct msg_req *req,
3161 struct nix_get_mac_addr_rsp *rsp)
3163 u16 pcifunc = req->hdr.pcifunc;
3164 struct rvu_pfvf *pfvf;
3166 if (!is_nixlf_attached(rvu, pcifunc))
3167 return NIX_AF_ERR_AF_LF_INVALID;
3169 pfvf = rvu_get_pfvf(rvu, pcifunc);
3171 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
3176 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
3177 struct msg_rsp *rsp)
3179 bool allmulti, promisc, nix_rx_multicast;
3180 u16 pcifunc = req->hdr.pcifunc;
3181 struct rvu_pfvf *pfvf;
3184 pfvf = rvu_get_pfvf(rvu, pcifunc);
3185 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
3186 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
3187 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
3189 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
3191 if (is_vf(pcifunc) && !nix_rx_multicast &&
3192 (promisc || allmulti)) {
3193 dev_warn_ratelimited(rvu->dev,
3194 "VF promisc/multicast not supported\n");
3198 /* untrusted VF can't configure promisc/allmulti */
3199 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3200 (promisc || allmulti))
3203 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3207 if (nix_rx_multicast) {
3208 /* add/del this PF_FUNC to/from mcast pkt replication list */
3209 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
3213 "Failed to update pcifunc 0x%x to multicast list\n",
3218 /* add/del this PF_FUNC to/from promisc pkt replication list */
3219 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
3223 "Failed to update pcifunc 0x%x to promisc list\n",
3229 /* install/uninstall allmulti entry */
3231 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
3232 pfvf->rx_chan_base);
3234 if (!nix_rx_multicast)
3235 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
3238 /* install/uninstall promisc entry */
3240 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
3244 if (!nix_rx_multicast)
3245 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
3251 static void nix_find_link_frs(struct rvu *rvu,
3252 struct nix_frs_cfg *req, u16 pcifunc)
3254 int pf = rvu_get_pf(pcifunc);
3255 struct rvu_pfvf *pfvf;
3260 /* Update with requester's min/max lengths */
3261 pfvf = rvu_get_pfvf(rvu, pcifunc);
3262 pfvf->maxlen = req->maxlen;
3263 if (req->update_minlen)
3264 pfvf->minlen = req->minlen;
3266 maxlen = req->maxlen;
3267 minlen = req->update_minlen ? req->minlen : 0;
3269 /* Get this PF's numVFs and starting hwvf */
3270 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
3272 /* For each VF, compare requested max/minlen */
3273 for (vf = 0; vf < numvfs; vf++) {
3274 pfvf = &rvu->hwvf[hwvf + vf];
3275 if (pfvf->maxlen > maxlen)
3276 maxlen = pfvf->maxlen;
3277 if (req->update_minlen &&
3278 pfvf->minlen && pfvf->minlen < minlen)
3279 minlen = pfvf->minlen;
3282 /* Compare requested max/minlen with PF's max/minlen */
3283 pfvf = &rvu->pf[pf];
3284 if (pfvf->maxlen > maxlen)
3285 maxlen = pfvf->maxlen;
3286 if (req->update_minlen &&
3287 pfvf->minlen && pfvf->minlen < minlen)
3288 minlen = pfvf->minlen;
3290 /* Update the request with the max/min of the PF and its VFs */
3291 req->maxlen = maxlen;
3292 if (req->update_minlen)
3293 req->minlen = minlen;
3296 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
3297 struct msg_rsp *rsp)
3299 struct rvu_hwinfo *hw = rvu->hw;
3300 u16 pcifunc = req->hdr.pcifunc;
3301 int pf = rvu_get_pf(pcifunc);
3302 int blkaddr, schq, link = -1;
3303 struct nix_txsch *txsch;
3304 u64 cfg, lmac_fifo_len;
3305 struct nix_hw *nix_hw;
3306 u8 cgx = 0, lmac = 0;
3309 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3311 return NIX_AF_ERR_AF_LF_INVALID;
3313 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3317 if (is_afvf(pcifunc))
3318 rvu_get_lbk_link_max_frs(rvu, &max_mtu);
3320 rvu_get_lmac_link_max_frs(rvu, &max_mtu);
3322 if (!req->sdp_link && req->maxlen > max_mtu)
3323 return NIX_AF_ERR_FRS_INVALID;
3325 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
3326 return NIX_AF_ERR_FRS_INVALID;
3328 /* Check if requester wants to update SMQs */
3329 if (!req->update_smq)
3332 /* Update min/maxlen in each of the SMQ attached to this PF/VF */
3333 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
3334 mutex_lock(&rvu->rsrc_lock);
3335 for (schq = 0; schq < txsch->schq.max; schq++) {
3336 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
3338 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
3339 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
3340 if (req->update_minlen)
3341 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
3342 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
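/* Reader note: per the masks used above, SMQ maxlen lives in bits
 * [23:8] and minlen in bits [6:0] of NIX_AF_SMQX_CFG.
 */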
3344 mutex_unlock(&rvu->rsrc_lock);
3347 /* Check if config is for SDP link */
3348 if (req->sdp_link) {
3350 return NIX_AF_ERR_RX_LINK_INVALID;
3351 link = hw->cgx_links + hw->lbk_links;
3355 /* Check if the request is from CGX mapped RVU PF */
3356 if (is_pf_cgxmapped(rvu, pf)) {
3357 /* Get CGX and LMAC to which this PF is mapped and find link */
3358 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
3359 link = (cgx * hw->lmac_per_cgx) + lmac;
3360 } else if (pf == 0) {
3361 /* For VFs of PF0 ingress is LBK port, so config LBK link */
3362 link = hw->cgx_links;
3366 return NIX_AF_ERR_RX_LINK_INVALID;
3368 nix_find_link_frs(rvu, req, pcifunc);
3371 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
3372 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
3373 if (req->update_minlen)
3374 cfg = (cfg & ~0xFFFFULL) | req->minlen;
3375 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
3377 if (req->sdp_link || pf == 0)
3380 /* Update transmit credits for CGX links */
3382 rvu_cgx_get_fifolen(rvu) /
3383 cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3384 cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
3385 cfg &= ~(0xFFFFFULL << 12);
3386 cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
3387 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
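/* Reader note: the divide by 16 above suggests NORM_CREDIT counts
 * credits in 16-byte units; the 20-bit credit field programmed here
 * sits at bits [31:12] of NIX_AF_TX_LINKX_NORM_CREDIT, per the mask
 * cleared before the update.
 */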
3391 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
3392 struct msg_rsp *rsp)
3394 int nixlf, blkaddr, err;
3397 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
3401 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
3402 /* Set the interface configuration */
3403 if (req->len_verify & BIT(0))
3406 cfg &= ~BIT_ULL(41);
3408 if (req->len_verify & BIT(1))
3411 cfg &= ~BIT_ULL(40);
3413 if (req->csum_verify & BIT(0))
3416 cfg &= ~BIT_ULL(37);
3418 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
3423 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
3425 /* CN10k supports 72KB FIFO size and max packet size of 64k */
3426 if (rvu->hw->lbk_bufsize == 0x12000)
3427 return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
3429 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
3432 static void nix_link_config(struct rvu *rvu, int blkaddr)
3434 struct rvu_hwinfo *hw = rvu->hw;
3435 int cgx, lmac_cnt, slink, link;
3436 u16 lbk_max_frs, lmac_max_frs;
3439 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
3440 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
3442 /* Set default min/max packet lengths allowed on NIX Rx links.
3444 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
3445 * as undersize and report them to SW as error pkts, hence
3446 * setting it to 40 bytes.
3448 for (link = 0; link < hw->cgx_links; link++) {
3449 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3450 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
3453 for (link = hw->cgx_links; link < (hw->cgx_links + hw->lbk_links); link++) {
3454 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3455 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
3457 if (hw->sdp_links) {
3458 link = hw->cgx_links + hw->lbk_links;
3459 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3460 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
3463 /* Set credits for Tx links assuming max packet length allowed.
3464 * This will be reconfigured based on MTU set for PF/VF.
3466 for (cgx = 0; cgx < hw->cgx; cgx++) {
3467 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3468 tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
3469 lmac_max_frs) / 16;
3470 /* Enable credits and set credit pkt count to max allowed */
3471 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3472 slink = cgx * hw->lmac_per_cgx;
3473 for (link = slink; link < (slink + lmac_cnt); link++) {
3474 rvu_write64(rvu, blkaddr,
3475 NIX_AF_TX_LINKX_NORM_CREDIT(link),
3480 /* Set Tx credits for LBK link */
3481 slink = hw->cgx_links;
3482 for (link = slink; link < (slink + hw->lbk_links); link++) {
3483 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
3484 /* Enable credits and set credit pkt count to max allowed */
3485 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3486 rvu_write64(rvu, blkaddr,
3487 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
3491 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3496 /* Start X2P bus calibration */
3497 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3498 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3499 /* Wait for calibration to complete */
3500 err = rvu_poll_reg(rvu, blkaddr,
3501 NIX_AF_STATUS, BIT_ULL(10), false);
3503 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3507 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3508 /* Check if CGX devices are ready */
3509 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3510 /* Skip when cgx port is not available */
3511 if (!rvu_cgx_pdata(idx, rvu) ||
3512 (status & (BIT_ULL(16 + idx))))
3515 "CGX%d didn't respond to NIX X2P calibration\n", idx);
3519 /* Check if LBK is ready */
3520 if (!(status & BIT_ULL(19))) {
3522 "LBK didn't respond to NIX X2P calibration\n");
3526 /* Clear 'calibrate_x2p' bit */
3527 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3528 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
3529 if (err || (status & 0x3FFULL))
3531 "NIX X2P calibration failed, status 0x%llx\n", status);
3537 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3542 /* Set admin queue endianness */
3543 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3546 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3549 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3552 /* Do not bypass NDC cache */
3553 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
3555 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3556 /* Disable caching of SQB aka SQEs */
3559 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3561 /* Result structure can be followed by RQ/SQ/CQ context at
3562 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
3563 * operation type. Alloc sufficient result memory for all operations.
3565 err = rvu_aq_alloc(rvu, &block->aq,
3566 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3567 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
3571 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3572 rvu_write64(rvu, block->addr,
3573 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
3577 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
3579 const struct npc_lt_def_cfg *ltdefs;
3580 struct rvu_hwinfo *hw = rvu->hw;
3581 int blkaddr = nix_hw->blkaddr;
3582 struct rvu_block *block;
3586 block = &hw->block[blkaddr];
3588 if (is_rvu_96xx_B0(rvu)) {
3589 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
3590 * internal state when conditional clocks are turned off.
3591 * Hence enable them.
3593 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3594 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3596 /* Set chan/link to backpressure TL3 instead of TL2 */
3597 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3599 /* Disable SQ manager's sticky mode operation (set TM6 = 0)
3600 * This sticky mode is known to cause SQ stalls when multiple
3601 * SQs are mapped to the same SMQ and are transmitting pkts at the same time.
3603 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3604 cfg &= ~BIT_ULL(15);
3605 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3608 ltdefs = rvu->kpu.lt_def;
3609 /* Calibrate X2P bus to check if CGX/LBK links are fine */
3610 err = nix_calibrate_x2p(rvu, blkaddr);
3614 /* Initialize admin queue */
3615 err = nix_aq_init(rvu, block);
3619 /* Restore CINT timer delay to HW reset values */
3620 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3622 if (is_block_implemented(hw, blkaddr)) {
3623 err = nix_setup_txschq(rvu, nix_hw, blkaddr);
3627 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
3631 err = nix_setup_mcast(rvu, nix_hw, blkaddr);
3635 err = nix_setup_txvlan(rvu, nix_hw);
3639 /* Configure segmentation offload formats */
3640 nix_setup_lso(rvu, nix_hw, blkaddr);
3642 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3643 * This helps HW protocol checker to identify headers
3644 * and validate length and checksums.
3646 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3647 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
3648 ltdefs->rx_ol2.ltype_mask);
3649 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3650 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
3651 ltdefs->rx_oip4.ltype_mask);
3652 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3653 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
3654 ltdefs->rx_iip4.ltype_mask);
3655 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3656 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
3657 ltdefs->rx_oip6.ltype_mask);
3658 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3659 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
3660 ltdefs->rx_iip6.ltype_mask);
3661 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3662 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
3663 ltdefs->rx_otcp.ltype_mask);
3664 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3665 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
3666 ltdefs->rx_itcp.ltype_mask);
3667 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3668 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
3669 ltdefs->rx_oudp.ltype_mask);
3670 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3671 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
3672 ltdefs->rx_iudp.ltype_mask);
3673 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3674 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
3675 ltdefs->rx_osctp.ltype_mask);
3676 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3677 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
3678 ltdefs->rx_isctp.ltype_mask);
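/* Reader note: all the NIX_AF_RX_DEF_* writes above use the same
 * encoding, per the shifts in this function: layer id in bits [10:8],
 * ltype match value in bits [7:4] and ltype mask in bits [3:0]; the
 * APAD and ET variants below additionally carry a valid bit (bit 11)
 * and, for ET, a byte offset starting at bit 12.
 */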
3680 if (!is_rvu_otx2(rvu)) {
3681 /* Enable APAD calculation for other protocols
3682 * matching APAD0 and APAD1 lt def registers.
3684 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
3685 (ltdefs->rx_apad0.valid << 11) |
3686 (ltdefs->rx_apad0.lid << 8) |
3687 (ltdefs->rx_apad0.ltype_match << 4) |
3688 ltdefs->rx_apad0.ltype_mask);
3689 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
3690 (ltdefs->rx_apad1.valid << 11) |
3691 (ltdefs->rx_apad1.lid << 8) |
3692 (ltdefs->rx_apad1.ltype_match << 4) |
3693 ltdefs->rx_apad1.ltype_mask);
3695 /* Receive ethertype definition register defines layer
3696 * information in NPC_RESULT_S to identify the Ethertype
3697 * location in L2 header. Used for Ethertype overwriting
3698 * in inline IPsec flow.
3700 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
3701 (ltdefs->rx_et[0].offset << 12) |
3702 (ltdefs->rx_et[0].valid << 11) |
3703 (ltdefs->rx_et[0].lid << 8) |
3704 (ltdefs->rx_et[0].ltype_match << 4) |
3705 ltdefs->rx_et[0].ltype_mask);
3706 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
3707 (ltdefs->rx_et[1].offset << 12) |
3708 (ltdefs->rx_et[1].valid << 11) |
3709 (ltdefs->rx_et[1].lid << 8) |
3710 (ltdefs->rx_et[1].ltype_match << 4) |
3711 ltdefs->rx_et[1].ltype_mask);
3714 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3718 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3719 nix_link_config(rvu, blkaddr);
3721 /* Enable Channel backpressure */
3722 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
3727 int rvu_nix_init(struct rvu *rvu)
3729 struct rvu_hwinfo *hw = rvu->hw;
3730 struct nix_hw *nix_hw;
3731 int blkaddr = 0, err;
3734 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
3739 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3741 nix_hw = &hw->nix[i];
3743 nix_hw->blkaddr = blkaddr;
3744 err = rvu_nix_block_init(rvu, nix_hw);
3747 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3754 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
3755 struct rvu_block *block)
3757 struct nix_txsch *txsch;
3758 struct nix_mcast *mcast;
3759 struct nix_txvlan *vlan;
3760 struct nix_hw *nix_hw;
3763 rvu_aq_free(rvu, block->aq);
3765 if (is_block_implemented(rvu->hw, blkaddr)) {
3766 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3770 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3771 txsch = &nix_hw->txsch[lvl];
3772 kfree(txsch->schq.bmap);
3775 vlan = &nix_hw->txvlan;
3776 kfree(vlan->rsrc.bmap);
3777 mutex_destroy(&vlan->rsrc_lock);
3778 devm_kfree(rvu->dev, vlan->entry2pfvf_map);
3780 mcast = &nix_hw->mcast;
3781 qmem_free(rvu->dev, mcast->mce_ctx);
3782 qmem_free(rvu->dev, mcast->mcast_buf);
3783 mutex_destroy(&mcast->mce_lock);
3787 void rvu_nix_freemem(struct rvu *rvu)
3789 struct rvu_hwinfo *hw = rvu->hw;
3790 struct rvu_block *block;
3793 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3795 block = &hw->block[blkaddr];
3796 rvu_nix_block_freemem(rvu, blkaddr, block);
3797 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3801 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3802 struct msg_rsp *rsp)
3804 u16 pcifunc = req->hdr.pcifunc;
3805 struct rvu_pfvf *pfvf;
3808 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3812 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3814 npc_mcam_enable_flows(rvu, pcifunc);
3816 pfvf = rvu_get_pfvf(rvu, pcifunc);
3817 set_bit(NIXLF_INITIALIZED, &pfvf->flags);
3819 return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3822 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3823 struct msg_rsp *rsp)
3825 u16 pcifunc = req->hdr.pcifunc;
3826 struct rvu_pfvf *pfvf;
3829 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3833 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3835 pfvf = rvu_get_pfvf(rvu, pcifunc);
3836 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3838 return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3841 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3843 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3844 struct hwctx_disable_req ctx_req;
3847 ctx_req.hdr.pcifunc = pcifunc;
3849 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3850 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3851 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
3852 nix_interface_deinit(rvu, pcifunc, nixlf);
3853 nix_rx_sync(rvu, blkaddr);
3854 nix_txschq_free(rvu, pcifunc);
3856 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3858 rvu_cgx_start_stop_io(rvu, pcifunc, false);
3861 ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3862 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3864 dev_err(rvu->dev, "SQ ctx disable failed\n");
3868 ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3869 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3871 dev_err(rvu->dev, "RQ ctx disable failed\n");
3875 ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3876 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3878 dev_err(rvu->dev, "CQ ctx disable failed\n");
3881 nix_ctx_free(rvu, pfvf);
3884 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
3886 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
3888 struct rvu_hwinfo *hw = rvu->hw;
3889 struct rvu_block *block;
3894 pf = rvu_get_pf(pcifunc);
3895 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
3898 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3900 return NIX_AF_ERR_AF_LF_INVALID;
3902 block = &hw->block[blkaddr];
3903 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
3905 return NIX_AF_ERR_AF_LF_INVALID;
3907 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
3910 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
3912 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
3914 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
3919 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
3920 struct msg_rsp *rsp)
3922 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
3925 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
3926 struct msg_rsp *rsp)
3928 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
3931 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
3932 struct nix_lso_format_cfg *req,
3933 struct nix_lso_format_cfg_rsp *rsp)
3935 u16 pcifunc = req->hdr.pcifunc;
3936 struct nix_hw *nix_hw;
3937 struct rvu_pfvf *pfvf;
3938 int blkaddr, idx, f;
3941 pfvf = rvu_get_pfvf(rvu, pcifunc);
3942 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3943 if (!pfvf->nixlf || blkaddr < 0)
3944 return NIX_AF_ERR_AF_LF_INVALID;
3946 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3950 /* Find existing matching LSO format, if any */
3951 for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
3952 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
3953 reg = rvu_read64(rvu, blkaddr,
3954 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
3955 if (req->fields[f] != (reg & req->field_mask))
3959 if (f == NIX_LSO_FIELD_MAX)
3963 if (idx < nix_hw->lso.in_use) {
3965 rsp->lso_format_idx = idx;
3969 if (nix_hw->lso.in_use == nix_hw->lso.total)
3970 return NIX_AF_ERR_LSO_CFG_FAIL;
3972 rsp->lso_format_idx = nix_hw->lso.in_use++;
3974 for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
3975 rvu_write64(rvu, blkaddr,
3976 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
3982 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
3984 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
3986 /* overwrite vf mac address with default_mac */
3988 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);