2 * Broadcom NetXtreme-E RoCE driver.
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 * Description: Fast Path Operators
39 #define dev_fmt(fmt) "QPLIB: " fmt
41 #include <linux/interrupt.h>
42 #include <linux/spinlock.h>
43 #include <linux/sched.h>
44 #include <linux/slab.h>
45 #include <linux/pci.h>
46 #include <linux/delay.h>
47 #include <linux/prefetch.h>
48 #include <linux/if_ether.h>
49 #include <rdma/ib_mad.h>
53 #include "qplib_res.h"
54 #include "qplib_rcfw.h"
58 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
60 static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
62 qp->sq.condition = false;
63 qp->sq.send_phantom = false;
64 qp->sq.single = false;
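/* Add the QP to the flush lists of its send and receive CQs, skipping a
 * queue that is already marked flushed. Callers must hold the CQ flush locks.
 */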
68 static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
70 struct bnxt_qplib_cq *scq, *rcq;
75 if (!qp->sq.flushed) {
76 dev_dbg(&scq->hwq.pdev->dev,
77 "FP: Adding to SQ Flush list = %p\n", qp);
78 bnxt_qplib_cancel_phantom_processing(qp);
79 list_add_tail(&qp->sq_flush, &scq->sqf_head);
80 qp->sq.flushed = true;
83 if (!qp->rq.flushed) {
84 dev_dbg(&rcq->hwq.pdev->dev,
85 "FP: Adding to RQ Flush list = %p\n", qp);
86 list_add_tail(&qp->rq_flush, &rcq->rqf_head);
87 qp->rq.flushed = true;
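/* Take the flush locks of both CQs; when the send and receive CQ are the
 * same object only one lock is taken (the extra __acquire is a sparse
 * annotation).
 */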
92 static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
94 __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
96 spin_lock_irqsave(&qp->scq->flush_lock, *flags);
97 if (qp->scq == qp->rcq)
98 __acquire(&qp->rcq->flush_lock);
100 spin_lock(&qp->rcq->flush_lock);
103 static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
104 unsigned long *flags)
105 __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
107 if (qp->scq == qp->rcq)
108 __release(&qp->rcq->flush_lock);
110 spin_unlock(&qp->rcq->flush_lock);
111 spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
114 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
118 bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
119 __bnxt_qplib_add_flush_qp(qp);
120 bnxt_qplib_release_cq_flush_locks(qp, &flags);
123 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
125 if (qp->sq.flushed) {
126 qp->sq.flushed = false;
127 list_del(&qp->sq_flush);
130 if (qp->rq.flushed) {
131 qp->rq.flushed = false;
132 list_del(&qp->rq_flush);
137 void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
141 bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
142 __clean_cq(qp->scq, (u64)(unsigned long)qp);
145 __clean_cq(qp->rcq, (u64)(unsigned long)qp);
149 __bnxt_qplib_del_flush_qp(qp);
150 bnxt_qplib_release_cq_flush_locks(qp, &flags);
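/* Deferred work handler: invoke the CQ notification handler for a CQ that
 * is still armed.
 */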
153 static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
155 struct bnxt_qplib_nq_work *nq_work =
156 container_of(work, struct bnxt_qplib_nq_work, work);
158 struct bnxt_qplib_cq *cq = nq_work->cq;
159 struct bnxt_qplib_nq *nq = nq_work->nq;
162 spin_lock_bh(&cq->compl_lock);
163 if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
164 dev_dbg(&nq->pdev->dev,
165 "%s:Trigger cq = %p event nq = %p\n",
167 nq->cqn_handler(nq, cq);
169 spin_unlock_bh(&cq->compl_lock);
174 static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
175 struct bnxt_qplib_qp *qp)
177 struct bnxt_qplib_q *rq = &qp->rq;
178 struct bnxt_qplib_q *sq = &qp->sq;
181 dma_free_coherent(&res->pdev->dev,
182 rq->max_wqe * qp->rq_hdr_buf_size,
183 qp->rq_hdr_buf, qp->rq_hdr_buf_map);
185 dma_free_coherent(&res->pdev->dev,
186 sq->max_wqe * qp->sq_hdr_buf_size,
187 qp->sq_hdr_buf, qp->sq_hdr_buf_map);
188 qp->rq_hdr_buf = NULL;
189 qp->sq_hdr_buf = NULL;
190 qp->rq_hdr_buf_map = 0;
191 qp->sq_hdr_buf_map = 0;
192 qp->sq_hdr_buf_size = 0;
193 qp->rq_hdr_buf_size = 0;
196 static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
197 struct bnxt_qplib_qp *qp)
199 struct bnxt_qplib_q *rq = &qp->rq;
200 struct bnxt_qplib_q *sq = &qp->sq;
203 if (qp->sq_hdr_buf_size && sq->max_wqe) {
204 qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
205 sq->max_wqe * qp->sq_hdr_buf_size,
206 &qp->sq_hdr_buf_map, GFP_KERNEL);
207 if (!qp->sq_hdr_buf) {
209 dev_err(&res->pdev->dev,
210 "Failed to create sq_hdr_buf\n");
215 if (qp->rq_hdr_buf_size && rq->max_wqe) {
216 qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
221 if (!qp->rq_hdr_buf) {
223 dev_err(&res->pdev->dev,
224 "Failed to create rq_hdr_buf\n");
231 bnxt_qplib_free_qp_hdr_buf(res, qp);
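/* Drain the NQ and clear the CQ handle in any notification entry that
 * still references the given CQ.
 */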
235 static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
237 struct bnxt_qplib_hwq *hwq = &nq->hwq;
238 struct nq_base *nqe, **nq_ptr;
239 int budget = nq->budget;
240 u32 sw_cons, raw_cons;
244 spin_lock_bh(&hwq->lock);
245 /* Service the NQ until empty */
246 raw_cons = hwq->cons;
248 sw_cons = HWQ_CMP(raw_cons, hwq);
249 nq_ptr = (struct nq_base **)hwq->pbl_ptr;
250 nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
251 if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
255 * The validity test of the entry must be done before
256 * reading any further.
260 type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
262 case NQ_BASE_TYPE_CQ_NOTIFICATION:
264 struct nq_cn *nqcne = (struct nq_cn *)nqe;
266 q_handle = le32_to_cpu(nqcne->cq_handle_low);
267 q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
269 if ((unsigned long)cq == q_handle) {
270 nqcne->cq_handle_low = 0;
271 nqcne->cq_handle_high = 0;
281 spin_unlock_bh(&hwq->lock);
284 /* Wait until all NQEs for this CQ have been received, then clean the NQEs associated with
287 static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
291 while (retry_cnt--) {
292 if (cnq_events == cq->cnq_events)
294 usleep_range(50, 100);
295 clean_nq(cq->nq, cq);
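/* NQ tasklet: consume notification entries, dispatch CQ and SRQ events to
 * the registered handlers, then re-arm the NQ doorbell.
 */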
299 static void bnxt_qplib_service_nq(struct tasklet_struct *t)
301 struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
302 struct bnxt_qplib_hwq *hwq = &nq->hwq;
303 struct bnxt_qplib_cq *cq;
304 int budget = nq->budget;
305 u32 sw_cons, raw_cons;
310 spin_lock_bh(&hwq->lock);
311 /* Service the NQ until empty */
312 raw_cons = hwq->cons;
314 sw_cons = HWQ_CMP(raw_cons, hwq);
315 nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
316 if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
320 * The validity test of the entry must be done before
321 * reading any further.
325 type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
327 case NQ_BASE_TYPE_CQ_NOTIFICATION:
329 struct nq_cn *nqcne = (struct nq_cn *)nqe;
331 q_handle = le32_to_cpu(nqcne->cq_handle_low);
332 q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
334 cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
337 bnxt_qplib_armen_db(&cq->dbinfo,
338 DBC_DBC_TYPE_CQ_ARMENA);
339 spin_lock_bh(&cq->compl_lock);
340 atomic_set(&cq->arm_state, 0);
341 if (nq->cqn_handler(nq, (cq)))
342 dev_warn(&nq->pdev->dev,
343 "cqn - type 0x%x not handled\n", type);
345 spin_unlock_bh(&cq->compl_lock);
348 case NQ_BASE_TYPE_SRQ_EVENT:
350 struct bnxt_qplib_srq *srq;
351 struct nq_srq_event *nqsrqe =
352 (struct nq_srq_event *)nqe;
354 q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
355 q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
357 srq = (struct bnxt_qplib_srq *)q_handle;
358 bnxt_qplib_armen_db(&srq->dbinfo,
359 DBC_DBC_TYPE_SRQ_ARMENA);
360 if (nq->srqn_handler(nq,
361 (struct bnxt_qplib_srq *)q_handle,
363 dev_warn(&nq->pdev->dev,
364 "SRQ event 0x%x not handled\n",
368 case NQ_BASE_TYPE_DBQ_EVENT:
371 dev_warn(&nq->pdev->dev,
372 "nqe with type = 0x%x not handled\n", type);
377 if (hwq->cons != raw_cons) {
378 hwq->cons = raw_cons;
379 bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
381 spin_unlock_bh(&hwq->lock);
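/* NQ MSI-X interrupt handler: prefetch the next NQE and defer the actual
 * processing to the tasklet.
 */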
384 static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
386 struct bnxt_qplib_nq *nq = dev_instance;
387 struct bnxt_qplib_hwq *hwq = &nq->hwq;
390 /* Prefetch the NQ element */
391 sw_cons = HWQ_CMP(hwq->cons, hwq);
392 prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));
394 /* Fan out to CPU affinitized kthreads? */
395 tasklet_schedule(&nq->nq_tasklet);
400 void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
405 tasklet_disable(&nq->nq_tasklet);
406 /* Mask h/w interrupt */
407 bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
408 /* Sync with last running IRQ handler */
409 synchronize_irq(nq->msix_vec);
411 tasklet_kill(&nq->nq_tasklet);
413 irq_set_affinity_hint(nq->msix_vec, NULL);
414 free_irq(nq->msix_vec, nq);
417 nq->requested = false;
420 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
423 destroy_workqueue(nq->cqn_wq);
427 /* Make sure the HW is stopped! */
428 bnxt_qplib_nq_stop_irq(nq, true);
430 if (nq->nq_db.reg.bar_reg) {
431 iounmap(nq->nq_db.reg.bar_reg);
432 nq->nq_db.reg.bar_reg = NULL;
435 nq->cqn_handler = NULL;
436 nq->srqn_handler = NULL;
440 int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
441 int msix_vector, bool need_init)
443 struct bnxt_qplib_res *res = nq->res;
449 nq->msix_vec = msix_vector;
451 tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
453 tasklet_enable(&nq->nq_tasklet);
455 nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
456 nq_indx, pci_name(res->pdev));
459 rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
463 tasklet_disable(&nq->nq_tasklet);
467 cpumask_clear(&nq->mask);
468 cpumask_set_cpu(nq_indx, &nq->mask);
469 rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
471 dev_warn(&nq->pdev->dev,
472 "set affinity failed; vector: %d nq_idx: %d\n",
473 nq->msix_vec, nq_indx);
475 nq->requested = true;
476 bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
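/* Map the NQ consumer doorbell register from its PCI BAR and set up the
 * doorbell info used to ring it.
 */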
481 static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
483 resource_size_t reg_base;
484 struct bnxt_qplib_nq_db *nq_db;
485 struct pci_dev *pdev;
490 nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
491 nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
492 if (!nq_db->reg.bar_base) {
493 dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
498 reg_base = nq_db->reg.bar_base + reg_offt;
499 /* Unconditionally map 8 bytes to support 57500 series */
501 nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
502 if (!nq_db->reg.bar_reg) {
503 dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
508 nq_db->dbinfo.db = nq_db->reg.bar_reg;
509 nq_db->dbinfo.hwq = &nq->hwq;
510 nq_db->dbinfo.xid = nq->ring_id;
515 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
516 int nq_idx, int msix_vector, int bar_reg_offset,
517 cqn_handler_t cqn_handler,
518 srqn_handler_t srqn_handler)
523 nq->cqn_handler = cqn_handler;
524 nq->srqn_handler = srqn_handler;
526 /* Have a work item to schedule CQ notifiers in the post-send case */
527 nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
531 rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
535 rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
537 dev_err(&nq->pdev->dev,
538 "Failed to request irq for nq-idx %d\n", nq_idx);
544 bnxt_qplib_disable_nq(nq);
548 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
550 if (nq->hwq.max_elements) {
551 bnxt_qplib_free_hwq(nq->res, &nq->hwq);
552 nq->hwq.max_elements = 0;
556 int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
558 struct bnxt_qplib_hwq_attr hwq_attr = {};
559 struct bnxt_qplib_sg_info sginfo = {};
561 nq->pdev = res->pdev;
563 if (!nq->hwq.max_elements ||
564 nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
565 nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
567 sginfo.pgsize = PAGE_SIZE;
568 sginfo.pgshft = PAGE_SHIFT;
570 hwq_attr.sginfo = &sginfo;
571 hwq_attr.depth = nq->hwq.max_elements;
572 hwq_attr.stride = sizeof(struct nq_base);
573 hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
574 if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
575 dev_err(&nq->pdev->dev, "FP NQ allocation failed");
583 void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
584 struct bnxt_qplib_srq *srq)
586 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
587 struct creq_destroy_srq_resp resp = {};
588 struct bnxt_qplib_cmdqmsg msg = {};
589 struct cmdq_destroy_srq req = {};
592 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
593 CMDQ_BASE_OPCODE_DESTROY_SRQ,
596 /* Configure the request */
597 req.srq_cid = cpu_to_le32(srq->id);
599 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
600 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
604 bnxt_qplib_free_hwq(res, &srq->hwq);
607 int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
608 struct bnxt_qplib_srq *srq)
610 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
611 struct bnxt_qplib_hwq_attr hwq_attr = {};
612 struct creq_create_srq_resp resp = {};
613 struct bnxt_qplib_cmdqmsg msg = {};
614 struct cmdq_create_srq req = {};
615 struct bnxt_qplib_pbl *pbl;
620 hwq_attr.sginfo = &srq->sg_info;
621 hwq_attr.depth = srq->max_wqe;
622 hwq_attr.stride = srq->wqe_size;
623 hwq_attr.type = HWQ_TYPE_QUEUE;
624 rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
628 srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
635 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
636 CMDQ_BASE_OPCODE_CREATE_SRQ,
639 /* Configure the request */
640 req.dpi = cpu_to_le32(srq->dpi->dpi);
641 req.srq_handle = cpu_to_le64((uintptr_t)srq);
643 req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
644 pbl = &srq->hwq.pbl[PBL_LVL_0];
645 pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
646 CMDQ_CREATE_SRQ_PG_SIZE_SFT);
647 pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
648 CMDQ_CREATE_SRQ_LVL_SFT;
649 req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
650 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
651 req.pd_id = cpu_to_le32(srq->pd->id);
652 req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
654 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
655 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
659 spin_lock_init(&srq->lock);
661 srq->last_idx = srq->hwq.max_elements - 1;
662 for (idx = 0; idx < srq->hwq.max_elements; idx++)
663 srq->swq[idx].next_idx = idx + 1;
664 srq->swq[srq->last_idx].next_idx = -1;
666 srq->id = le32_to_cpu(resp.xid);
667 srq->dbinfo.hwq = &srq->hwq;
668 srq->dbinfo.xid = srq->id;
669 srq->dbinfo.db = srq->dpi->dbr;
670 srq->dbinfo.max_slot = 1;
671 srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
673 bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
674 srq->arm_req = false;
678 bnxt_qplib_free_hwq(res, &srq->hwq);
684 int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
685 struct bnxt_qplib_srq *srq)
687 struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
688 u32 sw_prod, sw_cons, count = 0;
690 sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
691 sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
693 count = sw_prod > sw_cons ? sw_prod - sw_cons :
694 srq_hwq->max_elements - sw_cons + sw_prod;
695 if (count > srq->threshold) {
696 srq->arm_req = false;
697 bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
699 /* Deferred arming */
706 int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
707 struct bnxt_qplib_srq *srq)
709 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
710 struct creq_query_srq_resp resp = {};
711 struct bnxt_qplib_cmdqmsg msg = {};
712 struct bnxt_qplib_rcfw_sbuf *sbuf;
713 struct creq_query_srq_resp_sb *sb;
714 struct cmdq_query_srq req = {};
717 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
718 CMDQ_BASE_OPCODE_QUERY_SRQ,
721 /* Configure the request */
722 sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
725 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
726 req.srq_cid = cpu_to_le32(srq->id);
728 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
730 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
731 srq->threshold = le16_to_cpu(sb->srq_limit);
732 bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
737 int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
738 struct bnxt_qplib_swqe *wqe)
740 struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
742 struct sq_sge *hw_sge;
743 u32 sw_prod, sw_cons, count = 0;
746 spin_lock(&srq_hwq->lock);
747 if (srq->start_idx == srq->last_idx) {
748 dev_err(&srq_hwq->pdev->dev,
749 "FP: SRQ (0x%x) is full!\n", srq->id);
750 spin_unlock(&srq_hwq->lock);
753 next = srq->start_idx;
754 srq->start_idx = srq->swq[next].next_idx;
755 spin_unlock(&srq_hwq->lock);
757 sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
758 srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
759 memset(srqe, 0, srq->wqe_size);
760 /* Calculate wqe_size16 and data_len */
761 for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
762 i < wqe->num_sge; i++, hw_sge++) {
763 hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
764 hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
765 hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
767 srqe->wqe_type = wqe->type;
768 srqe->flags = wqe->flags;
769 srqe->wqe_size = wqe->num_sge +
770 ((offsetof(typeof(*srqe), data) + 15) >> 4);
771 srqe->wr_id[0] = cpu_to_le32((u32)next);
772 srq->swq[next].wr_id = wqe->wr_id;
776 spin_lock(&srq_hwq->lock);
777 sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
778 /* Retain srq_hwq->cons for this logic;
779 * the lock is actually only required to
780 * read srq_hwq->cons.
782 sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
783 count = sw_prod > sw_cons ? sw_prod - sw_cons :
784 srq_hwq->max_elements - sw_cons + sw_prod;
785 spin_unlock(&srq_hwq->lock);
787 bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
788 if (srq->arm_req && count > srq->threshold) {
789 srq->arm_req = false;
790 bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
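/* Allocate the software queue shadow array and link its entries into a
 * circular free list.
 */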
798 static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
802 que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
807 que->swq_last = que->max_wqe - 1;
808 for (indx = 0; indx < que->max_wqe; indx++)
809 que->swq[indx].next_idx = indx + 1;
810 que->swq[que->swq_last].next_idx = 0; /* Make it circular */
816 int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
818 struct bnxt_qplib_hwq_attr hwq_attr = {};
819 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
820 struct creq_create_qp1_resp resp = {};
821 struct bnxt_qplib_cmdqmsg msg = {};
822 struct bnxt_qplib_q *sq = &qp->sq;
823 struct bnxt_qplib_q *rq = &qp->rq;
824 struct cmdq_create_qp1 req = {};
825 struct bnxt_qplib_pbl *pbl;
831 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
832 CMDQ_BASE_OPCODE_CREATE_QP1,
836 req.dpi = cpu_to_le32(qp->dpi->dpi);
837 req.qp_handle = cpu_to_le64(qp->qp_handle);
841 hwq_attr.sginfo = &sq->sg_info;
842 hwq_attr.stride = sizeof(struct sq_sge);
843 hwq_attr.depth = bnxt_qplib_get_depth(sq);
844 hwq_attr.type = HWQ_TYPE_QUEUE;
845 rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
849 rc = bnxt_qplib_alloc_init_swq(sq);
853 req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
854 pbl = &sq->hwq.pbl[PBL_LVL_0];
855 req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
856 pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
857 CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
858 pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
859 req.sq_pg_size_sq_lvl = pg_sz_lvl;
861 cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
862 CMDQ_CREATE_QP1_SQ_SGE_SFT);
863 req.scq_cid = cpu_to_le32(qp->scq->id);
868 hwq_attr.sginfo = &rq->sg_info;
869 hwq_attr.stride = sizeof(struct sq_sge);
870 hwq_attr.depth = bnxt_qplib_get_depth(rq);
871 hwq_attr.type = HWQ_TYPE_QUEUE;
872 rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
875 rc = bnxt_qplib_alloc_init_swq(rq);
878 req.rq_size = cpu_to_le32(rq->max_wqe);
879 pbl = &rq->hwq.pbl[PBL_LVL_0];
880 req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
881 pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
882 CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
883 pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
884 req.rq_pg_size_rq_lvl = pg_sz_lvl;
886 cpu_to_le16((rq->max_sge &
887 CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
888 CMDQ_CREATE_QP1_RQ_SGE_SFT);
890 req.rcq_cid = cpu_to_le32(qp->rcq->id);
891 /* Header buffer - allow hdr_buf to be passed in */
892 rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
897 qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
898 req.qp_flags = cpu_to_le32(qp_flags);
899 req.pd_id = cpu_to_le32(qp->pd->id);
901 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
902 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
906 qp->id = le32_to_cpu(resp.xid);
907 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
908 qp->cctx = res->cctx;
909 sq->dbinfo.hwq = &sq->hwq;
910 sq->dbinfo.xid = qp->id;
911 sq->dbinfo.db = qp->dpi->dbr;
912 sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
914 rq->dbinfo.hwq = &rq->hwq;
915 rq->dbinfo.xid = qp->id;
916 rq->dbinfo.db = qp->dpi->dbr;
917 rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
919 tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
920 rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
921 rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
926 bnxt_qplib_free_qp_hdr_buf(res, qp);
930 bnxt_qplib_free_hwq(res, &rq->hwq);
934 bnxt_qplib_free_hwq(res, &sq->hwq);
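/* Locate the PSN search area allocated past the SQ depth and record its
 * page, offset and stride in the HWQ for later per-WQE lookups.
 */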
938 static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
940 struct bnxt_qplib_hwq *hwq;
941 struct bnxt_qplib_q *sq;
947 /* First psn entry */
948 fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
949 if (!IS_ALIGNED(fpsne, PAGE_SIZE))
950 indx_pad = (fpsne & ~PAGE_MASK) / size;
951 hwq->pad_pgofft = indx_pad;
952 hwq->pad_pg = (u64 *)psn_pg;
953 hwq->pad_stride = size;
956 int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
958 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
959 struct bnxt_qplib_hwq_attr hwq_attr = {};
960 struct bnxt_qplib_sg_info sginfo = {};
961 struct creq_create_qp_resp resp = {};
962 struct bnxt_qplib_cmdqmsg msg = {};
963 struct bnxt_qplib_q *sq = &qp->sq;
964 struct bnxt_qplib_q *rq = &qp->rq;
965 struct cmdq_create_qp req = {};
966 int rc, req_size, psn_sz = 0;
967 struct bnxt_qplib_hwq *xrrq;
968 struct bnxt_qplib_pbl *pbl;
974 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
975 CMDQ_BASE_OPCODE_CREATE_QP,
980 req.dpi = cpu_to_le32(qp->dpi->dpi);
981 req.qp_handle = cpu_to_le64(qp->qp_handle);
984 if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
985 psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
986 sizeof(struct sq_psn_search_ext) :
987 sizeof(struct sq_psn_search);
991 hwq_attr.sginfo = &sq->sg_info;
992 hwq_attr.stride = sizeof(struct sq_sge);
993 hwq_attr.depth = bnxt_qplib_get_depth(sq);
994 hwq_attr.aux_stride = psn_sz;
995 hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
996 hwq_attr.type = HWQ_TYPE_QUEUE;
997 rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
1001 rc = bnxt_qplib_alloc_init_swq(sq);
1006 bnxt_qplib_init_psn_ptr(qp, psn_sz);
1008 req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
1009 pbl = &sq->hwq.pbl[PBL_LVL_0];
1010 req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1011 pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
1012 CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
1013 pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
1014 req.sq_pg_size_sq_lvl = pg_sz_lvl;
1016 cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
1017 CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
1018 req.scq_cid = cpu_to_le32(qp->scq->id);
1023 hwq_attr.sginfo = &rq->sg_info;
1024 hwq_attr.stride = sizeof(struct sq_sge);
1025 hwq_attr.depth = bnxt_qplib_get_depth(rq);
1026 hwq_attr.aux_stride = 0;
1027 hwq_attr.aux_depth = 0;
1028 hwq_attr.type = HWQ_TYPE_QUEUE;
1029 rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
1032 rc = bnxt_qplib_alloc_init_swq(rq);
1036 req.rq_size = cpu_to_le32(rq->max_wqe);
1037 pbl = &rq->hwq.pbl[PBL_LVL_0];
1038 req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1039 pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
1040 CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
1041 pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
1042 req.rq_pg_size_rq_lvl = pg_sz_lvl;
1043 nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1046 cpu_to_le16(((nsge &
1047 CMDQ_CREATE_QP_RQ_SGE_MASK) <<
1048 CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
1051 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
1052 req.srq_cid = cpu_to_le32(qp->srq->id);
1054 req.rcq_cid = cpu_to_le32(qp->rcq->id);
1056 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
1057 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
1059 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
1060 if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
1061 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
1062 if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
1063 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
1065 req.qp_flags = cpu_to_le32(qp_flags);
1070 xrrq->max_elements =
1071 ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1072 req_size = xrrq->max_elements *
1073 BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1074 req_size &= ~(PAGE_SIZE - 1);
1075 sginfo.pgsize = req_size;
1076 sginfo.pgshft = PAGE_SHIFT;
1079 hwq_attr.sginfo = &sginfo;
1080 hwq_attr.depth = xrrq->max_elements;
1081 hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
1082 hwq_attr.aux_stride = 0;
1083 hwq_attr.aux_depth = 0;
1084 hwq_attr.type = HWQ_TYPE_CTX;
1085 rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
1088 pbl = &xrrq->pbl[PBL_LVL_0];
1089 req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1092 xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
1093 qp->max_dest_rd_atomic);
1094 req_size = xrrq->max_elements *
1095 BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1096 req_size &= ~(PAGE_SIZE - 1);
1097 sginfo.pgsize = req_size;
1098 hwq_attr.depth = xrrq->max_elements;
1099 hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
1100 rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
1104 pbl = &xrrq->pbl[PBL_LVL_0];
1105 req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1107 req.pd_id = cpu_to_le32(qp->pd->id);
1109 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
1111 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1115 qp->id = le32_to_cpu(resp.xid);
1116 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
1117 INIT_LIST_HEAD(&qp->sq_flush);
1118 INIT_LIST_HEAD(&qp->rq_flush);
1119 qp->cctx = res->cctx;
1120 sq->dbinfo.hwq = &sq->hwq;
1121 sq->dbinfo.xid = qp->id;
1122 sq->dbinfo.db = qp->dpi->dbr;
1123 sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
1125 rq->dbinfo.hwq = &rq->hwq;
1126 rq->dbinfo.xid = qp->id;
1127 rq->dbinfo.db = qp->dpi->dbr;
1128 rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
1130 tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1131 rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1132 rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
1136 bnxt_qplib_free_hwq(res, &qp->irrq);
1138 bnxt_qplib_free_hwq(res, &qp->orrq);
1142 bnxt_qplib_free_hwq(res, &rq->hwq);
1146 bnxt_qplib_free_hwq(res, &sq->hwq);
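/* Adjust the modify mask for the INIT->RTR transition to satisfy firmware
 * requirements (default path MTU of 2048, minimum rd_atomic of 1, SGID index).
 */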
1150 static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
1152 switch (qp->state) {
1153 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1154 /* INIT->RTR, configure the path_mtu to the default
1155 * 2048 if it is not explicitly requested
1157 if (!(qp->modify_flags &
1158 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
1160 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1162 CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1165 ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1166 /* Bono FW requires the max_dest_rd_atomic to be >= 1 */
1167 if (qp->max_dest_rd_atomic < 1)
1168 qp->max_dest_rd_atomic = 1;
1169 qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
1170 /* Bono FW 20.6.5 requires SGID_INDEX configuration */
1171 if (!(qp->modify_flags &
1172 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
1174 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
1175 qp->ah.sgid_index = 0;
1183 static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
1185 switch (qp->state) {
1186 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1187 /* Bono FW requires the max_rd_atomic to be >= 1 */
1188 if (qp->max_rd_atomic < 1)
1189 qp->max_rd_atomic = 1;
1190 /* Bono FW does not allow PKEY_INDEX,
1191 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
1192 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
1193 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
1197 ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
1198 CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1199 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1200 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1201 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1202 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1203 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1204 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
1205 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
1206 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
1207 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
1208 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
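/* Filter the modify flags according to the QP's current state before the
 * MODIFY_QP command is issued.
 */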
1215 static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
1217 switch (qp->cur_qp_state) {
1218 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1220 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1221 __modify_flags_from_init_state(qp);
1223 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1224 __modify_flags_from_rtr_state(qp);
1226 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1228 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1230 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1232 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1239 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1241 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1242 struct creq_modify_qp_resp resp = {};
1243 struct bnxt_qplib_cmdqmsg msg = {};
1244 struct cmdq_modify_qp req = {};
1249 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1250 CMDQ_BASE_OPCODE_MODIFY_QP,
1253 /* Filter out the qp_attr_mask based on the current->new state transition */
1254 __filter_modify_flags(qp);
1255 bmask = qp->modify_flags;
1256 req.modify_mask = cpu_to_le32(qp->modify_flags);
1257 req.qp_cid = cpu_to_le32(qp->id);
1258 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
1259 req.network_type_en_sqd_async_notify_new_state =
1260 (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
1261 (qp->en_sqd_async_notify ?
1262 CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
1264 req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
1266 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
1267 req.access = qp->access;
1269 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
1270 req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);
1272 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
1273 req.qkey = cpu_to_le32(qp->qkey);
1275 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
1276 memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
1277 req.dgid[0] = cpu_to_le32(temp32[0]);
1278 req.dgid[1] = cpu_to_le32(temp32[1]);
1279 req.dgid[2] = cpu_to_le32(temp32[2]);
1280 req.dgid[3] = cpu_to_le32(temp32[3]);
1282 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
1283 req.flow_label = cpu_to_le32(qp->ah.flow_label);
1285 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
1286 req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
1287 [qp->ah.sgid_index]);
1289 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
1290 req.hop_limit = qp->ah.hop_limit;
1292 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
1293 req.traffic_class = qp->ah.traffic_class;
1295 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
1296 memcpy(req.dest_mac, qp->ah.dmac, 6);
1298 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
1299 req.path_mtu_pingpong_push_enable |= qp->path_mtu;
1301 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
1302 req.timeout = qp->timeout;
1304 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
1305 req.retry_cnt = qp->retry_cnt;
1307 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
1308 req.rnr_retry = qp->rnr_retry;
1310 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
1311 req.min_rnr_timer = qp->min_rnr_timer;
1313 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
1314 req.rq_psn = cpu_to_le32(qp->rq.psn);
1316 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
1317 req.sq_psn = cpu_to_le32(qp->sq.psn);
1319 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
1321 ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1323 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
1324 req.max_dest_rd_atomic =
1325 IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1327 req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1328 req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1329 req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1330 req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1331 req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1332 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
1333 req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1335 req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
1337 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
1338 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1341 qp->cur_qp_state = qp->state;
1345 int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1347 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1348 struct creq_query_qp_resp resp = {};
1349 struct bnxt_qplib_cmdqmsg msg = {};
1350 struct bnxt_qplib_rcfw_sbuf *sbuf;
1351 struct creq_query_qp_resp_sb *sb;
1352 struct cmdq_query_qp req = {};
1356 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1357 CMDQ_BASE_OPCODE_QUERY_QP,
1360 sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
1365 req.qp_cid = cpu_to_le32(qp->id);
1366 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
1367 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
1369 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1372 /* Extract the context from the side buffer */
1373 qp->state = sb->en_sqd_async_notify_state &
1374 CREQ_QUERY_QP_RESP_SB_STATE_MASK;
1375 qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1376 CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
1378 qp->access = sb->access;
1379 qp->pkey_index = le16_to_cpu(sb->pkey);
1380 qp->qkey = le32_to_cpu(sb->qkey);
1382 temp32[0] = le32_to_cpu(sb->dgid[0]);
1383 temp32[1] = le32_to_cpu(sb->dgid[1]);
1384 temp32[2] = le32_to_cpu(sb->dgid[2]);
1385 temp32[3] = le32_to_cpu(sb->dgid[3]);
1386 memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1388 qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1390 qp->ah.sgid_index = 0;
1391 for (i = 0; i < res->sgid_tbl.max; i++) {
1392 if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
1393 qp->ah.sgid_index = i;
1397 if (i == res->sgid_tbl.max)
1398 dev_warn(&res->pdev->dev, "SGID not found??\n");
1400 qp->ah.hop_limit = sb->hop_limit;
1401 qp->ah.traffic_class = sb->traffic_class;
1402 memcpy(qp->ah.dmac, sb->dest_mac, 6);
1403 qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1404 CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
1405 CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
1406 qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1407 CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
1408 CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
1409 qp->timeout = sb->timeout;
1410 qp->retry_cnt = sb->retry_cnt;
1411 qp->rnr_retry = sb->rnr_retry;
1412 qp->min_rnr_timer = sb->min_rnr_timer;
1413 qp->rq.psn = le32_to_cpu(sb->rq_psn);
1414 qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1415 qp->sq.psn = le32_to_cpu(sb->sq_psn);
1416 qp->max_dest_rd_atomic =
1417 IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
1418 qp->sq.max_wqe = qp->sq.hwq.max_elements;
1419 qp->rq.max_wqe = qp->rq.hwq.max_elements;
1420 qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1421 qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1422 qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1423 qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1424 memcpy(qp->smac, sb->src_mac, 6);
1425 qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
1427 bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
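/* Walk the CQ ring looking for valid CQEs whose qp_handle matches the
 * given QP; used while cleaning up a QP's outstanding completions.
 */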
1431 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1433 struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1434 struct cq_base *hw_cqe;
1437 for (i = 0; i < cq_hwq->max_elements; i++) {
1438 hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
1439 if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
1442 * The validity test of the entry must be done before
1443 * reading any further.
1446 switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
1447 case CQ_BASE_CQE_TYPE_REQ:
1448 case CQ_BASE_CQE_TYPE_TERMINAL:
1450 struct cq_req *cqe = (struct cq_req *)hw_cqe;
1452 if (qp == le64_to_cpu(cqe->qp_handle))
1456 case CQ_BASE_CQE_TYPE_RES_RC:
1457 case CQ_BASE_CQE_TYPE_RES_UD:
1458 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
1460 struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
1462 if (qp == le64_to_cpu(cqe->qp_handle))
1472 int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1473 struct bnxt_qplib_qp *qp)
1475 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1476 struct creq_destroy_qp_resp resp = {};
1477 struct bnxt_qplib_cmdqmsg msg = {};
1478 struct cmdq_destroy_qp req = {};
1482 tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1483 rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1484 rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
1486 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1487 CMDQ_BASE_OPCODE_DESTROY_QP,
1490 req.qp_cid = cpu_to_le32(qp->id);
1491 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
1493 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1495 rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1496 rcfw->qp_tbl[tbl_indx].qp_handle = qp;
1503 void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
1504 struct bnxt_qplib_qp *qp)
1506 bnxt_qplib_free_qp_hdr_buf(res, qp);
1507 bnxt_qplib_free_hwq(res, &qp->sq.hwq);
1510 bnxt_qplib_free_hwq(res, &qp->rq.hwq);
1513 if (qp->irrq.max_elements)
1514 bnxt_qplib_free_hwq(res, &qp->irrq);
1515 if (qp->orrq.max_elements)
1516 bnxt_qplib_free_hwq(res, &qp->orrq);
1520 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1521 struct bnxt_qplib_sge *sge)
1523 struct bnxt_qplib_q *sq = &qp->sq;
1526 memset(sge, 0, sizeof(*sge));
1528 if (qp->sq_hdr_buf) {
1529 sw_prod = sq->swq_start;
1530 sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
1531 sw_prod * qp->sq_hdr_buf_size);
1532 sge->lkey = 0xFFFFFFFF;
1533 sge->size = qp->sq_hdr_buf_size;
1534 return qp->sq_hdr_buf + sw_prod * sge->size;
1539 u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1541 struct bnxt_qplib_q *rq = &qp->rq;
1543 return rq->swq_start;
1546 dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
1548 return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
1551 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1552 struct bnxt_qplib_sge *sge)
1554 struct bnxt_qplib_q *rq = &qp->rq;
1557 memset(sge, 0, sizeof(*sge));
1559 if (qp->rq_hdr_buf) {
1560 sw_prod = rq->swq_start;
1561 sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
1562 sw_prod * qp->rq_hdr_buf_size);
1563 sge->lkey = 0xFFFFFFFF;
1564 sge->size = qp->rq_hdr_buf_size;
1565 return qp->rq_hdr_buf + sw_prod * sge->size;
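/* Fill the PSN search entry for this WQE (opcode, start and next PSN);
 * gen-P5 chips use the extended format that also records the slot index.
 */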
1570 static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
1571 struct bnxt_qplib_swqe *wqe,
1572 struct bnxt_qplib_swq *swq)
1574 struct sq_psn_search_ext *psns_ext;
1575 struct sq_psn_search *psns;
1579 if (!swq->psn_search)
1581 psns = swq->psn_search;
1582 psns_ext = swq->psn_ext;
1584 op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1585 SQ_PSN_SEARCH_START_PSN_MASK);
1586 op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1587 SQ_PSN_SEARCH_OPCODE_MASK);
1588 flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1589 SQ_PSN_SEARCH_NEXT_PSN_MASK);
1591 if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
1592 psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
1593 psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
1594 psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
1596 psns->opcode_start_psn = cpu_to_le32(op_spsn);
1597 psns->flags_next_psn = cpu_to_le32(flg_npsn);
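/* Copy inline data from the WQE SG list directly into successive SQ slots,
 * bounded by the QP's max_inline_data.
 */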
1601 static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
1602 struct bnxt_qplib_swqe *wqe,
1605 struct bnxt_qplib_hwq *hwq;
1606 int len, t_len, offt;
1607 bool pull_dst = true;
1608 void *il_dst = NULL;
1609 void *il_src = NULL;
1615 for (indx = 0; indx < wqe->num_sge; indx++) {
1616 len = wqe->sg_list[indx].size;
1617 il_src = (void *)wqe->sg_list[indx].addr;
1619 if (t_len > qp->max_inline_data)
1624 il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
1629 cplen = min_t(int, len, sizeof(struct sq_sge));
1630 cplen = min_t(int, cplen,
1631 (sizeof(struct sq_sge) - offt));
1632 memcpy(il_dst, il_src, cplen);
1638 if (t_cplen == sizeof(struct sq_sge))
1646 static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
1647 struct bnxt_qplib_sge *ssge,
1650 struct sq_sge *dsge;
1653 for (indx = 0; indx < nsge; indx++, (*idx)++) {
1654 dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
1655 dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
1656 dsge->l_key = cpu_to_le32(ssge[indx].lkey);
1657 dsge->size = cpu_to_le32(ssge[indx].size);
1658 len += ssge[indx].size;
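/* Work out how many 16B slots this WQE occupies (header plus SGEs or inline
 * data) and translate the queue-full delta into slot units.
 */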
1664 static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
1665 struct bnxt_qplib_swqe *wqe,
1666 u16 *wqe_sz, u16 *qdf, u8 mode)
1672 nsge = wqe->num_sge;
1673 /* sq_send_hdr is a slight misnomer here: the RQ header has the same size, so it applies to both queues. */
1674 bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
1675 if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1676 ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
1677 bytes = ALIGN(ilsize, sizeof(struct sq_sge));
1678 bytes += sizeof(struct sq_send_hdr);
1681 *qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
1684 if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
1689 static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
1690 struct bnxt_qplib_swq *swq)
1692 struct bnxt_qplib_hwq *hwq;
1693 u32 pg_num, pg_indx;
1700 tail = swq->slot_idx / sq->dbinfo.max_slot;
1701 pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
1702 pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
1703 buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
1704 swq->psn_ext = buff;
1705 swq->psn_search = buff;
1708 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1710 struct bnxt_qplib_q *sq = &qp->sq;
1712 bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1715 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1716 struct bnxt_qplib_swqe *wqe)
1718 struct bnxt_qplib_nq_work *nq_work = NULL;
1719 int i, rc = 0, data_len = 0, pkt_num = 0;
1720 struct bnxt_qplib_q *sq = &qp->sq;
1721 struct bnxt_qplib_hwq *hwq;
1722 struct bnxt_qplib_swq *swq;
1723 bool sch_handler = false;
1724 u16 wqe_sz, qdf = 0;
1733 if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1734 qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1735 dev_err(&hwq->pdev->dev,
1736 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
1742 slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1743 if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1744 dev_err(&hwq->pdev->dev,
1745 "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1746 hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1751 swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1752 bnxt_qplib_pull_psn_buff(sq, swq);
1755 swq->slot_idx = hwq->prod;
1757 swq->wr_id = wqe->wr_id;
1758 swq->type = wqe->type;
1759 swq->flags = wqe->flags;
1760 swq->start_psn = sq->psn & BTH_PSN_MASK;
1762 swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1764 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1766 dev_dbg(&hwq->pdev->dev,
1767 "%s Error QP. Scheduling for poll_cq\n", __func__);
1771 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1772 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1773 memset(base_hdr, 0, sizeof(struct sq_sge));
1774 memset(ext_hdr, 0, sizeof(struct sq_sge));
1776 if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
1777 /* Copy the inline data */
1778 data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1780 data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
1785 switch (wqe->type) {
1786 case BNXT_QPLIB_SWQE_TYPE_SEND:
1787 if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1788 struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
1789 struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
1790 /* Assemble info for Raw Ethertype QPs */
1792 sqe->wqe_type = wqe->type;
1793 sqe->flags = wqe->flags;
1794 sqe->wqe_size = wqe_sz;
1795 sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1796 sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1797 sqe->length = cpu_to_le32(data_len);
1798 ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1799 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1800 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1805 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1806 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1808 struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
1809 struct sq_send_hdr *sqe = base_hdr;
1811 sqe->wqe_type = wqe->type;
1812 sqe->flags = wqe->flags;
1813 sqe->wqe_size = wqe_sz;
1814 sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
1815 if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1816 qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1817 sqe->q_key = cpu_to_le32(wqe->send.q_key);
1818 sqe->length = cpu_to_le32(data_len);
1819 sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1820 ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
1821 SQ_SEND_DST_QP_MASK);
1822 ext_sqe->avid = cpu_to_le32(wqe->send.avid &
1825 sqe->length = cpu_to_le32(data_len);
1827 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1830 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1834 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1835 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1836 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1838 struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
1839 struct sq_rdma_hdr *sqe = base_hdr;
1841 sqe->wqe_type = wqe->type;
1842 sqe->flags = wqe->flags;
1843 sqe->wqe_size = wqe_sz;
1844 sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1845 sqe->length = cpu_to_le32((u32)data_len);
1846 ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1847 ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1849 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1852 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1855 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1856 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1858 struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
1859 struct sq_atomic_hdr *sqe = base_hdr;
1861 sqe->wqe_type = wqe->type;
1862 sqe->flags = wqe->flags;
1863 sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1864 sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1865 ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1866 ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1868 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1871 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1874 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1876 struct sq_localinvalidate *sqe = base_hdr;
1878 sqe->wqe_type = wqe->type;
1879 sqe->flags = wqe->flags;
1880 sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1884 case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1886 struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
1887 struct sq_fr_pmr_hdr *sqe = base_hdr;
1889 sqe->wqe_type = wqe->type;
1890 sqe->flags = wqe->flags;
1891 sqe->access_cntl = wqe->frmr.access_cntl |
1892 SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1893 sqe->zero_based_page_size_log =
1894 (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1895 SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1896 (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1897 sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1898 temp32 = cpu_to_le32(wqe->frmr.length);
1899 memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1900 sqe->numlevels_pbl_page_size_log =
1901 ((wqe->frmr.pbl_pg_sz_log <<
1902 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1903 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1904 ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1905 SQ_FR_PMR_NUMLEVELS_MASK);
1907 for (i = 0; i < wqe->frmr.page_list_len; i++)
1908 wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1909 wqe->frmr.page_list[i] |
1911 ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1912 ext_sqe->va = cpu_to_le64(wqe->frmr.va);
1916 case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1918 struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
1919 struct sq_bind_hdr *sqe = base_hdr;
1921 sqe->wqe_type = wqe->type;
1922 sqe->flags = wqe->flags;
1923 sqe->access_cntl = wqe->bind.access_cntl;
1924 sqe->mw_type_zero_based = wqe->bind.mw_type |
1925 (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1926 sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1927 sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1928 ext_sqe->va = cpu_to_le64(wqe->bind.va);
1929 ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
1933 /* Bad wqe, return error */
1937 swq->next_psn = sq->psn & BTH_PSN_MASK;
1938 bnxt_qplib_fill_psn_search(qp, wqe, swq);
1940 bnxt_qplib_swq_mod_start(sq, wqe_idx);
1941 bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
1945 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1947 nq_work->cq = qp->scq;
1948 nq_work->nq = qp->scq->nq;
1949 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1950 queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
1952 dev_err(&hwq->pdev->dev,
1953 "FP: Failed to allocate SQ nq_work!\n");
1960 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
1962 struct bnxt_qplib_q *rq = &qp->rq;
1964 bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
1967 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1968 struct bnxt_qplib_swqe *wqe)
1970 struct bnxt_qplib_nq_work *nq_work = NULL;
1971 struct bnxt_qplib_q *rq = &qp->rq;
1972 struct rq_wqe_hdr *base_hdr;
1973 struct rq_ext_hdr *ext_hdr;
1974 struct bnxt_qplib_hwq *hwq;
1975 struct bnxt_qplib_swq *swq;
1976 bool sch_handler = false;
1982 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1983 dev_err(&hwq->pdev->dev,
1984 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
1990 if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
1991 dev_err(&hwq->pdev->dev,
1992 "FP: QP (0x%x) RQ is full!\n", qp->id);
1997 swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
1998 swq->wr_id = wqe->wr_id;
1999 swq->slots = rq->dbinfo.max_slot;
2001 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2003 dev_dbg(&hwq->pdev->dev,
2004 "%s: Error QP. Scheduling for poll_cq\n", __func__);
2009 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2010 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2011 memset(base_hdr, 0, sizeof(struct sq_sge));
2012 memset(ext_hdr, 0, sizeof(struct sq_sge));
2013 wqe_sz = (sizeof(struct rq_wqe_hdr) +
2014 wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2015 bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2016 if (!wqe->num_sge) {
2019 sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2023 base_hdr->wqe_type = wqe->type;
2024 base_hdr->flags = wqe->flags;
2025 base_hdr->wqe_size = wqe_sz;
2026 base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2028 bnxt_qplib_swq_mod_start(rq, wqe_idx);
2029 bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
2032 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2034 nq_work->cq = qp->rcq;
2035 nq_work->nq = qp->rcq->nq;
2036 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2037 queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2039 dev_err(&hwq->pdev->dev,
2040 "FP: Failed to allocate RQ nq_work!\n");
2049 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2051 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2052 struct bnxt_qplib_hwq_attr hwq_attr = {};
2053 struct creq_create_cq_resp resp = {};
2054 struct bnxt_qplib_cmdqmsg msg = {};
2055 struct cmdq_create_cq req = {};
2056 struct bnxt_qplib_pbl *pbl;
2061 dev_err(&rcfw->pdev->dev,
2062 "FP: CREATE_CQ failed due to NULL DPI\n");
2067 hwq_attr.depth = cq->max_wqe;
2068 hwq_attr.stride = sizeof(struct cq_base);
2069 hwq_attr.type = HWQ_TYPE_QUEUE;
2070 hwq_attr.sginfo = &cq->sg_info;
2071 rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2075 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2076 CMDQ_BASE_OPCODE_CREATE_CQ,
2079 req.dpi = cpu_to_le32(cq->dpi->dpi);
2080 req.cq_handle = cpu_to_le64(cq->cq_handle);
2081 req.cq_size = cpu_to_le32(cq->hwq.max_elements);
2082 pbl = &cq->hwq.pbl[PBL_LVL_0];
2083 pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2084 CMDQ_CREATE_CQ_PG_SIZE_SFT);
2085 pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2086 req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2087 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2088 req.cq_fco_cnq_id = cpu_to_le32(
2089 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2090 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2091 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2093 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2097 cq->id = le32_to_cpu(resp.xid);
2098 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2099 init_waitqueue_head(&cq->waitq);
2100 INIT_LIST_HEAD(&cq->sqf_head);
2101 INIT_LIST_HEAD(&cq->rqf_head);
2102 spin_lock_init(&cq->compl_lock);
2103 spin_lock_init(&cq->flush_lock);
2105 cq->dbinfo.hwq = &cq->hwq;
2106 cq->dbinfo.xid = cq->id;
2107 cq->dbinfo.db = cq->dpi->dbr;
2108 cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
2110 bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2115 bnxt_qplib_free_hwq(res, &cq->hwq);
2119 void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
2120 struct bnxt_qplib_cq *cq)
2122 bnxt_qplib_free_hwq(res, &cq->hwq);
2123 memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
2126 int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
2129 struct bnxt_qplib_hwq_attr hwq_attr = {};
2130 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2131 struct creq_resize_cq_resp resp = {};
2132 struct bnxt_qplib_cmdqmsg msg = {};
2133 struct cmdq_resize_cq req = {};
2134 struct bnxt_qplib_pbl *pbl;
2135 u32 pg_sz, lvl, new_sz;
2138 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2139 CMDQ_BASE_OPCODE_RESIZE_CQ,
2141 hwq_attr.sginfo = &cq->sg_info;
2143 hwq_attr.depth = new_cqes;
2144 hwq_attr.stride = sizeof(struct cq_base);
2145 hwq_attr.type = HWQ_TYPE_QUEUE;
2146 rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
2150 req.cq_cid = cpu_to_le32(cq->id);
2151 pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
2152 pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
2153 lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
2154 CMDQ_RESIZE_CQ_LVL_MASK;
2155 new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
2156 CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
2157 req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
2158 req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2160 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2162 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2166 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2168 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2169 struct creq_destroy_cq_resp resp = {};
2170 struct bnxt_qplib_cmdqmsg msg = {};
2171 struct cmdq_destroy_cq req = {};
2172 u16 total_cnq_events;
2175 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2176 CMDQ_BASE_OPCODE_DESTROY_CQ,
2179 req.cq_cid = cpu_to_le32(cq->id);
2180 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2182 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2185 total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2186 __wait_for_all_nqes(cq, total_cnq_events);
2187 bnxt_qplib_free_hwq(res, &cq->hwq);
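/* Flush the SQ: generate FLUSHED_ERR completions for outstanding WQEs,
 * skipping fence WQEs and honouring the polling budget.
 */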
2191 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2192 struct bnxt_qplib_cqe **pcqe, int *budget)
2194 struct bnxt_qplib_cqe *cqe;
2198 /* Now complete all outstanding SQEs with FLUSHED_ERR */
2199 start = sq->swq_start;
2202 last = sq->swq_last;
2205 /* Skip the FENCE WQE completions */
2206 if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2207 bnxt_qplib_cancel_phantom_processing(qp);
2210 memset(cqe, 0, sizeof(*cqe));
2211 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2212 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2213 cqe->qp_handle = (u64)(unsigned long)qp;
2214 cqe->wr_id = sq->swq[last].wr_id;
2215 cqe->src_qp = qp->id;
2216 cqe->type = sq->swq[last].type;
2220 bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[last].slots);
2221 sq->swq_last = sq->swq[last].next_idx;
2224 if (!(*budget) && sq->swq_last != start)
2231 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2232 struct bnxt_qplib_cqe **pcqe, int *budget)
2234 struct bnxt_qplib_cqe *cqe;
2240 case CMDQ_CREATE_QP1_TYPE_GSI:
2241 opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2243 case CMDQ_CREATE_QP_TYPE_RC:
2244 opcode = CQ_BASE_CQE_TYPE_RES_RC;
2246 case CMDQ_CREATE_QP_TYPE_UD:
2247 case CMDQ_CREATE_QP_TYPE_GSI:
2248 opcode = CQ_BASE_CQE_TYPE_RES_UD;
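/* Note: the opcode selected above must match the QP type so that the
 * consumer decodes the fabricated flush CQE correctly.
 */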
2252 /* Flush the rest of the RQ */
2253 start = rq->swq_start;
2256 last = rq->swq_last;
2259 memset(cqe, 0, sizeof(*cqe));
2261 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2262 cqe->opcode = opcode;
2263 cqe->qp_handle = (u64)(unsigned long)qp;
2264 cqe->wr_id = rq->swq[last].wr_id;
2267 bnxt_qplib_hwq_incr_cons(&rq->hwq, rq->swq[last].slots);
2268 rq->swq_last = rq->swq[last].next_idx;
2271 if (!*budget && rq->swq_last != start)
2278 void bnxt_qplib_mark_qp_error(void *qp_handle)
2280 struct bnxt_qplib_qp *qp = qp_handle;
2285 /* Must block new posting of SQ and RQ */
2286 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2287 bnxt_qplib_cancel_phantom_processing(qp);
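/* Sketch of the WA 9060 (phantom CQE) handling below: if the SWQE's
 * psn_search entry carries the "marked" flag, completion is deferred:
 * sq->condition/send_phantom are set and the CQ is re-armed. Subsequent
 * CQEs are then peeked until a REQ CQE whose sq_cons_idx points back at a
 * FENCE WQE (the phantom completion) is seen, after which normal
 * completion processing resumes.
 */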
2290 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
2291 * CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
2293 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2294 u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
2296 u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
2297 struct bnxt_qplib_q *sq = &qp->sq;
2298 struct cq_req *peek_req_hwcqe;
2299 struct bnxt_qplib_qp *peek_qp;
2300 struct bnxt_qplib_q *peek_sq;
2301 struct bnxt_qplib_swq *swq;
2302 struct cq_base *peek_hwcqe;
2306 /* Check for the psn_search marking before completing */
2307 swq = &sq->swq[swq_last];
2308 if (swq->psn_search &&
2309 le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2311 swq->psn_search->flags_next_psn = cpu_to_le32
2312 (le32_to_cpu(swq->psn_search->flags_next_psn)
2314 dev_dbg(&cq->hwq.pdev->dev,
2315 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2316 cq_cons, qp->id, swq_last, cqe_sq_cons);
2317 sq->condition = true;
2318 sq->send_phantom = true;
2320 /* TODO: Only ARM if the previous SQE is ARMALL */
2321 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2325 if (sq->condition) {
2326 /* Peek at the completions */
2327 peek_raw_cq_cons = cq->hwq.cons;
2328 peek_sw_cq_cons = cq_cons;
2329 i = cq->hwq.max_elements;
2331 peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
2332 peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2333 peek_sw_cq_cons, NULL);
2334 /* If the next hwcqe is VALID */
2335 if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
2336 cq->hwq.max_elements)) {
2338 * The valid test of the entry must be done first before
2339 * reading any further.
2342 /* If the next hwcqe is a REQ */
2343 if ((peek_hwcqe->cqe_type_toggle &
2344 CQ_BASE_CQE_TYPE_MASK) ==
2345 CQ_BASE_CQE_TYPE_REQ) {
2346 peek_req_hwcqe = (struct cq_req *)
2348 peek_qp = (struct bnxt_qplib_qp *)
2351 (peek_req_hwcqe->qp_handle));
2352 peek_sq = &peek_qp->sq;
2355 peek_req_hwcqe->sq_cons_idx)
2356 - 1) % sq->max_wqe);
2357 /* If the hwcqe's sq's wr_id matches */
2358 if (peek_sq == sq &&
2359 sq->swq[peek_sq_cons_idx].wr_id ==
2360 BNXT_QPLIB_FENCE_WRID) {
2362 * Unbreak only if the phantom
2365 dev_dbg(&cq->hwq.pdev->dev,
2366 "FP: Got Phantom CQE\n");
2367 sq->condition = false;
2373 /* Valid but not the phantom, so keep looping */
2375 /* Not valid yet, just exit and wait */
2382 dev_err(&cq->hwq.pdev->dev,
2383 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2384 cq_cons, qp->id, swq_last, cqe_sq_cons);
2391 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2392 struct cq_req *hwcqe,
2393 struct bnxt_qplib_cqe **pcqe, int *budget,
2394 u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2396 struct bnxt_qplib_swq *swq;
2397 struct bnxt_qplib_cqe *cqe;
2398 struct bnxt_qplib_qp *qp;
2399 struct bnxt_qplib_q *sq;
2403 qp = (struct bnxt_qplib_qp *)((unsigned long)
2404 le64_to_cpu(hwcqe->qp_handle));
2406 dev_err(&cq->hwq.pdev->dev,
2407 "FP: Process Req qp is NULL\n");
2412 cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
2413 if (qp->sq.flushed) {
2414 dev_dbg(&cq->hwq.pdev->dev,
2415 "%s: QP in Flush QP = %p\n", __func__, qp);
2418 /* We must walk the sq's swq to fabricate CQEs for all previously
2419 * signaled SWQEs, because of CQE aggregation, from the current sq cons
2420 * up to the cqe_sq_cons
2424 if (sq->swq_last == cqe_sq_cons)
2428 swq = &sq->swq[sq->swq_last];
2429 memset(cqe, 0, sizeof(*cqe));
2430 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2431 cqe->qp_handle = (u64)(unsigned long)qp;
2432 cqe->src_qp = qp->id;
2433 cqe->wr_id = swq->wr_id;
2434 if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2436 cqe->type = swq->type;
2438 /* For the last CQE, check the status. For errors, regardless of
2439 * whether the request was signaled or not, it must complete with
2440 * the hwcqe error status
2442 if (swq->next_idx == cqe_sq_cons &&
2443 hwcqe->status != CQ_REQ_STATUS_OK) {
2444 cqe->status = hwcqe->status;
2445 dev_err(&cq->hwq.pdev->dev,
2446 "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2447 sq->swq_last, cqe->wr_id, cqe->status);
2450 bnxt_qplib_mark_qp_error(qp);
2451 /* Add qp to flush list of the CQ */
2452 bnxt_qplib_add_flush_qp(qp);
2454 /* Before we complete, do WA 9060 */
2455 if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2460 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2461 cqe->status = CQ_REQ_STATUS_OK;
2467 bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots);
2468 sq->swq_last = swq->next_idx;
2474 if (sq->swq_last != cqe_sq_cons) {
2480 * Back to normal completion mode only after it has completed all of
2481 * the WC for this CQE
2488 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2490 spin_lock(&srq->hwq.lock);
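/* Return the SRQE tag to the software free list: link it behind the
 * current tail, make it the new tail and bump the consumer counter.
 */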
2491 srq->swq[srq->last_idx].next_idx = (int)tag;
2492 srq->last_idx = (int)tag;
2493 srq->swq[srq->last_idx].next_idx = -1;
2494 srq->hwq.cons++; /* Support for SRQE counter */
2495 spin_unlock(&srq->hwq.lock);
2498 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2499 struct cq_res_rc *hwcqe,
2500 struct bnxt_qplib_cqe **pcqe,
2503 struct bnxt_qplib_srq *srq;
2504 struct bnxt_qplib_cqe *cqe;
2505 struct bnxt_qplib_qp *qp;
2506 struct bnxt_qplib_q *rq;
2509 qp = (struct bnxt_qplib_qp *)((unsigned long)
2510 le64_to_cpu(hwcqe->qp_handle));
2512 dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2515 if (qp->rq.flushed) {
2516 dev_dbg(&cq->hwq.pdev->dev,
2517 "%s: QP in Flush QP = %p\n", __func__, qp);
2522 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2523 cqe->length = le32_to_cpu(hwcqe->length);
2524 cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2525 cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2526 cqe->flags = le16_to_cpu(hwcqe->flags);
2527 cqe->status = hwcqe->status;
2528 cqe->qp_handle = (u64)(unsigned long)qp;
2530 wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2531 CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
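/* The low bits of srq_or_rq_wr_id either index the SRQ element directly
 * (SRQ case) or are expected to match the RQ's software consumer index.
 */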
2532 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2536 if (wr_id_idx >= srq->hwq.max_elements) {
2537 dev_err(&cq->hwq.pdev->dev,
2538 "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2539 wr_id_idx, srq->hwq.max_elements);
2542 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2543 bnxt_qplib_release_srqe(srq, wr_id_idx);
2548 struct bnxt_qplib_swq *swq;
2551 if (wr_id_idx > (rq->max_wqe - 1)) {
2552 dev_err(&cq->hwq.pdev->dev,
2553 "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2554 wr_id_idx, rq->max_wqe);
2557 if (wr_id_idx != rq->swq_last)
2559 swq = &rq->swq[rq->swq_last];
2560 cqe->wr_id = swq->wr_id;
2563 bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2564 rq->swq_last = swq->next_idx;
2567 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2568 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2569 /* Add qp to flush list of the CQ */
2570 bnxt_qplib_add_flush_qp(qp);
2577 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2578 struct cq_res_ud *hwcqe,
2579 struct bnxt_qplib_cqe **pcqe,
2582 struct bnxt_qplib_srq *srq;
2583 struct bnxt_qplib_cqe *cqe;
2584 struct bnxt_qplib_qp *qp;
2585 struct bnxt_qplib_q *rq;
2588 qp = (struct bnxt_qplib_qp *)((unsigned long)
2589 le64_to_cpu(hwcqe->qp_handle));
2591 dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2594 if (qp->rq.flushed) {
2595 dev_dbg(&cq->hwq.pdev->dev,
2596 "%s: QP in Flush QP = %p\n", __func__, qp);
2600 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2601 cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2602 cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2603 cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2604 cqe->flags = le16_to_cpu(hwcqe->flags);
2605 cqe->status = hwcqe->status;
2606 cqe->qp_handle = (u64)(unsigned long)qp;
2607 /* FIXME: Endianness fix needed for smac */
2608 memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2609 wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2610 & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
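/* The source QP number of a UD completion is split across src_qp_low and
 * the high bits of src_qp_high_srq_or_rq_wr_id; reassemble it here.
 */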
2611 cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2613 hwcqe->src_qp_high_srq_or_rq_wr_id) &
2614 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2616 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2621 if (wr_id_idx >= srq->hwq.max_elements) {
2622 dev_err(&cq->hwq.pdev->dev,
2623 "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2624 wr_id_idx, srq->hwq.max_elements);
2627 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2628 bnxt_qplib_release_srqe(srq, wr_id_idx);
2633 struct bnxt_qplib_swq *swq;
2636 if (wr_id_idx > (rq->max_wqe - 1)) {
2637 dev_err(&cq->hwq.pdev->dev,
2638 "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2639 wr_id_idx, rq->max_wqe);
2643 if (rq->swq_last != wr_id_idx)
2645 swq = &rq->swq[rq->swq_last];
2646 cqe->wr_id = swq->wr_id;
2649 bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2650 rq->swq_last = swq->next_idx;
2653 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2654 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2655 /* Add qp to flush list of the CQ */
2656 bnxt_qplib_add_flush_qp(qp);
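/* A CQ is considered empty when the CQE at the software consumer index
 * fails the valid-bit (toggle) check against the current consumer phase.
 */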
2663 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2665 struct cq_base *hw_cqe;
2666 u32 sw_cons, raw_cons;
2669 raw_cons = cq->hwq.cons;
2670 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2671 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
2672 /* Check for Valid bit. If the CQE is valid, return false */
2673 rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
2677 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2678 struct cq_res_raweth_qp1 *hwcqe,
2679 struct bnxt_qplib_cqe **pcqe,
2682 struct bnxt_qplib_qp *qp;
2683 struct bnxt_qplib_q *rq;
2684 struct bnxt_qplib_srq *srq;
2685 struct bnxt_qplib_cqe *cqe;
2688 qp = (struct bnxt_qplib_qp *)((unsigned long)
2689 le64_to_cpu(hwcqe->qp_handle));
2691 dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2694 if (qp->rq.flushed) {
2695 dev_dbg(&cq->hwq.pdev->dev,
2696 "%s: QP in Flush QP = %p\n", __func__, qp);
2700 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2701 cqe->flags = le16_to_cpu(hwcqe->flags);
2702 cqe->qp_handle = (u64)(unsigned long)qp;
2705 le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2706 & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2707 cqe->src_qp = qp->id;
2708 if (qp->id == 1 && !cqe->length) {
2709 /* Add workaround for the length misdetection */
2712 cqe->length = le16_to_cpu(hwcqe->length);
2714 cqe->pkey_index = qp->pkey_index;
2715 memcpy(cqe->smac, qp->smac, 6);
2717 cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2718 cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2719 cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2721 if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2724 dev_err(&cq->hwq.pdev->dev,
2725 "FP: SRQ used but not defined??\n");
2728 if (wr_id_idx >= srq->hwq.max_elements) {
2729 dev_err(&cq->hwq.pdev->dev,
2730 "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2731 wr_id_idx, srq->hwq.max_elements);
2734 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2735 bnxt_qplib_release_srqe(srq, wr_id_idx);
2740 struct bnxt_qplib_swq *swq;
2743 if (wr_id_idx > (rq->max_wqe - 1)) {
2744 dev_err(&cq->hwq.pdev->dev,
2745 "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2746 wr_id_idx, rq->max_wqe);
2749 if (rq->swq_last != wr_id_idx)
2751 swq = &rq->swq[rq->swq_last];
2752 cqe->wr_id = swq->wr_id;
2755 bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2756 rq->swq_last = swq->next_idx;
2759 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2760 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2761 /* Add qp to flush list of the CQ */
2762 bnxt_qplib_add_flush_qp(qp);
2769 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2770 struct cq_terminal *hwcqe,
2771 struct bnxt_qplib_cqe **pcqe,
2774 struct bnxt_qplib_qp *qp;
2775 struct bnxt_qplib_q *sq, *rq;
2776 struct bnxt_qplib_cqe *cqe;
2777 u32 swq_last = 0, cqe_cons;
2780 /* Check the Status */
2781 if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2782 dev_warn(&cq->hwq.pdev->dev,
2783 "FP: CQ Process Terminal Error status = 0x%x\n",
2786 qp = (struct bnxt_qplib_qp *)((unsigned long)
2787 le64_to_cpu(hwcqe->qp_handle));
2791 /* Must block new posting of SQ and RQ */
2792 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2797 cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2798 if (cqe_cons == 0xFFFF)
2800 cqe_cons %= sq->max_wqe;
2802 if (qp->sq.flushed) {
2803 dev_dbg(&cq->hwq.pdev->dev,
2804 "%s: QP in Flush QP = %p\n", __func__, qp);
2808 /* A terminal CQE can also aggregate successful CQEs that precede it,
2809 * so we must complete all CQEs from the current sq cons up to the
2810 * cq_cons with status OK
2814 swq_last = sq->swq_last;
2815 if (swq_last == cqe_cons)
2817 if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2818 memset(cqe, 0, sizeof(*cqe));
2819 cqe->status = CQ_REQ_STATUS_OK;
2820 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2821 cqe->qp_handle = (u64)(unsigned long)qp;
2822 cqe->src_qp = qp->id;
2823 cqe->wr_id = sq->swq[swq_last].wr_id;
2824 cqe->type = sq->swq[swq_last].type;
2828 bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots);
2829 sq->swq_last = sq->swq[swq_last].next_idx;
2832 if (!(*budget) && swq_last != cqe_cons) {
2841 cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2842 if (cqe_cons == 0xFFFF) {
2844 } else if (cqe_cons > rq->max_wqe - 1) {
2845 dev_err(&cq->hwq.pdev->dev,
2846 "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2847 cqe_cons, rq->max_wqe);
2852 if (qp->rq.flushed) {
2853 dev_dbg(&cq->hwq.pdev->dev,
2854 "%s: QP in Flush QP = %p\n", __func__, qp);
2859 /* A terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2860 * from the current rq->cons to the rq->prod, regardless of the
2861 * rq->cons value the terminal CQE indicates
2864 /* Add qp to flush list of the CQ */
2865 bnxt_qplib_add_flush_qp(qp);
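/* A CUT_OFF CQE marks the point at which the old ring has been drained
 * after a CQ resize; it clears the resize-in-progress flag and wakes any
 * waiter in the resize path.
 */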
2870 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2871 struct cq_cutoff *hwcqe)
2873 /* Check the Status */
2874 if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2875 dev_err(&cq->hwq.pdev->dev,
2876 "FP: CQ Process Cutoff Error status = 0x%x\n",
2880 clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2881 wake_up_interruptible(&cq->waitq);
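/* Generate software flush completions, under the CQ flush_lock, for every
 * QP currently queued on this CQ's SQ/RQ flush lists.
 */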
2886 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2887 struct bnxt_qplib_cqe *cqe,
2890 struct bnxt_qplib_qp *qp = NULL;
2891 u32 budget = num_cqes;
2892 unsigned long flags;
2894 spin_lock_irqsave(&cq->flush_lock, flags);
2895 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2896 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2897 __flush_sq(&qp->sq, qp, &cqe, &budget);
2900 list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2901 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2902 __flush_rq(&qp->rq, qp, &cqe, &budget);
2904 spin_unlock_irqrestore(&cq->flush_lock, flags);
2906 return num_cqes - budget;
2909 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2910 int num_cqes, struct bnxt_qplib_qp **lib_qp)
2912 struct cq_base *hw_cqe;
2913 u32 sw_cons, raw_cons;
2917 raw_cons = cq->hwq.cons;
2921 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2922 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
2924 /* Check for Valid bit */
2925 if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2929 * The valid test of the entry must be done first before
2930 * reading any further.
2933 /* From the device's respective CQE format to qplib_wc */
2934 type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2936 case CQ_BASE_CQE_TYPE_REQ:
2937 rc = bnxt_qplib_cq_process_req(cq,
2938 (struct cq_req *)hw_cqe,
2942 case CQ_BASE_CQE_TYPE_RES_RC:
2943 rc = bnxt_qplib_cq_process_res_rc(cq,
2944 (struct cq_res_rc *)
2948 case CQ_BASE_CQE_TYPE_RES_UD:
2949 rc = bnxt_qplib_cq_process_res_ud
2950 (cq, (struct cq_res_ud *)hw_cqe, &cqe,
2953 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2954 rc = bnxt_qplib_cq_process_res_raweth_qp1
2955 (cq, (struct cq_res_raweth_qp1 *)
2956 hw_cqe, &cqe, &budget);
2958 case CQ_BASE_CQE_TYPE_TERMINAL:
2959 rc = bnxt_qplib_cq_process_terminal
2960 (cq, (struct cq_terminal *)hw_cqe,
2963 case CQ_BASE_CQE_TYPE_CUT_OFF:
2964 bnxt_qplib_cq_process_cutoff
2965 (cq, (struct cq_cutoff *)hw_cqe);
2966 /* Done processing this CQ */
2969 dev_err(&cq->hwq.pdev->dev,
2970 "process_cq unknown type 0x%lx\n",
2971 hw_cqe->cqe_type_toggle &
2972 CQ_BASE_CQE_TYPE_MASK);
2979 /* Error while processing the CQE, just skip to the
2982 if (type != CQ_BASE_CQE_TYPE_TERMINAL)
2983 dev_err(&cq->hwq.pdev->dev,
2984 "process_cqe error rc = 0x%x\n", rc);
2988 if (cq->hwq.cons != raw_cons) {
2989 cq->hwq.cons = raw_cons;
2990 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
2993 return num_cqes - budget;
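/* Re-arm the CQ with the requested arm type and record the armed state so
 * the NQ handler knows whether to invoke the CQ handler.
 */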
2996 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2999 bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
3000 /* cq->arm_state tracks whether the CQ handler should be issued */
3001 atomic_set(&cq->arm_state, 1);
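/* Flush the CQ notification workqueue(s) of both CQs attached to the QP so
 * that no deferred completion handler is still running for it.
 */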
3004 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
3006 flush_workqueue(qp->scq->nq->cqn_wq);
3007 if (qp->scq != qp->rcq)
3008 flush_workqueue(qp->rcq->nq->cqn_wq);