/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

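/* Reset the SQ's phantom-completion search state. condition, send_phantom
 * and single track an in-progress hunt for a phantom WQE completion;
 * clearing them abandons that search, e.g. when the QP is being moved to
 * the flush list.
 */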
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
        qp->sq.condition = false;
        qp->sq.send_phantom = false;
        qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_cq *scq, *rcq;

        scq = qp->scq;
        rcq = qp->rcq;

        if (!qp->sq.flushed) {
                dev_dbg(&scq->hwq.pdev->dev,
                        "FP: Adding to SQ Flush list = %p\n", qp);
                bnxt_qplib_cancel_phantom_processing(qp);
                list_add_tail(&qp->sq_flush, &scq->sqf_head);
                qp->sq.flushed = true;
        }
        if (!qp->srq) {
                if (!qp->rq.flushed) {
                        dev_dbg(&rcq->hwq.pdev->dev,
                                "FP: Adding to RQ Flush list = %p\n", qp);
                        list_add_tail(&qp->rq_flush, &rcq->rqf_head);
                        qp->rq.flushed = true;
                }
        }
}

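/* Both CQ flush locks are always taken in the same order: send CQ first,
 * then receive CQ. When a QP uses one CQ for both directions only the one
 * real lock is taken; the __acquire()/__release() annotations keep
 * sparse's lock balance correct for that case.
 */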
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
                                       unsigned long *flags)
        __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
        spin_lock_irqsave(&qp->scq->flush_lock, *flags);
        if (qp->scq == qp->rcq)
                __acquire(&qp->rcq->flush_lock);
        else
                spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
                                       unsigned long *flags)
        __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
        if (qp->scq == qp->rcq)
                __release(&qp->rcq->flush_lock);
        else
                spin_unlock(&qp->rcq->flush_lock);
        spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
        unsigned long flags;

        bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
        __bnxt_qplib_add_flush_qp(qp);
        bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
        if (qp->sq.flushed) {
                qp->sq.flushed = false;
                list_del(&qp->sq_flush);
        }
        if (!qp->srq) {
                if (qp->rq.flushed) {
                        qp->rq.flushed = false;
                        list_del(&qp->rq_flush);
                }
        }
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
        unsigned long flags;

        bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
        __clean_cq(qp->scq, (u64)(unsigned long)qp);
        qp->sq.hwq.prod = 0;
        qp->sq.hwq.cons = 0;
        __clean_cq(qp->rcq, (u64)(unsigned long)qp);
        qp->rq.hwq.prod = 0;
        qp->rq.hwq.cons = 0;

        __bnxt_qplib_del_flush_qp(qp);
        bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
        struct bnxt_qplib_nq_work *nq_work =
                        container_of(work, struct bnxt_qplib_nq_work, work);

        struct bnxt_qplib_cq *cq = nq_work->cq;
        struct bnxt_qplib_nq *nq = nq_work->nq;

        if (cq && nq) {
                spin_lock_bh(&cq->compl_lock);
                if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
                        dev_dbg(&nq->pdev->dev,
                                "%s: Trigger cq = %p event nq = %p\n",
                                __func__, cq, nq);
                        nq->cqn_handler(nq, cq);
                }
                spin_unlock_bh(&cq->compl_lock);
        }
        kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
                                       struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_q *rq = &qp->rq;
        struct bnxt_qplib_q *sq = &qp->sq;

        if (qp->rq_hdr_buf)
                dma_free_coherent(&res->pdev->dev,
                                  rq->max_wqe * qp->rq_hdr_buf_size,
                                  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
        if (qp->sq_hdr_buf)
                dma_free_coherent(&res->pdev->dev,
                                  sq->max_wqe * qp->sq_hdr_buf_size,
                                  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
        qp->rq_hdr_buf = NULL;
        qp->sq_hdr_buf = NULL;
        qp->rq_hdr_buf_map = 0;
        qp->sq_hdr_buf_map = 0;
        qp->sq_hdr_buf_size = 0;
        qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
                                       struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_q *rq = &qp->rq;
        struct bnxt_qplib_q *sq = &qp->sq;
        int rc = 0;

        if (qp->sq_hdr_buf_size && sq->max_wqe) {
                qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
                                        sq->max_wqe * qp->sq_hdr_buf_size,
                                        &qp->sq_hdr_buf_map, GFP_KERNEL);
                if (!qp->sq_hdr_buf) {
                        rc = -ENOMEM;
                        dev_err(&res->pdev->dev,
                                "Failed to create sq_hdr_buf\n");
                        goto fail;
                }
        }

        if (qp->rq_hdr_buf_size && rq->max_wqe) {
                qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
                                                    rq->max_wqe *
                                                    qp->rq_hdr_buf_size,
                                                    &qp->rq_hdr_buf_map,
                                                    GFP_KERNEL);
                if (!qp->rq_hdr_buf) {
                        rc = -ENOMEM;
                        dev_err(&res->pdev->dev,
                                "Failed to create rq_hdr_buf\n");
                        goto fail;
                }
        }
        return 0;

fail:
        bnxt_qplib_free_qp_hdr_buf(res, qp);
        return rc;
}

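/* Consume pending NQ entries, zeroing the CQ handle of any CQ notification
 * aimed at the given CQ so it is never dispatched, and counting it in
 * cq->cnq_events. Used while draining a CQ's notifications (see
 * __wait_for_all_nqes() below).
 */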
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
        struct bnxt_qplib_hwq *hwq = &nq->hwq;
        struct nq_base *nqe, **nq_ptr;
        int budget = nq->budget;
        u32 sw_cons, raw_cons;
        uintptr_t q_handle;
        u16 type;

        spin_lock_bh(&hwq->lock);
        /* Service the NQ until empty */
        raw_cons = hwq->cons;
        while (budget--) {
                sw_cons = HWQ_CMP(raw_cons, hwq);
                nq_ptr = (struct nq_base **)hwq->pbl_ptr;
                nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
                if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
                        break;

                /*
                 * The valid test of the entry must be done first before
                 * reading any further.
                 */
                dma_rmb();

                type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
                switch (type) {
                case NQ_BASE_TYPE_CQ_NOTIFICATION:
                {
                        struct nq_cn *nqcne = (struct nq_cn *)nqe;

                        q_handle = le32_to_cpu(nqcne->cq_handle_low);
                        q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
                                                     << 32;
                        if ((unsigned long)cq == q_handle) {
                                nqcne->cq_handle_low = 0;
                                nqcne->cq_handle_high = 0;
                                cq->cnq_events++;
                        }
                        break;
                }
                default:
                        break;
                }
                raw_cons++;
        }
        spin_unlock_bh(&hwq->lock);
}

/* Wait until all NQEs destined for this CQ have been received, cleaning
 * any that are still queued on the NQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
        u32 retry_cnt = 100;

        while (retry_cnt--) {
                if (cnq_events == cq->cnq_events)
                        return;
                usleep_range(50, 100);
                clean_nq(cq->nq, cq);
        }
}

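/* NQ servicing tasklet: walk the hardware notification queue, dispatch CQ,
 * SRQ and DBQ events to the registered handlers, then ring the NQ doorbell
 * with the updated consumer index.
 */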
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
        struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
        struct bnxt_qplib_hwq *hwq = &nq->hwq;
        struct bnxt_qplib_cq *cq;
        int budget = nq->budget;
        u32 sw_cons, raw_cons;
        struct nq_base *nqe;
        uintptr_t q_handle;
        u16 type;

        spin_lock_bh(&hwq->lock);
        /* Service the NQ until empty */
        raw_cons = hwq->cons;
        while (budget--) {
                sw_cons = HWQ_CMP(raw_cons, hwq);
                nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
                if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
                        break;

                /*
                 * The valid test of the entry must be done first before
                 * reading any further.
                 */
                dma_rmb();

                type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
                switch (type) {
                case NQ_BASE_TYPE_CQ_NOTIFICATION:
                {
                        struct nq_cn *nqcne = (struct nq_cn *)nqe;

                        q_handle = le32_to_cpu(nqcne->cq_handle_low);
                        q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
                                                     << 32;
                        cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
                        if (!cq)
                                break;
                        bnxt_qplib_armen_db(&cq->dbinfo,
                                            DBC_DBC_TYPE_CQ_ARMENA);
                        spin_lock_bh(&cq->compl_lock);
                        atomic_set(&cq->arm_state, 0);
                        if (nq->cqn_handler(nq, cq))
                                dev_warn(&nq->pdev->dev,
                                         "cqn - type 0x%x not handled\n", type);
                        cq->cnq_events++;
                        spin_unlock_bh(&cq->compl_lock);
                        break;
                }
                case NQ_BASE_TYPE_SRQ_EVENT:
                {
                        struct bnxt_qplib_srq *srq;
                        struct nq_srq_event *nqsrqe =
                                                (struct nq_srq_event *)nqe;

                        q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
                        q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
                                     << 32;
                        srq = (struct bnxt_qplib_srq *)q_handle;
                        bnxt_qplib_armen_db(&srq->dbinfo,
                                            DBC_DBC_TYPE_SRQ_ARMENA);
                        if (nq->srqn_handler(nq, srq, nqsrqe->event))
                                dev_warn(&nq->pdev->dev,
                                         "SRQ event 0x%x not handled\n",
                                         nqsrqe->event);
                        break;
                }
                case NQ_BASE_TYPE_DBQ_EVENT:
                        break;
                default:
                        dev_warn(&nq->pdev->dev,
                                 "nqe with type = 0x%x not handled\n", type);
                        break;
                }
                raw_cons++;
        }
        if (hwq->cons != raw_cons) {
                hwq->cons = raw_cons;
                bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
        }
        spin_unlock_bh(&hwq->lock);
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
        struct bnxt_qplib_nq *nq = dev_instance;
        struct bnxt_qplib_hwq *hwq = &nq->hwq;
        u32 sw_cons;

        /* Prefetch the NQ element */
        sw_cons = HWQ_CMP(hwq->cons, hwq);
        prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

        /* Fan out to CPU affinitized kthreads? */
        tasklet_schedule(&nq->nq_tasklet);

        return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
        if (!nq->requested)
                return;

        tasklet_disable(&nq->nq_tasklet);
        /* Mask h/w interrupt */
        bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
        /* Sync with last running IRQ handler */
        synchronize_irq(nq->msix_vec);
        if (kill)
                tasklet_kill(&nq->nq_tasklet);

        irq_set_affinity_hint(nq->msix_vec, NULL);
        free_irq(nq->msix_vec, nq);
        kfree(nq->name);
        nq->name = NULL;
        nq->requested = false;
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
        if (nq->cqn_wq) {
                destroy_workqueue(nq->cqn_wq);
                nq->cqn_wq = NULL;
        }

        /* Make sure the HW is stopped! */
        bnxt_qplib_nq_stop_irq(nq, true);

        if (nq->nq_db.reg.bar_reg) {
                iounmap(nq->nq_db.reg.bar_reg);
                nq->nq_db.reg.bar_reg = NULL;
        }

        nq->cqn_handler = NULL;
        nq->srqn_handler = NULL;
        nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
                            int msix_vector, bool need_init)
{
        struct bnxt_qplib_res *res = nq->res;
        int rc;

        if (nq->requested)
                return -EFAULT;

        nq->msix_vec = msix_vector;
        if (need_init)
                tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
        else
                tasklet_enable(&nq->nq_tasklet);

        nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
                             nq_indx, pci_name(res->pdev));
        if (!nq->name)
                return -ENOMEM;
        rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
        if (rc) {
                kfree(nq->name);
                nq->name = NULL;
                tasklet_disable(&nq->nq_tasklet);
                return rc;
        }

        cpumask_clear(&nq->mask);
        cpumask_set_cpu(nq_indx, &nq->mask);
        rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
        if (rc) {
                dev_warn(&nq->pdev->dev,
                         "set affinity failed; vector: %d nq_idx: %d\n",
                         nq->msix_vec, nq_indx);
        }
        nq->requested = true;
        bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

        return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
        resource_size_t reg_base;
        struct bnxt_qplib_nq_db *nq_db;
        struct pci_dev *pdev;

        pdev = nq->pdev;
        nq_db = &nq->nq_db;

        nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
        nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
        if (!nq_db->reg.bar_base) {
                dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
                        nq_db->reg.bar_id);
                return -ENOMEM;
        }

        reg_base = nq_db->reg.bar_base + reg_offt;
        /* Unconditionally map 8 bytes to support 57500 series */
        nq_db->reg.len = 8;
        nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
        if (!nq_db->reg.bar_reg) {
                dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
                        nq_db->reg.bar_id);
                return -ENOMEM;
        }

        nq_db->dbinfo.db = nq_db->reg.bar_reg;
        nq_db->dbinfo.hwq = &nq->hwq;
        nq_db->dbinfo.xid = nq->ring_id;

        return 0;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
                         int nq_idx, int msix_vector, int bar_reg_offset,
                         cqn_handler_t cqn_handler,
                         srqn_handler_t srqn_handler)
{
        int rc;

        nq->pdev = pdev;
        nq->cqn_handler = cqn_handler;
        nq->srqn_handler = srqn_handler;

        /* Have a task to schedule CQ notifiers in post send case */
        nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
        if (!nq->cqn_wq)
                return -ENOMEM;

        rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
        if (rc)
                goto fail;

        rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
        if (rc) {
                dev_err(&nq->pdev->dev,
                        "Failed to request irq for nq-idx %d\n", nq_idx);
                goto fail;
        }

        return 0;
fail:
        bnxt_qplib_disable_nq(nq);
        return rc;
}

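/* For reference, a minimal NQ bring-up/teardown sequence as driven by the
 * upper layer might look like the sketch below (error handling elided;
 * "my_cqn_handler" and "my_srqn_handler" are hypothetical caller-supplied
 * callbacks, not names from this file):
 *
 *      rc = bnxt_qplib_alloc_nq(res, nq);
 *      rc = bnxt_qplib_enable_nq(pdev, nq, nq_idx, msix_vec, bar_offset,
 *                                my_cqn_handler, my_srqn_handler);
 *      ...
 *      bnxt_qplib_disable_nq(nq);
 *      bnxt_qplib_free_nq(nq);
 */
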
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
        if (nq->hwq.max_elements) {
                bnxt_qplib_free_hwq(nq->res, &nq->hwq);
                nq->hwq.max_elements = 0;
        }
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
        struct bnxt_qplib_hwq_attr hwq_attr = {};
        struct bnxt_qplib_sg_info sginfo = {};

        nq->pdev = res->pdev;
        nq->res = res;
        if (!nq->hwq.max_elements ||
            nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
                nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

        sginfo.pgsize = PAGE_SIZE;
        sginfo.pgshft = PAGE_SHIFT;
        hwq_attr.res = res;
        hwq_attr.sginfo = &sginfo;
        hwq_attr.depth = nq->hwq.max_elements;
        hwq_attr.stride = sizeof(struct nq_base);
        hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
        if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
                dev_err(&nq->pdev->dev, "FP NQ allocation failed");
                return -ENOMEM;
        }
        nq->budget = 8;
        return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
                            struct bnxt_qplib_srq *srq)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct creq_destroy_srq_resp resp = {};
        struct bnxt_qplib_cmdqmsg msg = {};
        struct cmdq_destroy_srq req = {};
        int rc;

        bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                 CMDQ_BASE_OPCODE_DESTROY_SRQ,
                                 sizeof(req));

        /* Configure the request */
        req.srq_cid = cpu_to_le32(srq->id);

        bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
        rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
        kfree(srq->swq);
        if (rc)
                return;
        bnxt_qplib_free_hwq(res, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_srq *srq)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct bnxt_qplib_hwq_attr hwq_attr = {};
        struct creq_create_srq_resp resp = {};
        struct bnxt_qplib_cmdqmsg msg = {};
        struct cmdq_create_srq req = {};
        struct bnxt_qplib_pbl *pbl;
        u16 pg_sz_lvl;
        int rc, idx;

        hwq_attr.res = res;
        hwq_attr.sginfo = &srq->sg_info;
        hwq_attr.depth = srq->max_wqe;
        hwq_attr.stride = srq->wqe_size;
        hwq_attr.type = HWQ_TYPE_QUEUE;
        rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
        if (rc)
                return rc;

        srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
                           GFP_KERNEL);
        if (!srq->swq) {
                rc = -ENOMEM;
                goto fail;
        }

        bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                 CMDQ_BASE_OPCODE_CREATE_SRQ,
                                 sizeof(req));

        /* Configure the request */
        req.dpi = cpu_to_le32(srq->dpi->dpi);
        req.srq_handle = cpu_to_le64((uintptr_t)srq);

        req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
        pbl = &srq->hwq.pbl[PBL_LVL_0];
        pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
                     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
        pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
                      CMDQ_CREATE_SRQ_LVL_SFT;
        req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
        req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
        req.pd_id = cpu_to_le32(srq->pd->id);
        req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

        bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
        rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
        if (rc)
                goto fail;

        spin_lock_init(&srq->lock);
        srq->start_idx = 0;
        srq->last_idx = srq->hwq.max_elements - 1;
        for (idx = 0; idx < srq->hwq.max_elements; idx++)
                srq->swq[idx].next_idx = idx + 1;
        srq->swq[srq->last_idx].next_idx = -1;

        srq->id = le32_to_cpu(resp.xid);
        srq->dbinfo.hwq = &srq->hwq;
        srq->dbinfo.xid = srq->id;
        srq->dbinfo.db = srq->dpi->dbr;
        srq->dbinfo.max_slot = 1;
        srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
        if (srq->threshold)
                bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
        srq->arm_req = false;

        return 0;
fail:
        bnxt_qplib_free_hwq(res, &srq->hwq);
        kfree(srq->swq);

        return rc;
}

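/* For reference, the SRQ lifecycle as driven by the verbs layer is roughly
 * the sketch below (a hedged outline, assuming the caller has filled in
 * srq->max_wqe, srq->wqe_size, srq->pd, srq->dpi and srq->threshold;
 * error handling elided; bnxt_qplib_query_srq() refreshes srq->threshold):
 *
 *      rc = bnxt_qplib_create_srq(res, srq);
 *      rc = bnxt_qplib_post_srq_recv(srq, &wqe);
 *      rc = bnxt_qplib_query_srq(res, srq);
 *      bnxt_qplib_destroy_srq(res, srq);
 */
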
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_srq *srq)
{
        struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
        u32 sw_prod, sw_cons, count = 0;

        sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
        sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);

        count = sw_prod > sw_cons ? sw_prod - sw_cons :
                                    srq_hwq->max_elements - sw_cons + sw_prod;
        if (count > srq->threshold) {
                srq->arm_req = false;
                bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
        } else {
                /* Deferred arming */
                srq->arm_req = true;
        }

        return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
                         struct bnxt_qplib_srq *srq)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct creq_query_srq_resp resp = {};
        struct bnxt_qplib_cmdqmsg msg = {};
        struct bnxt_qplib_rcfw_sbuf *sbuf;
        struct creq_query_srq_resp_sb *sb;
        struct cmdq_query_srq req = {};
        int rc;

        bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                 CMDQ_BASE_OPCODE_QUERY_SRQ,
                                 sizeof(req));

        /* Configure the request */
        sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
        if (!sbuf)
                return -ENOMEM;
        req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
        req.srq_cid = cpu_to_le32(srq->id);
        sb = sbuf->sb;
        bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
                                sizeof(resp), 0);
        rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
        /* Consume the side buffer only if the command succeeded */
        if (!rc)
                srq->threshold = le16_to_cpu(sb->srq_limit);
        bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);

        return rc;
}

int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
                             struct bnxt_qplib_swqe *wqe)
{
        struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
        struct rq_wqe *srqe;
        struct sq_sge *hw_sge;
        u32 sw_prod, sw_cons, count = 0;
        int i, next;

        spin_lock(&srq_hwq->lock);
        if (srq->start_idx == srq->last_idx) {
                dev_err(&srq_hwq->pdev->dev,
                        "FP: SRQ (0x%x) is full!\n", srq->id);
                spin_unlock(&srq_hwq->lock);
                return -EINVAL;
        }
        next = srq->start_idx;
        srq->start_idx = srq->swq[next].next_idx;
        spin_unlock(&srq_hwq->lock);

        sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
        srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
        memset(srqe, 0, srq->wqe_size);
        /* Calculate wqe_size and data_len */
        for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
             i < wqe->num_sge; i++, hw_sge++) {
                hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
                hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
                hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
        }
        srqe->wqe_type = wqe->type;
        srqe->flags = wqe->flags;
        srqe->wqe_size = wqe->num_sge +
                        ((offsetof(typeof(*srqe), data) + 15) >> 4);
        srqe->wr_id[0] = cpu_to_le32((u32)next);
        srq->swq[next].wr_id = wqe->wr_id;

        srq_hwq->prod++;

        spin_lock(&srq_hwq->lock);
        sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
        /* The lock is needed only to read srq_hwq->cons consistently;
         * retain it while computing the current fill count.
         */
        sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
        count = sw_prod > sw_cons ? sw_prod - sw_cons :
                                    srq_hwq->max_elements - sw_cons + sw_prod;
        spin_unlock(&srq_hwq->lock);
        /* Ring DB */
        bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
        if (srq->arm_req && count > srq->threshold) {
                srq->arm_req = false;
                bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
        }

        return 0;
}

/* QP */

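/* Allocate the software shadow ring for a queue and link its entries into
 * a circular free list via next_idx. swq_start and swq_last both begin at
 * slot 0, i.e. the window of WQEs outstanding in hardware starts empty.
 */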
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
        int indx;

        que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
        if (!que->swq)
                return -ENOMEM;

        que->swq_start = 0;
        que->swq_last = que->max_wqe - 1;
        for (indx = 0; indx < que->max_wqe; indx++)
                que->swq[indx].next_idx = indx + 1;
        que->swq[que->swq_last].next_idx = 0; /* Make it circular */
        que->swq_last = 0;

        return 0;
}

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_hwq_attr hwq_attr = {};
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct creq_create_qp1_resp resp = {};
        struct bnxt_qplib_cmdqmsg msg = {};
        struct bnxt_qplib_q *sq = &qp->sq;
        struct bnxt_qplib_q *rq = &qp->rq;
        struct cmdq_create_qp1 req = {};
        struct bnxt_qplib_pbl *pbl;
        u32 qp_flags = 0;
        u8 pg_sz_lvl;
        u32 tbl_indx;
        int rc;

        bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                 CMDQ_BASE_OPCODE_CREATE_QP1,
                                 sizeof(req));
        /* General */
        req.type = qp->type;
        req.dpi = cpu_to_le32(qp->dpi->dpi);
        req.qp_handle = cpu_to_le64(qp->qp_handle);

        /* SQ */
        hwq_attr.res = res;
        hwq_attr.sginfo = &sq->sg_info;
        hwq_attr.stride = sizeof(struct sq_sge);
        hwq_attr.depth = bnxt_qplib_get_depth(sq);
        hwq_attr.type = HWQ_TYPE_QUEUE;
        rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
        if (rc)
                return rc;

        rc = bnxt_qplib_alloc_init_swq(sq);
        if (rc)
                goto fail_sq;

        req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
        pbl = &sq->hwq.pbl[PBL_LVL_0];
        req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
        pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
                     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
        pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
        req.sq_pg_size_sq_lvl = pg_sz_lvl;
        req.sq_fwo_sq_sge =
                cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
                             CMDQ_CREATE_QP1_SQ_SGE_SFT);
        req.scq_cid = cpu_to_le32(qp->scq->id);

        /* RQ */
        if (rq->max_wqe) {
                hwq_attr.res = res;
                hwq_attr.sginfo = &rq->sg_info;
                hwq_attr.stride = sizeof(struct sq_sge);
                hwq_attr.depth = bnxt_qplib_get_depth(rq);
                hwq_attr.type = HWQ_TYPE_QUEUE;
                rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
                if (rc)
                        goto sq_swq;
                rc = bnxt_qplib_alloc_init_swq(rq);
                if (rc)
                        goto fail_rq;
                req.rq_size = cpu_to_le32(rq->max_wqe);
                pbl = &rq->hwq.pbl[PBL_LVL_0];
                req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
                pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
                             CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
                pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
                req.rq_pg_size_rq_lvl = pg_sz_lvl;
                req.rq_fwo_rq_sge =
                        cpu_to_le16((rq->max_sge &
                                     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
                                    CMDQ_CREATE_QP1_RQ_SGE_SFT);
        }
        req.rcq_cid = cpu_to_le32(qp->rcq->id);
        /* Header buffer - allow hdr_buf pass in */
        rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
        if (rc)
                goto rq_rwq;
        qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
        req.qp_flags = cpu_to_le32(qp_flags);
        req.pd_id = cpu_to_le32(qp->pd->id);

        bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
        rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
        if (rc)
                goto fail;

        qp->id = le32_to_cpu(resp.xid);
        qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
        qp->cctx = res->cctx;
        sq->dbinfo.hwq = &sq->hwq;
        sq->dbinfo.xid = qp->id;
        sq->dbinfo.db = qp->dpi->dbr;
        sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
        if (rq->max_wqe) {
                rq->dbinfo.hwq = &rq->hwq;
                rq->dbinfo.xid = qp->id;
                rq->dbinfo.db = qp->dpi->dbr;
                rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
        }
        tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
        rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
        rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

        return 0;

fail:
        bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
        kfree(rq->swq);
fail_rq:
        bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
        kfree(sq->swq);
fail_sq:
        bnxt_qplib_free_hwq(res, &sq->hwq);
        return rc;
}

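/* The PSN search area sits in the padding after the SQ elements
 * (hwq->depth indexes the first slot past the SQ proper). Record the page,
 * offset and stride of that area so the post-send path can locate the PSN
 * entry belonging to a WQE.
 */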
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
        struct bnxt_qplib_hwq *hwq;
        struct bnxt_qplib_q *sq;
        u64 fpsne, psn_pg;
        u16 indx_pad = 0;

        sq = &qp->sq;
        hwq = &sq->hwq;
        /* First psn entry */
        fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
        if (!IS_ALIGNED(fpsne, PAGE_SIZE))
                indx_pad = (fpsne & ~PAGE_MASK) / size;
        hwq->pad_pgofft = indx_pad;
        hwq->pad_pg = (u64 *)psn_pg;
        hwq->pad_stride = size;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct bnxt_qplib_hwq_attr hwq_attr = {};
        struct bnxt_qplib_sg_info sginfo = {};
        struct creq_create_qp_resp resp = {};
        struct bnxt_qplib_cmdqmsg msg = {};
        struct bnxt_qplib_q *sq = &qp->sq;
        struct bnxt_qplib_q *rq = &qp->rq;
        struct cmdq_create_qp req = {};
        int rc, req_size, psn_sz = 0;
        struct bnxt_qplib_hwq *xrrq;
        struct bnxt_qplib_pbl *pbl;
        u32 qp_flags = 0;
        u8 pg_sz_lvl;
        u32 tbl_indx;
        u16 nsge;

        bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                 CMDQ_BASE_OPCODE_CREATE_QP,
                                 sizeof(req));

        /* General */
        req.type = qp->type;
        req.dpi = cpu_to_le32(qp->dpi->dpi);
        req.qp_handle = cpu_to_le64(qp->qp_handle);

        /* SQ */
        if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
                psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
                         sizeof(struct sq_psn_search_ext) :
                         sizeof(struct sq_psn_search);
        }

        hwq_attr.res = res;
        hwq_attr.sginfo = &sq->sg_info;
        hwq_attr.stride = sizeof(struct sq_sge);
        hwq_attr.depth = bnxt_qplib_get_depth(sq);
        hwq_attr.aux_stride = psn_sz;
        hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
        hwq_attr.type = HWQ_TYPE_QUEUE;
        rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
        if (rc)
                return rc;

        rc = bnxt_qplib_alloc_init_swq(sq);
        if (rc)
                goto fail_sq;

        if (psn_sz)
                bnxt_qplib_init_psn_ptr(qp, psn_sz);

        req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
        pbl = &sq->hwq.pbl[PBL_LVL_0];
        req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
        pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
                     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
        pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
        req.sq_pg_size_sq_lvl = pg_sz_lvl;
        req.sq_fwo_sq_sge =
                cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
                             CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
        req.scq_cid = cpu_to_le32(qp->scq->id);

        /* RQ */
        if (!qp->srq) {
                hwq_attr.res = res;
                hwq_attr.sginfo = &rq->sg_info;
                hwq_attr.stride = sizeof(struct sq_sge);
                hwq_attr.depth = bnxt_qplib_get_depth(rq);
                hwq_attr.aux_stride = 0;
                hwq_attr.aux_depth = 0;
                hwq_attr.type = HWQ_TYPE_QUEUE;
                rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
                if (rc)
                        goto sq_swq;
                rc = bnxt_qplib_alloc_init_swq(rq);
                if (rc)
                        goto fail_rq;

                req.rq_size = cpu_to_le32(rq->max_wqe);
                pbl = &rq->hwq.pbl[PBL_LVL_0];
                req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
                pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
                             CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
                pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
                req.rq_pg_size_rq_lvl = pg_sz_lvl;
                nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
                        6 : rq->max_sge;
                req.rq_fwo_rq_sge =
                        cpu_to_le16(((nsge &
                                      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
                                     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
        } else {
                /* SRQ */
                qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
                req.srq_cid = cpu_to_le32(qp->srq->id);
        }
        req.rcq_cid = cpu_to_le32(qp->rcq->id);

        qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
        qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
        if (qp->sig_type)
                qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
        if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
                qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
        if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
                qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

        req.qp_flags = cpu_to_le32(qp_flags);

        /* ORRQ and IRRQ */
        if (psn_sz) {
                xrrq = &qp->orrq;
                xrrq->max_elements =
                        ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
                req_size = xrrq->max_elements *
                           BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
                req_size &= ~(PAGE_SIZE - 1);
                sginfo.pgsize = req_size;
                sginfo.pgshft = PAGE_SHIFT;

                hwq_attr.res = res;
                hwq_attr.sginfo = &sginfo;
                hwq_attr.depth = xrrq->max_elements;
                hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
                hwq_attr.aux_stride = 0;
                hwq_attr.aux_depth = 0;
                hwq_attr.type = HWQ_TYPE_CTX;
                rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
                if (rc)
                        goto rq_swq;
                pbl = &xrrq->pbl[PBL_LVL_0];
                req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

                xrrq = &qp->irrq;
                xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
                                                qp->max_dest_rd_atomic);
                req_size = xrrq->max_elements *
                           BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
                req_size &= ~(PAGE_SIZE - 1);
                sginfo.pgsize = req_size;
                hwq_attr.depth = xrrq->max_elements;
                hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
                rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
                if (rc)
                        goto fail_orrq;

                pbl = &xrrq->pbl[PBL_LVL_0];
                req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
        }
        req.pd_id = cpu_to_le32(qp->pd->id);

        bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
                                sizeof(resp), 0);
        rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
        if (rc)
                goto fail;

        qp->id = le32_to_cpu(resp.xid);
        qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
        INIT_LIST_HEAD(&qp->sq_flush);
        INIT_LIST_HEAD(&qp->rq_flush);
        qp->cctx = res->cctx;
        sq->dbinfo.hwq = &sq->hwq;
        sq->dbinfo.xid = qp->id;
        sq->dbinfo.db = qp->dpi->dbr;
        sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
        if (rq->max_wqe) {
                rq->dbinfo.hwq = &rq->hwq;
                rq->dbinfo.xid = qp->id;
                rq->dbinfo.db = qp->dpi->dbr;
                rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
        }
        tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
        rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
        rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

        return 0;
fail:
        bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
        bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
        kfree(rq->swq);
fail_rq:
        bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
        kfree(sq->swq);
fail_sq:
        bnxt_qplib_free_hwq(res, &sq->hwq);
        return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
        switch (qp->state) {
        case CMDQ_MODIFY_QP_NEW_STATE_RTR:
                /* INIT->RTR, configure the path_mtu to the default
                 * 2048 if not being requested
                 */
                if (!(qp->modify_flags &
                    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
                        qp->modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
                        qp->path_mtu =
                                CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
                }
                qp->modify_flags &=
                        ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
                /* Bono FW requires the max_dest_rd_atomic to be >= 1 */
                if (qp->max_dest_rd_atomic < 1)
                        qp->max_dest_rd_atomic = 1;
                qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
                /* Bono FW 20.6.5 requires SGID_INDEX configuration */
                if (!(qp->modify_flags &
                    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
                        qp->modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
                        qp->ah.sgid_index = 0;
                }
                break;
        default:
                break;
        }
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
        switch (qp->state) {
        case CMDQ_MODIFY_QP_NEW_STATE_RTS:
                /* Bono FW requires the max_rd_atomic to be >= 1 */
                if (qp->max_rd_atomic < 1)
                        qp->max_rd_atomic = 1;
                /* Bono FW does not allow PKEY_INDEX,
                 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
                 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
                 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
                 * modification
                 */
                qp->modify_flags &=
                        ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
                          CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
                          CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
                          CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
                          CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
                          CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
                          CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
                          CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
                          CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
                          CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
                          CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
                          CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
                break;
        default:
                break;
        }
}

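/* Drop modify_qp attributes that the firmware will not accept for the
 * current-state -> new-state transition; see the per-state helpers above.
 */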
1215 static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
1216 {
1217         switch (qp->cur_qp_state) {
1218         case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1219                 break;
1220         case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1221                 __modify_flags_from_init_state(qp);
1222                 break;
1223         case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1224                 __modify_flags_from_rtr_state(qp);
1225                 break;
1226         case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1227                 break;
1228         case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1229                 break;
1230         case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1231                 break;
1232         case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1233                 break;
1234         default:
1235                 break;
1236         }
1237 }
1238
1239 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1240 {
1241         struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1242         struct creq_modify_qp_resp resp = {};
1243         struct bnxt_qplib_cmdqmsg msg = {};
1244         struct cmdq_modify_qp req = {};
1245         u32 temp32[4];
1246         u32 bmask;
1247         int rc;
1248
1249         bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1250                                  CMDQ_BASE_OPCODE_MODIFY_QP,
1251                                  sizeof(req));
1252
1253         /* Filter out the qp_attr_mask based on the state->new transition */
1254         __filter_modify_flags(qp);
1255         bmask = qp->modify_flags;
1256         req.modify_mask = cpu_to_le32(qp->modify_flags);
1257         req.qp_cid = cpu_to_le32(qp->id);
1258         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
1259                 req.network_type_en_sqd_async_notify_new_state =
1260                                 (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
1261                                 (qp->en_sqd_async_notify ?
1262                                         CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
1263         }
1264         req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
1265
1266         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
1267                 req.access = qp->access;
1268
1269         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
1270                 req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);
1271
1272         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
1273                 req.qkey = cpu_to_le32(qp->qkey);
1274
1275         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
1276                 memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
1277                 req.dgid[0] = cpu_to_le32(temp32[0]);
1278                 req.dgid[1] = cpu_to_le32(temp32[1]);
1279                 req.dgid[2] = cpu_to_le32(temp32[2]);
1280                 req.dgid[3] = cpu_to_le32(temp32[3]);
1281         }
1282         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
1283                 req.flow_label = cpu_to_le32(qp->ah.flow_label);
1284
1285         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
1286                 req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
1287                                              [qp->ah.sgid_index]);
1288
1289         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
1290                 req.hop_limit = qp->ah.hop_limit;
1291
1292         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
1293                 req.traffic_class = qp->ah.traffic_class;
1294
1295         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
1296                 memcpy(req.dest_mac, qp->ah.dmac, 6);
1297
1298         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
1299                 req.path_mtu_pingpong_push_enable |= qp->path_mtu;
1300
1301         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
1302                 req.timeout = qp->timeout;
1303
1304         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
1305                 req.retry_cnt = qp->retry_cnt;
1306
1307         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
1308                 req.rnr_retry = qp->rnr_retry;
1309
1310         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
1311                 req.min_rnr_timer = qp->min_rnr_timer;
1312
1313         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
1314                 req.rq_psn = cpu_to_le32(qp->rq.psn);
1315
1316         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
1317                 req.sq_psn = cpu_to_le32(qp->sq.psn);
1318
1319         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
1320                 req.max_rd_atomic =
1321                         ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1322
1323         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
1324                 req.max_dest_rd_atomic =
1325                         IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1326
1327         req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1328         req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1329         req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1330         req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1331         req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1332         if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
1333                 req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1334
1335         req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
1336
1337         bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),  sizeof(resp), 0);
1338         rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1339         if (rc)
1340                 return rc;
1341         qp->cur_qp_state = qp->state;
1342         return 0;
1343 }
1344
1345 int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1346 {
1347         struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1348         struct creq_query_qp_resp resp = {};
1349         struct bnxt_qplib_cmdqmsg msg = {};
1350         struct bnxt_qplib_rcfw_sbuf *sbuf;
1351         struct creq_query_qp_resp_sb *sb;
1352         struct cmdq_query_qp req = {};
1353         u32 temp32[4];
1354         int i, rc = 0;
1355
1356         bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1357                                  CMDQ_BASE_OPCODE_QUERY_QP,
1358                                  sizeof(req));
1359
1360         sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
1361         if (!sbuf)
1362                 return -ENOMEM;
1363         sb = sbuf->sb;
1364
1365         req.qp_cid = cpu_to_le32(qp->id);
1366         req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
1367         bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
1368                                 sizeof(resp), 0);
1369         rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1370         if (rc)
1371                 goto bail;
1372         /* Extract the context from the side buffer */
1373         qp->state = sb->en_sqd_async_notify_state &
1374                         CREQ_QUERY_QP_RESP_SB_STATE_MASK;
1375         qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1376                                   CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
1377                                   true : false;
1378         qp->access = sb->access;
1379         qp->pkey_index = le16_to_cpu(sb->pkey);
1380         qp->qkey = le32_to_cpu(sb->qkey);
1381
1382         temp32[0] = le32_to_cpu(sb->dgid[0]);
1383         temp32[1] = le32_to_cpu(sb->dgid[1]);
1384         temp32[2] = le32_to_cpu(sb->dgid[2]);
1385         temp32[3] = le32_to_cpu(sb->dgid[3]);
1386         memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1387
1388         qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1389
1390         qp->ah.sgid_index = 0;
1391         for (i = 0; i < res->sgid_tbl.max; i++) {
1392                 if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
1393                         qp->ah.sgid_index = i;
1394                         break;
1395                 }
1396         }
1397         if (i == res->sgid_tbl.max)
1398                 dev_warn(&res->pdev->dev, "SGID not found\n");
1399
1400         qp->ah.hop_limit = sb->hop_limit;
1401         qp->ah.traffic_class = sb->traffic_class;
1402         memcpy(qp->ah.dmac, sb->dest_mac, ETH_ALEN);
1403         qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1404                                 CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
1405                                 CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
1406         qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1407                                     CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
1408                                     CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
1409         qp->timeout = sb->timeout;
1410         qp->retry_cnt = sb->retry_cnt;
1411         qp->rnr_retry = sb->rnr_retry;
1412         qp->min_rnr_timer = sb->min_rnr_timer;
1413         qp->rq.psn = le32_to_cpu(sb->rq_psn);
1414         qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1415         qp->sq.psn = le32_to_cpu(sb->sq_psn);
1416         qp->max_dest_rd_atomic =
1417                         IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
1418         qp->sq.max_wqe = qp->sq.hwq.max_elements;
1419         qp->rq.max_wqe = qp->rq.hwq.max_elements;
1420         qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1421         qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1422         qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1423         qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1424         memcpy(qp->smac, sb->src_mac, ETH_ALEN);
1425         qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
1426 bail:
1427         bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
1428         return rc;
1429 }
1430
1431 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1432 {
1433         struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1434         struct cq_base *hw_cqe;
1435         int i;
1436
1437         for (i = 0; i < cq_hwq->max_elements; i++) {
1438                 hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
1439                 if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
1440                         continue;
1441                 /*
1442                  * The valid test of the entry must be done first before
1443                  * reading any further.
1444                  */
1445                 dma_rmb();
1446                 switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
1447                 case CQ_BASE_CQE_TYPE_REQ:
1448                 case CQ_BASE_CQE_TYPE_TERMINAL:
1449                 {
1450                         struct cq_req *cqe = (struct cq_req *)hw_cqe;
1451
1452                         if (qp == le64_to_cpu(cqe->qp_handle))
1453                                 cqe->qp_handle = 0;
1454                         break;
1455                 }
1456                 case CQ_BASE_CQE_TYPE_RES_RC:
1457                 case CQ_BASE_CQE_TYPE_RES_UD:
1458                 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
1459                 {
1460                         struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
1461
1462                         if (qp == le64_to_cpu(cqe->qp_handle))
1463                                 cqe->qp_handle = 0;
1464                         break;
1465                 }
1466                 default:
1467                         break;
1468                 }
1469         }
1470 }
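
/*
 * Note on the valid test above (conceptual sketch, not the exact macro): the
 * producer flips the toggle bit in cqe_type_toggle on every pass around the
 * ring, so an entry left over from the previous pass fails CQE_CMP_VALID()
 * and is skipped.  In effect:
 *
 *         pass  = raw_cons / max_elements;        // 0, 1, 2, ...
 *         valid = (toggle bit set) == !(pass & 1);
 *
 * The real macro lives in the qplib headers; dma_rmb() then orders the valid
 * check before any further reads of the entry's payload.
 */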
1471
1472 int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1473                           struct bnxt_qplib_qp *qp)
1474 {
1475         struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1476         struct creq_destroy_qp_resp resp = {};
1477         struct bnxt_qplib_cmdqmsg msg = {};
1478         struct cmdq_destroy_qp req = {};
1479         u32 tbl_indx;
1480         int rc;
1481
1482         tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1483         rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1484         rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
1485
1486         bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1487                                  CMDQ_BASE_OPCODE_DESTROY_QP,
1488                                  sizeof(req));
1489
1490         req.qp_cid = cpu_to_le32(qp->id);
1491         bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
1492                                 sizeof(resp), 0);
1493         rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1494         if (rc) {
1495                 rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1496                 rcfw->qp_tbl[tbl_indx].qp_handle = qp;
1497                 return rc;
1498         }
1499
1500         return 0;
1501 }
1502
1503 void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
1504                             struct bnxt_qplib_qp *qp)
1505 {
1506         bnxt_qplib_free_qp_hdr_buf(res, qp);
1507         bnxt_qplib_free_hwq(res, &qp->sq.hwq);
1508         kfree(qp->sq.swq);
1509
1510         bnxt_qplib_free_hwq(res, &qp->rq.hwq);
1511         kfree(qp->rq.swq);
1512
1513         if (qp->irrq.max_elements)
1514                 bnxt_qplib_free_hwq(res, &qp->irrq);
1515         if (qp->orrq.max_elements)
1516                 bnxt_qplib_free_hwq(res, &qp->orrq);
1517
1518 }
1519
1520 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1521                                 struct bnxt_qplib_sge *sge)
1522 {
1523         struct bnxt_qplib_q *sq = &qp->sq;
1524         u32 sw_prod;
1525
1526         memset(sge, 0, sizeof(*sge));
1527
1528         if (qp->sq_hdr_buf) {
1529                 sw_prod = sq->swq_start;
1530                 sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
1531                                          sw_prod * qp->sq_hdr_buf_size);
1532                 sge->lkey = 0xFFFFFFFF;
1533                 sge->size = qp->sq_hdr_buf_size;
1534                 return qp->sq_hdr_buf + sw_prod * sge->size;
1535         }
1536         return NULL;
1537 }
1538
1539 u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1540 {
1541         struct bnxt_qplib_q *rq = &qp->rq;
1542
1543         return rq->swq_start;
1544 }
1545
1546 dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
1547 {
1548         return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
1549 }
1550
1551 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1552                                 struct bnxt_qplib_sge *sge)
1553 {
1554         struct bnxt_qplib_q *rq = &qp->rq;
1555         u32 sw_prod;
1556
1557         memset(sge, 0, sizeof(*sge));
1558
1559         if (qp->rq_hdr_buf) {
1560                 sw_prod = rq->swq_start;
1561                 sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
1562                                          sw_prod * qp->rq_hdr_buf_size);
1563                 sge->lkey = 0xFFFFFFFF;
1564                 sge->size = qp->rq_hdr_buf_size;
1565                 return qp->rq_hdr_buf + sw_prod * sge->size;
1566         }
1567         return NULL;
1568 }
1569
1570 static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
1571                                        struct bnxt_qplib_swqe *wqe,
1572                                        struct bnxt_qplib_swq *swq)
1573 {
1574         struct sq_psn_search_ext *psns_ext;
1575         struct sq_psn_search *psns;
1576         u32 flg_npsn;
1577         u32 op_spsn;
1578
1579         if (!swq->psn_search)
1580                 return;
1581         psns = swq->psn_search;
1582         psns_ext = swq->psn_ext;
1583
1584         op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1585                     SQ_PSN_SEARCH_START_PSN_MASK);
1586         op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1587                      SQ_PSN_SEARCH_OPCODE_MASK);
1588         flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1589                      SQ_PSN_SEARCH_NEXT_PSN_MASK);
1590
1591         if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
1592                 psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
1593                 psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
1594                 psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
1595         } else {
1596                 psns->opcode_start_psn = cpu_to_le32(op_spsn);
1597                 psns->flags_next_psn = cpu_to_le32(flg_npsn);
1598         }
1599 }
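
/*
 * Worked example (values illustrative): a SEND WQE consuming PSNs
 * [0x10, 0x12) is recorded as
 *
 *         op_spsn  = (0x10 << SQ_PSN_SEARCH_START_PSN_SFT) |
 *                    (wqe->type << SQ_PSN_SEARCH_OPCODE_SFT)
 *         flg_npsn = (0x12 << SQ_PSN_SEARCH_NEXT_PSN_SFT)
 *
 * so the firmware can map a given PSN back to the WQE that produced it on
 * retransmission.  The extended (gen P5) layout additionally records the
 * WQE's starting slot index.
 */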
1600
1601 static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
1602                                  struct bnxt_qplib_swqe *wqe,
1603                                  u16 *idx)
1604 {
1605         struct bnxt_qplib_hwq *hwq;
1606         int len, t_len, offt;
1607         bool pull_dst = true;
1608         void *il_dst = NULL;
1609         void *il_src = NULL;
1610         int t_cplen, cplen;
1611         int indx;
1612
1613         hwq = &qp->sq.hwq;
1614         t_len = 0;
1615         for (indx = 0; indx < wqe->num_sge; indx++) {
1616                 len = wqe->sg_list[indx].size;
1617                 il_src = (void *)wqe->sg_list[indx].addr;
1618                 t_len += len;
1619                 if (t_len > qp->max_inline_data)
1620                         return -ENOMEM;
1621                 while (len) {
1622                         if (pull_dst) {
1623                                 pull_dst = false;
1624                                 il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
1625                                 (*idx)++;
1626                                 t_cplen = 0;
1627                                 offt = 0;
1628                         }
1629                         cplen = min_t(int, len, sizeof(struct sq_sge));
1630                         cplen = min_t(int, cplen,
1631                                         (sizeof(struct sq_sge) - offt));
1632                         memcpy(il_dst, il_src, cplen);
1633                         t_cplen += cplen;
1634                         il_src += cplen;
1635                         il_dst += cplen;
1636                         offt += cplen;
1637                         len -= cplen;
1638                         if (t_cplen == sizeof(struct sq_sge))
1639                                 pull_dst = true;
1640                 }
1641         }
1642
1643         return t_len;
1644 }
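
/*
 * Walk-through of the inline copy above (illustrative values, assuming
 * max_inline_data permits them): payload bytes are packed back to back into
 * consecutive 16-byte slots (sizeof(struct sq_sge)).  With two SGEs of 10
 * and 20 bytes:
 *
 *         slot 0: 10 bytes of SGE 0 + first 6 bytes of SGE 1
 *         slot 1: remaining 14 bytes of SGE 1
 *
 * and the function returns t_len = 30.  -ENOMEM is returned as soon as the
 * running total would exceed qp->max_inline_data.
 */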
1645
1646 static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
1647                                struct bnxt_qplib_sge *ssge,
1648                                u16 nsge, u16 *idx)
1649 {
1650         struct sq_sge *dsge;
1651         int indx, len = 0;
1652
1653         for (indx = 0; indx < nsge; indx++, (*idx)++) {
1654                 dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
1655                 dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
1656                 dsge->l_key = cpu_to_le32(ssge[indx].lkey);
1657                 dsge->size = cpu_to_le32(ssge[indx].size);
1658                 len += ssge[indx].size;
1659         }
1660
1661         return len;
1662 }
1663
1664 static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
1665                                      struct bnxt_qplib_swqe *wqe,
1666                                      u16 *wqe_sz, u16 *qdf, u8 mode)
1667 {
1668         u32 ilsize, bytes;
1669         u16 nsge;
1670         u16 slot;
1671
1672         nsge = wqe->num_sge;
1673         /* sq_send_hdr is a slight misnomer here; the RQ header has the same size. */
1674         bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
1675         if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1676                 ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
1677                 bytes = ALIGN(ilsize, sizeof(struct sq_sge));
1678                 bytes += sizeof(struct sq_send_hdr);
1679         }
1680
1681         *qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
1682         slot = bytes >> 4;
1683         *wqe_sz = slot;
1684         if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
1685                 slot = 8;       /* static mode uses fixed 8-slot (128B) WQEs */
1686         return slot;
1687 }
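
/*
 * Example of the slot arithmetic above, assuming the current HSI sizes
 * (sizeof(struct sq_send_hdr) == 32, sizeof(struct sq_sge) == 16): a
 * non-inline SEND with two SGEs needs 32 + 2 * 16 = 64 bytes, i.e.
 * 64 >> 4 = 4 slots (*wqe_sz = 4).  In variable WQE mode the producer
 * advances by those 4 slots; in static mode every WQE occupies a fixed
 * 8 slots (128 bytes) regardless of its actual size.
 */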
1688
1689 static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
1690                                      struct bnxt_qplib_swq *swq)
1691 {
1692         struct bnxt_qplib_hwq *hwq;
1693         u32 pg_num, pg_indx;
1694         void *buff;
1695         u32 tail;
1696
1697         hwq = &sq->hwq;
1698         if (!hwq->pad_pg)
1699                 return;
1700         tail = swq->slot_idx / sq->dbinfo.max_slot;
1701         pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
1702         pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
1703         buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
1704         swq->psn_ext = buff;
1705         swq->psn_search = buff;
1706 }
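
/*
 * Illustrative arithmetic for the pad-page lookup above: with a pad_stride
 * of 8 bytes and 4 KiB pages there are 512 PSN entries per pad page, so the
 * WQE index tail = slot_idx / max_slot lands at
 *
 *         pg_num  = (tail + pad_pgofft) / 512
 *         pg_indx = (tail + pad_pgofft) % 512
 *
 * The same buffer is published through both psn_search and psn_ext;
 * bnxt_qplib_fill_psn_search() later picks the layout that matches the chip
 * generation.
 */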
1707
1708 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1709 {
1710         struct bnxt_qplib_q *sq = &qp->sq;
1711
1712         bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1713 }
1714
1715 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1716                          struct bnxt_qplib_swqe *wqe)
1717 {
1718         struct bnxt_qplib_nq_work *nq_work = NULL;
1719         int i, rc = 0, data_len = 0, pkt_num = 0;
1720         struct bnxt_qplib_q *sq = &qp->sq;
1721         struct bnxt_qplib_hwq *hwq;
1722         struct bnxt_qplib_swq *swq;
1723         bool sch_handler = false;
1724         u16 wqe_sz, qdf = 0;
1725         void *base_hdr;
1726         void *ext_hdr;
1727         __le32 temp32;
1728         u32 wqe_idx;
1729         u32 slots;
1730         u16 idx;
1731
1732         hwq = &sq->hwq;
1733         if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1734             qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1735                 dev_err(&hwq->pdev->dev,
1736                         "QPLIB: FP: QP (0x%x) is in the 0x%x state",
1737                         qp->id, qp->state);
1738                 rc = -EINVAL;
1739                 goto done;
1740         }
1741
1742         slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1743         if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1744                 dev_err(&hwq->pdev->dev,
1745                         "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1746                         hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1747                 rc = -ENOMEM;
1748                 goto done;
1749         }
1750
1751         swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1752         bnxt_qplib_pull_psn_buff(sq, swq);
1753
1754         idx = 0;
1755         swq->slot_idx = hwq->prod;
1756         swq->slots = slots;
1757         swq->wr_id = wqe->wr_id;
1758         swq->type = wqe->type;
1759         swq->flags = wqe->flags;
1760         swq->start_psn = sq->psn & BTH_PSN_MASK;
1761         if (qp->sig_type)
1762                 swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1763
1764         if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1765                 sch_handler = true;
1766                 dev_dbg(&hwq->pdev->dev,
1767                         "%s Error QP. Scheduling for poll_cq\n", __func__);
1768                 goto queue_err;
1769         }
1770
1771         base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1772         ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1773         memset(base_hdr, 0, sizeof(struct sq_sge));
1774         memset(ext_hdr, 0, sizeof(struct sq_sge));
1775
1776         if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
1777                 /* Copy the inline data */
1778                 data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1779         else
1780                 data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
1781                                                &idx);
1782         if (data_len < 0)
1783                 goto queue_err;
1784         /* Specifics */
1785         switch (wqe->type) {
1786         case BNXT_QPLIB_SWQE_TYPE_SEND:
1787                 if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1788                         struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
1789                         struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
1790                         /* Assemble info for Raw Ethertype QPs */
1791
1792                         sqe->wqe_type = wqe->type;
1793                         sqe->flags = wqe->flags;
1794                         sqe->wqe_size = wqe_sz;
1795                         sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1796                         sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1797                         sqe->length = cpu_to_le32(data_len);
1798                         ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1799                                 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1800                                 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1801
1802                         break;
1803                 }
1804                 fallthrough;
1805         case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1806         case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1807         {
1808                 struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
1809                 struct sq_send_hdr *sqe = base_hdr;
1810
1811                 sqe->wqe_type = wqe->type;
1812                 sqe->flags = wqe->flags;
1813                 sqe->wqe_size = wqe_sz;
1814                 sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
1815                 if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1816                     qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1817                         sqe->q_key = cpu_to_le32(wqe->send.q_key);
1818                         sqe->length = cpu_to_le32(data_len);
1819                         sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1820                         ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
1821                                                       SQ_SEND_DST_QP_MASK);
1822                         ext_sqe->avid = cpu_to_le32(wqe->send.avid &
1823                                                     SQ_SEND_AVID_MASK);
1824                 } else {
1825                         sqe->length = cpu_to_le32(data_len);
1826                         if (qp->mtu)
1827                                 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1828                         if (!pkt_num)
1829                                 pkt_num = 1;
1830                         sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1831                 }
1832                 break;
1833         }
1834         case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1835         case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1836         case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1837         {
1838                 struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
1839                 struct sq_rdma_hdr *sqe = base_hdr;
1840
1841                 sqe->wqe_type = wqe->type;
1842                 sqe->flags = wqe->flags;
1843                 sqe->wqe_size = wqe_sz;
1844                 sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1845                 sqe->length = cpu_to_le32((u32)data_len);
1846                 ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1847                 ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1848                 if (qp->mtu)
1849                         pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1850                 if (!pkt_num)
1851                         pkt_num = 1;
1852                 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1853                 break;
1854         }
1855         case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1856         case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1857         {
1858                 struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
1859                 struct sq_atomic_hdr *sqe = base_hdr;
1860
1861                 sqe->wqe_type = wqe->type;
1862                 sqe->flags = wqe->flags;
1863                 sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1864                 sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1865                 ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1866                 ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1867                 if (qp->mtu)
1868                         pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1869                 if (!pkt_num)
1870                         pkt_num = 1;
1871                 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1872                 break;
1873         }
1874         case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1875         {
1876                 struct sq_localinvalidate *sqe = base_hdr;
1877
1878                 sqe->wqe_type = wqe->type;
1879                 sqe->flags = wqe->flags;
1880                 sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1881
1882                 break;
1883         }
1884         case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1885         {
1886                 struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
1887                 struct sq_fr_pmr_hdr *sqe = base_hdr;
1888
1889                 sqe->wqe_type = wqe->type;
1890                 sqe->flags = wqe->flags;
1891                 sqe->access_cntl = wqe->frmr.access_cntl |
1892                                    SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1893                 sqe->zero_based_page_size_log =
1894                         (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1895                         SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1896                         (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1897                 sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1898                 temp32 = cpu_to_le32(wqe->frmr.length);
1899                 memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1900                 sqe->numlevels_pbl_page_size_log =
1901                         ((wqe->frmr.pbl_pg_sz_log <<
1902                                         SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1903                                         SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1904                         ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1905                                         SQ_FR_PMR_NUMLEVELS_MASK);
1906
1907                 for (i = 0; i < wqe->frmr.page_list_len; i++)
1908                         wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1909                                                 wqe->frmr.page_list[i] |
1910                                                 PTU_PTE_VALID);
1911                 ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1912                 ext_sqe->va = cpu_to_le64(wqe->frmr.va);
1913
1914                 break;
1915         }
1916         case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1917         {
1918                 struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
1919                 struct sq_bind_hdr *sqe = base_hdr;
1920
1921                 sqe->wqe_type = wqe->type;
1922                 sqe->flags = wqe->flags;
1923                 sqe->access_cntl = wqe->bind.access_cntl;
1924                 sqe->mw_type_zero_based = wqe->bind.mw_type |
1925                         (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1926                 sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1927                 sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1928                 ext_sqe->va = cpu_to_le64(wqe->bind.va);
1929                 ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
1930                 break;
1931         }
1932         default:
1933                 /* Bad wqe, return error */
1934                 rc = -EINVAL;
1935                 goto done;
1936         }
1937         swq->next_psn = sq->psn & BTH_PSN_MASK;
1938         bnxt_qplib_fill_psn_search(qp, wqe, swq);
1939 queue_err:
1940         bnxt_qplib_swq_mod_start(sq, wqe_idx);
1941         bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
1942         qp->wqe_cnt++;
1943 done:
1944         if (sch_handler) {
1945                 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1946                 if (nq_work) {
1947                         nq_work->cq = qp->scq;
1948                         nq_work->nq = qp->scq->nq;
1949                         INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1950                         queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
1951                 } else {
1952                         dev_err(&hwq->pdev->dev,
1953                                 "FP: Failed to allocate SQ nq_work!\n");
1954                         rc = -ENOMEM;
1955                 }
1956         }
1957         return rc;
1958 }
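
/*
 * Typical usage (sketch, assuming the caller serializes posts on the SQ):
 * several WQEs may be queued before the doorbell is rung once, so a batch
 * shares a single MMIO write:
 *
 *         for each work request:                  // hypothetical loop
 *                 rc = bnxt_qplib_post_send(qp, &wqe);
 *         bnxt_qplib_post_send_db(qp);
 */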
1959
1960 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
1961 {
1962         struct bnxt_qplib_q *rq = &qp->rq;
1963
1964         bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
1965 }
1966
1967 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1968                          struct bnxt_qplib_swqe *wqe)
1969 {
1970         struct bnxt_qplib_nq_work *nq_work = NULL;
1971         struct bnxt_qplib_q *rq = &qp->rq;
1972         struct rq_wqe_hdr *base_hdr;
1973         struct rq_ext_hdr *ext_hdr;
1974         struct bnxt_qplib_hwq *hwq;
1975         struct bnxt_qplib_swq *swq;
1976         bool sch_handler = false;
1977         u16 wqe_sz, idx;
1978         u32 wqe_idx;
1979         int rc = 0;
1980
1981         hwq = &rq->hwq;
1982         if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1983                 dev_err(&hwq->pdev->dev,
1984                         "QPLIB: FP: QP (0x%x) is in the 0x%x state",
1985                         qp->id, qp->state);
1986                 rc = -EINVAL;
1987                 goto done;
1988         }
1989
1990         if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
1991                 dev_err(&hwq->pdev->dev,
1992                         "FP: QP (0x%x) RQ is full!\n", qp->id);
1993                 rc = -EINVAL;
1994                 goto done;
1995         }
1996
1997         swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
1998         swq->wr_id = wqe->wr_id;
1999         swq->slots = rq->dbinfo.max_slot;
2000
2001         if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2002                 sch_handler = true;
2003                 dev_dbg(&hwq->pdev->dev,
2004                         "%s: Error QP. Scheduling for poll_cq\n", __func__);
2005                 goto queue_err;
2006         }
2007
2008         idx = 0;
2009         base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2010         ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2011         memset(base_hdr, 0, sizeof(struct sq_sge));
2012         memset(ext_hdr, 0, sizeof(struct sq_sge));
2013         wqe_sz = (sizeof(struct rq_wqe_hdr) +
2014                   wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2015         bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2016         if (!wqe->num_sge) {
2017                 struct sq_sge *sge;
2018
2019                 sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2020                 sge->size = 0;
2021                 wqe_sz++;
2022         }
2023         base_hdr->wqe_type = wqe->type;
2024         base_hdr->flags = wqe->flags;
2025         base_hdr->wqe_size = wqe_sz;
2026         base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2027 queue_err:
2028         bnxt_qplib_swq_mod_start(rq, wqe_idx);
2029         bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
2030 done:
2031         if (sch_handler) {
2032                 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2033                 if (nq_work) {
2034                         nq_work->cq = qp->rcq;
2035                         nq_work->nq = qp->rcq->nq;
2036                         INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2037                         queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2038                 } else {
2039                         dev_err(&hwq->pdev->dev,
2040                                 "FP: Failed to allocate RQ nq_work!\n");
2041                         rc = -ENOMEM;
2042                 }
2043         }
2044
2045         return rc;
2046 }
2047
2048 /* CQ */
2049 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2050 {
2051         struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2052         struct bnxt_qplib_hwq_attr hwq_attr = {};
2053         struct creq_create_cq_resp resp = {};
2054         struct bnxt_qplib_cmdqmsg msg = {};
2055         struct cmdq_create_cq req = {};
2056         struct bnxt_qplib_pbl *pbl;
2057         u32 pg_sz_lvl;
2058         int rc;
2059
2060         if (!cq->dpi) {
2061                 dev_err(&rcfw->pdev->dev,
2062                         "FP: CREATE_CQ failed due to NULL DPI\n");
2063                 return -EINVAL;
2064         }
2065
2066         hwq_attr.res = res;
2067         hwq_attr.depth = cq->max_wqe;
2068         hwq_attr.stride = sizeof(struct cq_base);
2069         hwq_attr.type = HWQ_TYPE_QUEUE;
2070         hwq_attr.sginfo = &cq->sg_info;
2071         rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2072         if (rc)
2073                 return rc;
2074
2075         bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2076                                  CMDQ_BASE_OPCODE_CREATE_CQ,
2077                                  sizeof(req));
2078
2079         req.dpi = cpu_to_le32(cq->dpi->dpi);
2080         req.cq_handle = cpu_to_le64(cq->cq_handle);
2081         req.cq_size = cpu_to_le32(cq->hwq.max_elements);
2082         pbl = &cq->hwq.pbl[PBL_LVL_0];
2083         pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2084                      CMDQ_CREATE_CQ_PG_SIZE_SFT);
2085         pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2086         req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2087         req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2088         req.cq_fco_cnq_id = cpu_to_le32(
2089                         (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2090                          CMDQ_CREATE_CQ_CNQ_ID_SFT);
2091         bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2092                                 sizeof(resp), 0);
2093         rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2094         if (rc)
2095                 goto fail;
2096
2097         cq->id = le32_to_cpu(resp.xid);
2098         cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2099         init_waitqueue_head(&cq->waitq);
2100         INIT_LIST_HEAD(&cq->sqf_head);
2101         INIT_LIST_HEAD(&cq->rqf_head);
2102         spin_lock_init(&cq->compl_lock);
2103         spin_lock_init(&cq->flush_lock);
2104
2105         cq->dbinfo.hwq = &cq->hwq;
2106         cq->dbinfo.xid = cq->id;
2107         cq->dbinfo.db = cq->dpi->dbr;
2108         cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
2109
2110         bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2111
2112         return 0;
2113
2114 fail:
2115         bnxt_qplib_free_hwq(res, &cq->hwq);
2116         return rc;
2117 }
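
/*
 * Note (sketch of the doorbell convention): the ARMENA write above only
 * enables arming for the new CQ; notification events are generated once the
 * consumer later arms it with an ARMALL/ARMSE doorbell (see
 * bnxt_qplib_req_notify_cq() in this driver).
 */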
2118
2119 void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
2120                                    struct bnxt_qplib_cq *cq)
2121 {
2122         bnxt_qplib_free_hwq(res, &cq->hwq);
2123         memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
2124 }
2125
2126 int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
2127                          int new_cqes)
2128 {
2129         struct bnxt_qplib_hwq_attr hwq_attr = {};
2130         struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2131         struct creq_resize_cq_resp resp = {};
2132         struct bnxt_qplib_cmdqmsg msg = {};
2133         struct cmdq_resize_cq req = {};
2134         struct bnxt_qplib_pbl *pbl;
2135         u32 pg_sz, lvl, new_sz;
2136         int rc;
2137
2138         bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2139                                  CMDQ_BASE_OPCODE_RESIZE_CQ,
2140                                  sizeof(req));
2141         hwq_attr.sginfo = &cq->sg_info;
2142         hwq_attr.res = res;
2143         hwq_attr.depth = new_cqes;
2144         hwq_attr.stride = sizeof(struct cq_base);
2145         hwq_attr.type = HWQ_TYPE_QUEUE;
2146         rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
2147         if (rc)
2148                 return rc;
2149
2150         req.cq_cid = cpu_to_le32(cq->id);
2151         pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
2152         pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
2153         lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
2154                                        CMDQ_RESIZE_CQ_LVL_MASK;
2155         new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
2156                   CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
2157         req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
2158         req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2159
2160         bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2161                                 sizeof(resp), 0);
2162         rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2163         return rc;
2164 }
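
/*
 * Ordering note (sketch): bnxt_qplib_resize_cq() only allocates
 * cq->resize_hwq and issues the firmware command; the old ring stays in use
 * until the resize (cutoff) completion arrives, after which the caller runs
 * bnxt_qplib_resize_cq_complete() above to free the old hwq and switch over:
 *
 *         rc = bnxt_qplib_resize_cq(res, cq, new_cqes);
 *         // ... consume CQEs until the cutoff event is seen ...
 *         bnxt_qplib_resize_cq_complete(res, cq);
 */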
2165
2166 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2167 {
2168         struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2169         struct creq_destroy_cq_resp resp = {};
2170         struct bnxt_qplib_cmdqmsg msg = {};
2171         struct cmdq_destroy_cq req = {};
2172         u16 total_cnq_events;
2173         int rc;
2174
2175         bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2176                                  CMDQ_BASE_OPCODE_DESTROY_CQ,
2177                                  sizeof(req));
2178
2179         req.cq_cid = cpu_to_le32(cq->id);
2180         bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2181                                 sizeof(resp), 0);
2182         rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2183         if (rc)
2184                 return rc;
2185         total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2186         __wait_for_all_nqes(cq, total_cnq_events);
2187         bnxt_qplib_free_hwq(res, &cq->hwq);
2188         return 0;
2189 }
2190
2191 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2192                       struct bnxt_qplib_cqe **pcqe, int *budget)
2193 {
2194         struct bnxt_qplib_cqe *cqe;
2195         u32 start, last;
2196         int rc = 0;
2197
2198         /* Now complete all outstanding SQEs with FLUSHED_ERR */
2199         start = sq->swq_start;
2200         cqe = *pcqe;
2201         while (*budget) {
2202                 last = sq->swq_last;
2203                 if (start == last)
2204                         break;
2205                 /* Skip the FENCE WQE completions */
2206                 if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2207                         bnxt_qplib_cancel_phantom_processing(qp);
2208                         goto skip_compl;
2209                 }
2210                 memset(cqe, 0, sizeof(*cqe));
2211                 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2212                 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2213                 cqe->qp_handle = (u64)(unsigned long)qp;
2214                 cqe->wr_id = sq->swq[last].wr_id;
2215                 cqe->src_qp = qp->id;
2216                 cqe->type = sq->swq[last].type;
2217                 cqe++;
2218                 (*budget)--;
2219 skip_compl:
2220                 bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[last].slots);
2221                 sq->swq_last = sq->swq[last].next_idx;
2222         }
2223         *pcqe = cqe;
2224         if (!(*budget) && sq->swq_last != start)
2225                 /* Out of budget */
2226                 rc = -EAGAIN;
2227
2228         return rc;
2229 }
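
/*
 * Example of the budget contract used by the flush helpers (sketch): the
 * caller supplies CQE storage and a remaining-entry budget; -EAGAIN means
 * the queue still holds flushed work and the caller must re-poll:
 *
 *         int budget = num_cqes;                  // hypothetical sizes
 *         struct bnxt_qplib_cqe *cqe = cqe_array;
 *
 *         rc = __flush_sq(&qp->sq, qp, &cqe, &budget);
 *         completed = num_cqes - budget;
 */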
2230
2231 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2232                       struct bnxt_qplib_cqe **pcqe, int *budget)
2233 {
2234         struct bnxt_qplib_cqe *cqe;
2235         u32 start, last;
2236         int opcode = 0;
2237         int rc = 0;
2238
2239         switch (qp->type) {
2240         case CMDQ_CREATE_QP1_TYPE_GSI:
2241                 opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2242                 break;
2243         case CMDQ_CREATE_QP_TYPE_RC:
2244                 opcode = CQ_BASE_CQE_TYPE_RES_RC;
2245                 break;
2246         case CMDQ_CREATE_QP_TYPE_UD:
2247         case CMDQ_CREATE_QP_TYPE_GSI:
2248                 opcode = CQ_BASE_CQE_TYPE_RES_UD;
2249                 break;
2250         }
2251
2252         /* Flush the rest of the RQ */
2253         start = rq->swq_start;
2254         cqe = *pcqe;
2255         while (*budget) {
2256                 last = rq->swq_last;
2257                 if (last == start)
2258                         break;
2259                 memset(cqe, 0, sizeof(*cqe));
2260                 cqe->status =
2261                     CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2262                 cqe->opcode = opcode;
2263                 cqe->qp_handle = (unsigned long)qp;
2264                 cqe->wr_id = rq->swq[last].wr_id;
2265                 cqe++;
2266                 (*budget)--;
2267                 bnxt_qplib_hwq_incr_cons(&rq->hwq, rq->swq[last].slots);
2268                 rq->swq_last = rq->swq[last].next_idx;
2269         }
2270         *pcqe = cqe;
2271         if (!*budget && rq->swq_last != start)
2272                 /* Out of budget */
2273                 rc = -EAGAIN;
2274
2275         return rc;
2276 }
2277
2278 void bnxt_qplib_mark_qp_error(void *qp_handle)
2279 {
2280         struct bnxt_qplib_qp *qp = qp_handle;
2281
2282         if (!qp)
2283                 return;
2284
2285         /* Must block new posting of SQ and RQ */
2286         qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2287         bnxt_qplib_cancel_phantom_processing(qp);
2288 }
2289
2290 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
2291  *       CQE is tracked from sw_cq_cons to max_elements but valid only if VALID=1
2292  */
2293 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2294                      u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
2295 {
2296         u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
2297         struct bnxt_qplib_q *sq = &qp->sq;
2298         struct cq_req *peek_req_hwcqe;
2299         struct bnxt_qplib_qp *peek_qp;
2300         struct bnxt_qplib_q *peek_sq;
2301         struct bnxt_qplib_swq *swq;
2302         struct cq_base *peek_hwcqe;
2303         int i, rc = 0;
2304
2305         /* Normal mode */
2306         /* Check for the psn_search marking before completing */
2307         swq = &sq->swq[swq_last];
2308         if (swq->psn_search &&
2309             le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2310                 /* Unmark */
2311                 swq->psn_search->flags_next_psn = cpu_to_le32
2312                         (le32_to_cpu(swq->psn_search->flags_next_psn)
2313                                      & ~0x80000000);
2314                 dev_dbg(&cq->hwq.pdev->dev,
2315                         "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2316                         cq_cons, qp->id, swq_last, cqe_sq_cons);
2317                 sq->condition = true;
2318                 sq->send_phantom = true;
2319
2320                 /* TODO: Only ARM if the previous SQE is ARMALL */
2321                 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2322                 rc = -EAGAIN;
2323                 goto out;
2324         }
2325         if (sq->condition) {
2326                 /* Peek at the completions */
2327                 peek_raw_cq_cons = cq->hwq.cons;
2328                 peek_sw_cq_cons = cq_cons;
2329                 i = cq->hwq.max_elements;
2330                 while (i--) {
2331                         peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
2332                         peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2333                                                        peek_sw_cq_cons, NULL);
2334                         /* If the next hwcqe is VALID */
2335                         if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
2336                                           cq->hwq.max_elements)) {
2337                                 /*
2338                                  * The valid test of the entry must be done
2339                                  * first before reading any further.
2340                                  */
2341                                 dma_rmb();
2342                                 /* If the next hwcqe is a REQ */
2343                                 if ((peek_hwcqe->cqe_type_toggle &
2344                                     CQ_BASE_CQE_TYPE_MASK) ==
2345                                     CQ_BASE_CQE_TYPE_REQ) {
2346                                         peek_req_hwcqe = (struct cq_req *)
2347                                                          peek_hwcqe;
2348                                         peek_qp = (struct bnxt_qplib_qp *)
2349                                                 ((unsigned long)
2350                                                  le64_to_cpu
2351                                                  (peek_req_hwcqe->qp_handle));
2352                                         peek_sq = &peek_qp->sq;
2353                                         peek_sq_cons_idx =
2354                                                 ((le16_to_cpu(
2355                                                   peek_req_hwcqe->sq_cons_idx)
2356                                                   - 1) % sq->max_wqe);
2357                                         /* If the hwcqe's sq's wr_id matches */
2358                                         if (peek_sq == sq &&
2359                                             sq->swq[peek_sq_cons_idx].wr_id ==
2360                                             BNXT_QPLIB_FENCE_WRID) {
2361                                                 /*
2362                                                  *  Unbreak only if the phantom
2363                                                  *  comes back
2364                                                  */
2365                                                 dev_dbg(&cq->hwq.pdev->dev,
2366                                                         "FP: Got Phantom CQE\n");
2367                                                 sq->condition = false;
2368                                                 sq->single = true;
2369                                                 rc = 0;
2370                                                 goto out;
2371                                         }
2372                                 }
2373                                 /* Valid but not the phantom, so keep looping */
2374                         } else {
2375                                 /* Not valid yet, just exit and wait */
2376                                 rc = -EINVAL;
2377                                 goto out;
2378                         }
2379                         peek_sw_cq_cons++;
2380                         peek_raw_cq_cons++;
2381                 }
2382                 dev_err(&cq->hwq.pdev->dev,
2383                         "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2384                         cq_cons, qp->id, swq_last, cqe_sq_cons);
2385                 rc = -EINVAL;
2386         }
2387 out:
2388         return rc;
2389 }
2390
2391 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2392                                      struct cq_req *hwcqe,
2393                                      struct bnxt_qplib_cqe **pcqe, int *budget,
2394                                      u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2395 {
2396         struct bnxt_qplib_swq *swq;
2397         struct bnxt_qplib_cqe *cqe;
2398         struct bnxt_qplib_qp *qp;
2399         struct bnxt_qplib_q *sq;
2400         u32 cqe_sq_cons;
2401         int rc = 0;
2402
2403         qp = (struct bnxt_qplib_qp *)((unsigned long)
2404                                       le64_to_cpu(hwcqe->qp_handle));
2405         if (!qp) {
2406                 dev_err(&cq->hwq.pdev->dev,
2407                         "FP: Process Req qp is NULL\n");
2408                 return -EINVAL;
2409         }
2410         sq = &qp->sq;
2411
2412         cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
2413         if (qp->sq.flushed) {
2414                 dev_dbg(&cq->hwq.pdev->dev,
2415                         "%s: QP in Flush QP = %p\n", __func__, qp);
2416                 goto done;
2417         }
2418         /* We must walk the SQ's swq to fabricate CQEs for all previously
2419          * signaled SWQEs, since CQE aggregation reports a single CQE for
2420          * everything from the current sq cons up to cqe_sq_cons
2421          */
2422         cqe = *pcqe;
2423         while (*budget) {
2424                 if (sq->swq_last == cqe_sq_cons)
2425                         /* Done */
2426                         break;
2427
2428                 swq = &sq->swq[sq->swq_last];
2429                 memset(cqe, 0, sizeof(*cqe));
2430                 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2431                 cqe->qp_handle = (u64)(unsigned long)qp;
2432                 cqe->src_qp = qp->id;
2433                 cqe->wr_id = swq->wr_id;
2434                 if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2435                         goto skip;
2436                 cqe->type = swq->type;
2437
2438                 /* For the last CQE, check for status.  For errors, regardless
2439                  * of the request being signaled or not, it must complete with
2440                  * the hwcqe error status
2441                  */
2442                 if (swq->next_idx == cqe_sq_cons &&
2443                     hwcqe->status != CQ_REQ_STATUS_OK) {
2444                         cqe->status = hwcqe->status;
2445                         dev_err(&cq->hwq.pdev->dev,
2446                                 "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2447                                 sq->swq_last, cqe->wr_id, cqe->status);
2448                         cqe++;
2449                         (*budget)--;
2450                         bnxt_qplib_mark_qp_error(qp);
2451                         /* Add qp to flush list of the CQ */
2452                         bnxt_qplib_add_flush_qp(qp);
2453                 } else {
2454                         /* Before we complete, do WA 9060 */
2455                         if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2456                                       cqe_sq_cons)) {
2457                                 *lib_qp = qp;
2458                                 goto out;
2459                         }
2460                         if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2461                                 cqe->status = CQ_REQ_STATUS_OK;
2462                                 cqe++;
2463                                 (*budget)--;
2464                         }
2465                 }
2466 skip:
2467                 bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots);
2468                 sq->swq_last = swq->next_idx;
2469                 if (sq->single)
2470                         break;
2471         }
2472 out:
2473         *pcqe = cqe;
2474         if (sq->swq_last != cqe_sq_cons) {
2475                 /* Out of budget */
2476                 rc = -EAGAIN;
2477                 goto done;
2478         }
2479         /*
2480          * Back to normal completion mode only after it has completed all of
2481          * the WC for this CQE
2482          */
2483         sq->single = false;
2484 done:
2485         return rc;
2486 }
2487
2488 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2489 {
2490         spin_lock(&srq->hwq.lock);
2491         srq->swq[srq->last_idx].next_idx = (int)tag;
2492         srq->last_idx = (int)tag;
2493         srq->swq[srq->last_idx].next_idx = -1;
2494         srq->hwq.cons++; /* Support for SRQE counter */
2495         spin_unlock(&srq->hwq.lock);
2496 }
2497
2498 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2499                                         struct cq_res_rc *hwcqe,
2500                                         struct bnxt_qplib_cqe **pcqe,
2501                                         int *budget)
2502 {
2503         struct bnxt_qplib_srq *srq;
2504         struct bnxt_qplib_cqe *cqe;
2505         struct bnxt_qplib_qp *qp;
2506         struct bnxt_qplib_q *rq;
2507         u32 wr_id_idx;
2508
2509         qp = (struct bnxt_qplib_qp *)((unsigned long)
2510                                       le64_to_cpu(hwcqe->qp_handle));
2511         if (!qp) {
2512                 dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2513                 return -EINVAL;
2514         }
2515         if (qp->rq.flushed) {
2516                 dev_dbg(&cq->hwq.pdev->dev,
2517                         "%s: QP in Flush QP = %p\n", __func__, qp);
2518                 return 0;
2519         }
2520
2521         cqe = *pcqe;
2522         cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2523         cqe->length = le32_to_cpu(hwcqe->length);
2524         cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2525         cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2526         cqe->flags = le16_to_cpu(hwcqe->flags);
2527         cqe->status = hwcqe->status;
2528         cqe->qp_handle = (u64)(unsigned long)qp;
2529
2530         wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2531                                 CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2532         if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2533                 srq = qp->srq;
2534                 if (!srq)
2535                         return -EINVAL;
2536                 if (wr_id_idx >= srq->hwq.max_elements) {
2537                         dev_err(&cq->hwq.pdev->dev,
2538                                 "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2539                                 wr_id_idx, srq->hwq.max_elements);
2540                         return -EINVAL;
2541                 }
2542                 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2543                 bnxt_qplib_release_srqe(srq, wr_id_idx);
2544                 cqe++;
2545                 (*budget)--;
2546                 *pcqe = cqe;
2547         } else {
2548                 struct bnxt_qplib_swq *swq;
2549
2550                 rq = &qp->rq;
2551                 if (wr_id_idx > (rq->max_wqe - 1)) {
2552                         dev_err(&cq->hwq.pdev->dev,
2553                                 "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2554                                 wr_id_idx, rq->max_wqe);
2555                         return -EINVAL;
2556                 }
2557                 if (wr_id_idx != rq->swq_last)
2558                         return -EINVAL;
2559                 swq = &rq->swq[rq->swq_last];
2560                 cqe->wr_id = swq->wr_id;
2561                 cqe++;
2562                 (*budget)--;
2563                 bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2564                 rq->swq_last = swq->next_idx;
2565                 *pcqe = cqe;
2566
2567                 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2568                         qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2569                         /* Add qp to flush list of the CQ */
2570                         bnxt_qplib_add_flush_qp(qp);
2571                 }
2572         }
2573
2574         return 0;
2575 }
2576
2577 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2578                                         struct cq_res_ud *hwcqe,
2579                                         struct bnxt_qplib_cqe **pcqe,
2580                                         int *budget)
2581 {
2582         struct bnxt_qplib_srq *srq;
2583         struct bnxt_qplib_cqe *cqe;
2584         struct bnxt_qplib_qp *qp;
2585         struct bnxt_qplib_q *rq;
2586         u32 wr_id_idx;
2587
2588         qp = (struct bnxt_qplib_qp *)((unsigned long)
2589                                       le64_to_cpu(hwcqe->qp_handle));
2590         if (!qp) {
2591                 dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2592                 return -EINVAL;
2593         }
2594         if (qp->rq.flushed) {
2595                 dev_dbg(&cq->hwq.pdev->dev,
2596                         "%s: QP in Flush QP = %p\n", __func__, qp);
2597                 return 0;
2598         }
2599         cqe = *pcqe;
2600         cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2601         cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2602         cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2603         cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2604         cqe->flags = le16_to_cpu(hwcqe->flags);
2605         cqe->status = hwcqe->status;
2606         cqe->qp_handle = (u64)(unsigned long)qp;
2607         /* FIXME: Endianness fix needed for smac */
2608         memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2609         wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2610                                 & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2611         cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2612                                   ((le32_to_cpu(
2613                                   hwcqe->src_qp_high_srq_or_rq_wr_id) &
2614                                  CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2615
2616         if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2617                 srq = qp->srq;
2618                 if (!srq)
2619                         return -EINVAL;
2620
2621                 if (wr_id_idx >= srq->hwq.max_elements) {
2622                         dev_err(&cq->hwq.pdev->dev,
2623                                 "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2624                                 wr_id_idx, srq->hwq.max_elements);
2625                         return -EINVAL;
2626                 }
2627                 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2628                 bnxt_qplib_release_srqe(srq, wr_id_idx);
2629                 cqe++;
2630                 (*budget)--;
2631                 *pcqe = cqe;
2632         } else {
2633                 struct bnxt_qplib_swq *swq;
2634
2635                 rq = &qp->rq;
2636                 if (wr_id_idx > (rq->max_wqe - 1)) {
2637                         dev_err(&cq->hwq.pdev->dev,
2638                                 "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2639                                 wr_id_idx, rq->max_wqe);
2640                         return -EINVAL;
2641                 }
2642
2643                 if (rq->swq_last != wr_id_idx)
2644                         return -EINVAL;
2645                 swq = &rq->swq[rq->swq_last];
2646                 cqe->wr_id = swq->wr_id;
2647                 cqe++;
2648                 (*budget)--;
2649                 bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2650                 rq->swq_last = swq->next_idx;
2651                 *pcqe = cqe;
2652
2653                 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2654                         qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2655                         /* Add qp to flush list of the CQ */
2656                         bnxt_qplib_add_flush_qp(qp);
2657                 }
2658         }
2659
2660         return 0;
2661 }
2662
2663 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2664 {
2665         struct cq_base *hw_cqe;
2666         u32 sw_cons, raw_cons;
2667         bool rc = true;
2668
2669         raw_cons = cq->hwq.cons;
2670         sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2671         hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
2672         /* Check for Valid bit. If the CQE is valid, return false */
2673         rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
2674         return rc;
2675 }
2676
2677 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2678                                                 struct cq_res_raweth_qp1 *hwcqe,
2679                                                 struct bnxt_qplib_cqe **pcqe,
2680                                                 int *budget)
2681 {
2682         struct bnxt_qplib_qp *qp;
2683         struct bnxt_qplib_q *rq;
2684         struct bnxt_qplib_srq *srq;
2685         struct bnxt_qplib_cqe *cqe;
2686         u32 wr_id_idx;
2687
2688         qp = (struct bnxt_qplib_qp *)((unsigned long)
2689                                       le64_to_cpu(hwcqe->qp_handle));
2690         if (!qp) {
2691                 dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2692                 return -EINVAL;
2693         }
2694         if (qp->rq.flushed) {
2695                 dev_dbg(&cq->hwq.pdev->dev,
2696                         "%s: QP in Flush QP = %p\n", __func__, qp);
2697                 return 0;
2698         }
2699         cqe = *pcqe;
2700         cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2701         cqe->flags = le16_to_cpu(hwcqe->flags);
2702         cqe->qp_handle = (u64)(unsigned long)qp;
2703
2704         wr_id_idx =
2705                 le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2706                                 & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2707         cqe->src_qp = qp->id;
2708         if (qp->id == 1 && !cqe->length) {
2709                 /* Add workaround for the length misdetection */
2710                 cqe->length = 296;
2711         } else {
2712                 cqe->length = le16_to_cpu(hwcqe->length);
2713         }
2714         cqe->pkey_index = qp->pkey_index;
2715         memcpy(cqe->smac, qp->smac, 6);
2716
2717         cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2718         cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2719         cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2720
2721         if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2722                 srq = qp->srq;
2723                 if (!srq) {
2724                         dev_err(&cq->hwq.pdev->dev,
2725                                 "FP: SRQ used but not defined??\n");
2726                         return -EINVAL;
2727                 }
2728                 if (wr_id_idx >= srq->hwq.max_elements) {
2729                         dev_err(&cq->hwq.pdev->dev,
2730                                 "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2731                                 wr_id_idx, srq->hwq.max_elements);
2732                         return -EINVAL;
2733                 }
2734                 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2735                 bnxt_qplib_release_srqe(srq, wr_id_idx);
2736                 cqe++;
2737                 (*budget)--;
2738                 *pcqe = cqe;
2739         } else {
2740                 struct bnxt_qplib_swq *swq;
2741
2742                 rq = &qp->rq;
2743                 if (wr_id_idx > (rq->max_wqe - 1)) {
2744                         dev_err(&cq->hwq.pdev->dev,
2745                                 "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2746                                 wr_id_idx, rq->max_wqe);
2747                         return -EINVAL;
2748                 }
2749                 if (rq->swq_last != wr_id_idx)
2750                         return -EINVAL;
2751                 swq = &rq->swq[rq->swq_last];
2752                 cqe->wr_id = swq->wr_id;
2753                 cqe++;
2754                 (*budget)--;
2755                 bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2756                 rq->swq_last = swq->next_idx;
2757                 *pcqe = cqe;
2758
2759                 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2760                         qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2761                         /* Add qp to flush list of the CQ */
2762                         bnxt_qplib_add_flush_qp(qp);
2763                 }
2764         }
2765
2766         return 0;
2767 }
2768
2769 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2770                                           struct cq_terminal *hwcqe,
2771                                           struct bnxt_qplib_cqe **pcqe,
2772                                           int *budget)
2773 {
2774         struct bnxt_qplib_qp *qp;
2775         struct bnxt_qplib_q *sq, *rq;
2776         struct bnxt_qplib_cqe *cqe;
2777         u32 swq_last = 0, cqe_cons;
2778         int rc = 0;
2779
2780         /* Check the Status */
2781         if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2782                 dev_warn(&cq->hwq.pdev->dev,
2783                          "FP: CQ Process Terminal Error status = 0x%x\n",
2784                          hwcqe->status);
2785
2786         qp = (struct bnxt_qplib_qp *)((unsigned long)
2787                                       le64_to_cpu(hwcqe->qp_handle));
2788         if (!qp)
2789                 return -EINVAL;
2790
2791         /* Must block new posting of SQ and RQ */
2792         qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2793
2794         sq = &qp->sq;
2795         rq = &qp->rq;
2796
2797         cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
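             /* 0xFFFF is the "no index" sentinel: this terminal CQE does
              * not report an SQ consumer index, so move on to the RQ side.
              */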
2798         if (cqe_cons == 0xFFFF)
2799                 goto do_rq;
2800         cqe_cons %= sq->max_wqe;
2801
2802         if (qp->sq.flushed) {
2803                 dev_dbg(&cq->hwq.pdev->dev,
2804                         "%s: QP in Flush QP = %p\n", __func__, qp);
2805                 goto sq_done;
2806         }
2807
2808         /* A terminal CQE can also aggregate successful completions that
2809          * preceded it, so every CQE from the SQ's current consumer index
2810          * up to cqe_cons must be completed with status OK.
2811          */
2812         cqe = *pcqe;
2813         while (*budget) {
2814                 swq_last = sq->swq_last;
2815                 if (swq_last == cqe_cons)
2816                         break;
2817                 if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2818                         memset(cqe, 0, sizeof(*cqe));
2819                         cqe->status = CQ_REQ_STATUS_OK;
2820                         cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2821                         cqe->qp_handle = (u64)(unsigned long)qp;
2822                         cqe->src_qp = qp->id;
2823                         cqe->wr_id = sq->swq[swq_last].wr_id;
2824                         cqe->type = sq->swq[swq_last].type;
2825                         cqe++;
2826                         (*budget)--;
2827                 }
2828                 bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots);
2829                 sq->swq_last = sq->swq[swq_last].next_idx;
2830         }
2831         *pcqe = cqe;
2832         if (!(*budget) && swq_last != cqe_cons) {
2833                 /* Out of budget */
2834                 rc = -EAGAIN;
2835                 goto sq_done;
2836         }
2837 sq_done:
2838         if (rc)
2839                 return rc;
2840 do_rq:
2841         cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2842         if (cqe_cons == 0xFFFF) {
2843                 goto done;
2844         } else if (cqe_cons > rq->max_wqe - 1) {
2845                 dev_err(&cq->hwq.pdev->dev,
2846                         "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2847                         cqe_cons, rq->max_wqe);
2848                 rc = -EINVAL;
2849                 goto done;
2850         }
2851
2852         if (qp->rq.flushed) {
2853                 dev_dbg(&cq->hwq.pdev->dev,
2854                         "%s: QP in Flush QP = %p\n", __func__, qp);
2855                 rc = 0;
2856                 goto done;
2857         }
2858
2859         /* A terminal CQE requires every posted RQE, from the current
2860          * rq->cons up to rq->prod, to complete with FLUSHED_ERR,
2861          * regardless of the rq->cons index the terminal CQE itself reports.
2862          */
2863
2864         /* Add qp to flush list of the CQ */
2865         bnxt_qplib_add_flush_qp(qp);
2866 done:
2867         return rc;
2868 }
2869
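     /*
      * A cutoff CQE marks the point at which the hardware stops
      * producing entries on the old CQ during a resize; clearing the
      * flag and waking cq->waitq unblocks the thread waiting for the
      * resize to complete.
      */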
2870 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2871                                         struct cq_cutoff *hwcqe)
2872 {
2873         /* Check the Status */
2874         if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2875                 dev_err(&cq->hwq.pdev->dev,
2876                         "FP: CQ Process Cutoff Error status = 0x%x\n",
2877                         hwcqe->status);
2878                 return -EINVAL;
2879         }
2880         clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2881         wake_up_interruptible(&cq->waitq);
2882
2883         return 0;
2884 }
2885
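     /*
      * Generate flushed completions for every QP parked on this CQ's SQ
      * and RQ flush lists.  flush_lock serialises against QPs being
      * added to or removed from the lists while the completions are
      * being produced.
      */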
2886 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2887                                   struct bnxt_qplib_cqe *cqe,
2888                                   int num_cqes)
2889 {
2890         struct bnxt_qplib_qp *qp = NULL;
2891         u32 budget = num_cqes;
2892         unsigned long flags;
2893
2894         spin_lock_irqsave(&cq->flush_lock, flags);
2895         list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2896                 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2897                 __flush_sq(&qp->sq, qp, &cqe, &budget);
2898         }
2899
2900         list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2901                 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2902                 __flush_rq(&qp->rq, qp, &cqe, &budget);
2903         }
2904         spin_unlock_irqrestore(&cq->flush_lock, flags);
2905
2906         return num_cqes - budget;
2907 }
2908
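     /*
      * bnxt_qplib_poll_cq() - reap up to @num_cqes completions into @cqe
      * and return how many were written.  @lib_qp is an out-parameter the
      * REQ path uses to hand a QP needing phantom-WQE processing back to
      * the caller.  A caller would typically loop until the CQ drains; a
      * minimal sketch (hypothetical caller, not part of this file):
      *
      *     do {
      *             cnt = bnxt_qplib_poll_cq(cq, wcs, ARRAY_SIZE(wcs), &qp);
      *             ... translate wcs[0..cnt) into ib_wc entries ...
      *     } while (cnt == ARRAY_SIZE(wcs));
      */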
2909 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2910                        int num_cqes, struct bnxt_qplib_qp **lib_qp)
2911 {
2912         struct cq_base *hw_cqe;
2913         u32 sw_cons, raw_cons;
2914         int budget, rc = 0;
2915         u8 type;
2916
2917         raw_cons = cq->hwq.cons;
2918         budget = num_cqes;
2919
2920         while (budget) {
2921                 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2922                 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
2923
2924                 /* Check for Valid bit */
2925                 if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2926                         break;
2927
2928                 /*
2929                  * The valid test of the entry must be done first before
2930                  * reading any further.
2931                  */
2932                 dma_rmb();
2933                 /* Translate from the device's CQE format to qplib_wc */
2934                 type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2935                 switch (type) {
2936                 case CQ_BASE_CQE_TYPE_REQ:
2937                         rc = bnxt_qplib_cq_process_req(cq,
2938                                                        (struct cq_req *)hw_cqe,
2939                                                        &cqe, &budget,
2940                                                        sw_cons, lib_qp);
2941                         break;
2942                 case CQ_BASE_CQE_TYPE_RES_RC:
2943                         rc = bnxt_qplib_cq_process_res_rc(cq,
2944                                                           (struct cq_res_rc *)
2945                                                           hw_cqe, &cqe,
2946                                                           &budget);
2947                         break;
2948                 case CQ_BASE_CQE_TYPE_RES_UD:
2949                         rc = bnxt_qplib_cq_process_res_ud
2950                                         (cq, (struct cq_res_ud *)hw_cqe, &cqe,
2951                                          &budget);
2952                         break;
2953                 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2954                         rc = bnxt_qplib_cq_process_res_raweth_qp1
2955                                         (cq, (struct cq_res_raweth_qp1 *)
2956                                          hw_cqe, &cqe, &budget);
2957                         break;
2958                 case CQ_BASE_CQE_TYPE_TERMINAL:
2959                         rc = bnxt_qplib_cq_process_terminal
2960                                         (cq, (struct cq_terminal *)hw_cqe,
2961                                          &cqe, &budget);
2962                         break;
2963                 case CQ_BASE_CQE_TYPE_CUT_OFF:
2964                         bnxt_qplib_cq_process_cutoff
2965                                         (cq, (struct cq_cutoff *)hw_cqe);
2966                         /* Done processing this CQ */
2967                         goto exit;
2968                 default:
2969                         dev_err(&cq->hwq.pdev->dev,
2970                                 "process_cq unknown type 0x%lx\n",
2971                                 hw_cqe->cqe_type_toggle &
2972                                 CQ_BASE_CQE_TYPE_MASK);
2973                         rc = -EINVAL;
2974                         break;
2975                 }
2976                 if (rc < 0) {
2977                         if (rc == -EAGAIN)
2978                                 break;
2979                         /* Error while processing the CQE, just skip to the
2980                          * next one
2981                          */
2982                         if (type != CQ_BASE_CQE_TYPE_TERMINAL)
2983                                 dev_err(&cq->hwq.pdev->dev,
2984                                         "process_cqe error rc = 0x%x\n", rc);
2985                 }
2986                 raw_cons++;
2987         }
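             /* Publish the new consumer index and ring the CQ doorbell
              * only if at least one entry was consumed.
              */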
2988         if (cq->hwq.cons != raw_cons) {
2989                 cq->hwq.cons = raw_cons;
2990                 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
2991         }
2992 exit:
2993         return num_cqes - budget;
2994 }
2995
2996 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2997 {
2998         if (arm_type)
2999                 bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
3000         /* Using cq->arm_state variable to track whether to issue cq handler */
3001         atomic_set(&cq->arm_state, 1);
3002 }
3003
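     /*
      * Flush the CQ-notification workqueue(s) of this QP's CQs so that
      * any queued CQN work has finished before the caller (typically the
      * QP destroy path) proceeds.
      */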
3004 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
3005 {
3006         flush_workqueue(qp->scq->nq->cqn_wq);
3007         if (qp->scq != qp->rcq)
3008                 flush_workqueue(qp->rcq->nq->cqn_wq);
3009 }