2 * Broadcom NetXtreme-E RoCE driver.
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 * Description: IB Verbs interpreter
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
44 #include <net/addrconf.h>
46 #include <rdma/ib_verbs.h>
47 #include <rdma/ib_user_verbs.h>
48 #include <rdma/ib_umem.h>
49 #include <rdma/ib_addr.h>
50 #include <rdma/ib_mad.h>
51 #include <rdma/ib_cache.h>
52 #include <rdma/uverbs_ioctl.h>
57 #include "qplib_res.h"
60 #include "qplib_rcfw.h"
64 #include <rdma/bnxt_re-abi.h>
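/* Translate IB verbs access flags into the qplib access bits consumed by
 * the firmware command path; __to_ib_access_flags() below performs the
 * reverse mapping when attributes are reported back to the IB core.
 */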
66 static int __from_ib_access_flags(int iflags)
70 if (iflags & IB_ACCESS_LOCAL_WRITE)
71 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
72 if (iflags & IB_ACCESS_REMOTE_READ)
73 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
74 if (iflags & IB_ACCESS_REMOTE_WRITE)
75 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
76 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
77 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
78 if (iflags & IB_ACCESS_MW_BIND)
79 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
80 if (iflags & IB_ZERO_BASED)
81 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
82 if (iflags & IB_ACCESS_ON_DEMAND)
83 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
87 static enum ib_access_flags __to_ib_access_flags(int qflags)
89 enum ib_access_flags iflags = 0;
91 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
92 iflags |= IB_ACCESS_LOCAL_WRITE;
93 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
94 iflags |= IB_ACCESS_REMOTE_WRITE;
95 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
96 iflags |= IB_ACCESS_REMOTE_READ;
97 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
98 iflags |= IB_ACCESS_REMOTE_ATOMIC;
99 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
100 iflags |= IB_ACCESS_MW_BIND;
101 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
102 iflags |= IB_ZERO_BASED;
103 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
104 iflags |= IB_ACCESS_ON_DEMAND;
108 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
109 struct bnxt_qplib_sge *sg_list, int num)
113 for (i = 0; i < num; i++) {
114 sg_list[i].addr = ib_sg_list[i].addr;
115 sg_list[i].lkey = ib_sg_list[i].lkey;
116 sg_list[i].size = ib_sg_list[i].length;
117 total += sg_list[i].size;
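/* Report device attributes to the IB core. The limits are taken from the
 * bnxt_qplib_dev_attr copy cached in rdev->dev_attr, the system image GUID
 * is derived from the netdev MAC address, and atomic capabilities are only
 * advertised when the adapter reports atomic support (dev_attr->is_atomic).
 */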
123 int bnxt_re_query_device(struct ib_device *ibdev,
124 struct ib_device_attr *ib_attr,
125 struct ib_udata *udata)
127 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
128 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
130 memset(ib_attr, 0, sizeof(*ib_attr));
131 memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
132 min(sizeof(dev_attr->fw_ver),
133 sizeof(ib_attr->fw_ver)));
134 addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid,
135 rdev->netdev->dev_addr);
136 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
137 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED;
139 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
140 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
141 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
142 ib_attr->max_qp = dev_attr->max_qp;
143 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
144 ib_attr->device_cap_flags =
145 IB_DEVICE_CURR_QP_STATE_MOD
146 | IB_DEVICE_RC_RNR_NAK_GEN
147 | IB_DEVICE_SHUTDOWN_PORT
148 | IB_DEVICE_SYS_IMAGE_GUID
149 | IB_DEVICE_RESIZE_MAX_WR
150 | IB_DEVICE_PORT_ACTIVE_EVENT
151 | IB_DEVICE_N_NOTIFY_CQ
152 | IB_DEVICE_MEM_WINDOW
153 | IB_DEVICE_MEM_WINDOW_TYPE_2B
154 | IB_DEVICE_MEM_MGT_EXTENSIONS;
155 ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
156 ib_attr->max_send_sge = dev_attr->max_qp_sges;
157 ib_attr->max_recv_sge = dev_attr->max_qp_sges;
158 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
159 ib_attr->max_cq = dev_attr->max_cq;
160 ib_attr->max_cqe = dev_attr->max_cq_wqes;
161 ib_attr->max_mr = dev_attr->max_mr;
162 ib_attr->max_pd = dev_attr->max_pd;
163 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
164 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
165 ib_attr->atomic_cap = IB_ATOMIC_NONE;
166 ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
167 if (dev_attr->is_atomic) {
168 ib_attr->atomic_cap = IB_ATOMIC_GLOB;
169 ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
172 ib_attr->max_ee_rd_atom = 0;
173 ib_attr->max_res_rd_atom = 0;
174 ib_attr->max_ee_init_rd_atom = 0;
176 ib_attr->max_rdd = 0;
177 ib_attr->max_mw = dev_attr->max_mw;
178 ib_attr->max_raw_ipv6_qp = 0;
179 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
180 ib_attr->max_mcast_grp = 0;
181 ib_attr->max_mcast_qp_attach = 0;
182 ib_attr->max_total_mcast_qp_attach = 0;
183 ib_attr->max_ah = dev_attr->max_ah;
185 ib_attr->max_srq = dev_attr->max_srq;
186 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
187 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
189 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
191 ib_attr->max_pkeys = 1;
192 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
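/* Port state tracks the underlying netdev: the port is reported ACTIVE only
 * while the interface is running with link carrier. The active MTU mirrors
 * the netdev MTU (capped at 4096), and speed/width are the values cached in
 * rdev->active_speed/active_width.
 */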
197 int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
198 struct ib_port_attr *port_attr)
200 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
201 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
203 memset(port_attr, 0, sizeof(*port_attr));
205 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
206 port_attr->state = IB_PORT_ACTIVE;
207 port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
209 port_attr->state = IB_PORT_DOWN;
210 port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
212 port_attr->max_mtu = IB_MTU_4096;
213 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
214 port_attr->gid_tbl_len = dev_attr->max_sgid;
215 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
216 IB_PORT_DEVICE_MGMT_SUP |
217 IB_PORT_VENDOR_CLASS_SUP;
218 port_attr->ip_gids = true;
220 port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
221 port_attr->bad_pkey_cntr = 0;
222 port_attr->qkey_viol_cntr = 0;
223 port_attr->pkey_tbl_len = dev_attr->max_pkey;
225 port_attr->sm_lid = 0;
227 port_attr->max_vl_num = 4;
228 port_attr->sm_sl = 0;
229 port_attr->subnet_timeout = 0;
230 port_attr->init_type_reply = 0;
231 port_attr->active_speed = rdev->active_speed;
232 port_attr->active_width = rdev->active_width;
237 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
238 struct ib_port_immutable *immutable)
240 struct ib_port_attr port_attr;
242 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
245 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
246 immutable->gid_tbl_len = port_attr.gid_tbl_len;
247 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
248 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
249 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
253 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
255 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
257 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
258 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
259 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
262 int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
263 u16 index, u16 *pkey)
268 *pkey = IB_DEFAULT_PKEY_FULL;
273 int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
274 int index, union ib_gid *gid)
276 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
279 /* Ignore port_num */
280 memset(gid, 0, sizeof(*gid));
281 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
282 &rdev->qplib_res.sgid_tbl, index,
283 (struct bnxt_qplib_gid *)gid);
287 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
290 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
291 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
292 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
293 struct bnxt_qplib_gid *gid_to_del;
294 u16 vlan_id = 0xFFFF;
296 /* Delete the entry from the hardware */
301 if (sgid_tbl && sgid_tbl->active) {
302 if (ctx->idx >= sgid_tbl->max)
304 gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
305 vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
306 /* DEL_GID is called in WQ context(netdevice_event_work_handler)
307 * or via the ib_unregister_device path. In the former case QP1
308 * may not be destroyed yet, in which case just return as FW
309 * needs that entry to be present and will fail its deletion.
310 * We could get invoked again after QP1 is destroyed OR get an
311 * ADD_GID call with a different GID value for the same index
312 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
315 rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
316 ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
317 ibdev_dbg(&rdev->ibdev,
318 "Trying to delete GID0 while QP1 is alive\n");
323 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
326 ibdev_err(&rdev->ibdev,
327 "Failed to remove GID: %#x", rc);
329 ctx_tbl = sgid_tbl->ctx;
330 ctx_tbl[ctx->idx] = NULL;
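/* Add a GID entry to the hardware SGID table. Each table index has a
 * refcounted bnxt_re_gid_ctx, so a duplicate add from the stack
 * (-EALREADY from qplib) only bumps the refcount and reuses the existing
 * context instead of reprogramming the table.
 */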
340 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
344 u16 vlan_id = 0xFFFF;
345 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
346 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
347 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
349 rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
353 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
354 rdev->qplib_res.netdev->dev_addr,
355 vlan_id, true, &tbl_idx);
356 if (rc == -EALREADY) {
357 ctx_tbl = sgid_tbl->ctx;
358 ctx_tbl[tbl_idx]->refcnt++;
359 *context = ctx_tbl[tbl_idx];
364 ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
368 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
371 ctx_tbl = sgid_tbl->ctx;
374 ctx_tbl[tbl_idx] = ctx;
380 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
383 return IB_LINK_LAYER_ETHERNET;
386 #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
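/* Per-PD fence infrastructure (kernel PDs only): a small DMA-mapped buffer
 * is registered as an MR, a type-1 MW is allocated on top of it, and a
 * bind-MW WQE carrying the unconditional-fence flag is pre-built here.
 * bnxt_re_bind_fence_mw() later posts a copy of that WQE, with a freshly
 * incremented rkey, to emulate fenced work requests on this hardware.
 */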
388 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
390 struct bnxt_re_fence_data *fence = &pd->fence;
391 struct ib_mr *ib_mr = &fence->mr->ib_mr;
392 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
394 memset(wqe, 0, sizeof(*wqe));
395 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
396 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
397 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
398 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
399 wqe->bind.zero_based = false;
400 wqe->bind.parent_l_key = ib_mr->lkey;
401 wqe->bind.va = (u64)(unsigned long)fence->va;
402 wqe->bind.length = fence->size;
403 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
404 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
406 /* Save the initial rkey in fence structure for now;
407 * wqe->bind.r_key will be set at (re)bind time.
409 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
412 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
414 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
416 struct ib_pd *ib_pd = qp->ib_qp.pd;
417 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
418 struct bnxt_re_fence_data *fence = &pd->fence;
419 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
420 struct bnxt_qplib_swqe wqe;
423 memcpy(&wqe, fence_wqe, sizeof(wqe));
424 wqe.bind.r_key = fence->bind_rkey;
425 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
427 ibdev_dbg(&qp->rdev->ibdev,
428 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
429 wqe.bind.r_key, qp->qplib_qp.id, pd);
430 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
432 ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
435 bnxt_qplib_post_send_db(&qp->qplib_qp);
440 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
442 struct bnxt_re_fence_data *fence = &pd->fence;
443 struct bnxt_re_dev *rdev = pd->rdev;
444 struct device *dev = &rdev->en_dev->pdev->dev;
445 struct bnxt_re_mr *mr = fence->mr;
448 bnxt_re_dealloc_mw(fence->mw);
453 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
456 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
460 if (fence->dma_addr) {
461 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
467 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
469 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
470 struct bnxt_re_fence_data *fence = &pd->fence;
471 struct bnxt_re_dev *rdev = pd->rdev;
472 struct device *dev = &rdev->en_dev->pdev->dev;
473 struct bnxt_re_mr *mr = NULL;
474 dma_addr_t dma_addr = 0;
478 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
480 rc = dma_mapping_error(dev, dma_addr);
482 ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
487 fence->dma_addr = dma_addr;
490 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
497 mr->qplib_mr.pd = &pd->qplib_pd;
498 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
499 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
500 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
502 ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
507 mr->ib_mr.lkey = mr->qplib_mr.lkey;
508 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
509 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
510 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
511 BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
513 ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
516 mr->ib_mr.rkey = mr->qplib_mr.rkey;
518 /* Create a fence MW only for kernel consumers */
519 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
521 ibdev_err(&rdev->ibdev,
522 "Failed to create fence-MW for PD: %p\n", pd);
528 bnxt_re_create_fence_wqe(pd);
532 bnxt_re_destroy_fence_mr(pd);
536 /* Protection Domains */
537 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
539 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
540 struct bnxt_re_dev *rdev = pd->rdev;
542 bnxt_re_destroy_fence_mr(pd);
544 if (pd->qplib_pd.id) {
545 if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
546 &rdev->qplib_res.pd_tbl,
548 atomic_dec(&rdev->pd_count);
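/* Allocate a protection domain. For user PDs the per-context DPI (doorbell
 * page) is allocated lazily on the first PD so that simple queries such as
 * ibv_devinfo do not fail when DPIs are exhausted; the PD id and doorbell
 * mapping are then returned through udata. Kernel PDs additionally get the
 * fence MR/MW set up.
 */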
553 int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
555 struct ib_device *ibdev = ibpd->device;
556 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
557 struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
558 udata, struct bnxt_re_ucontext, ib_uctx);
559 struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
563 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
564 ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
570 struct bnxt_re_pd_resp resp;
572 if (!ucntx->dpi.dbr) {
573 /* Allocate DPI in alloc_pd to avoid failing of
574 * ibv_devinfo and family of application when DPIs
575 * are depleted.
576 */
577 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
578 &ucntx->dpi, ucntx)) {
584 resp.pdid = pd->qplib_pd.id;
585 /* Still allow mapping this DBR to the new user PD. */
586 resp.dpi = ucntx->dpi.dpi;
587 resp.dbr = (u64)ucntx->dpi.umdbr;
589 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
591 ibdev_err(&rdev->ibdev,
592 "Failed to copy user response\n");
598 if (bnxt_re_create_fence_mr(pd))
599 ibdev_warn(&rdev->ibdev,
600 "Failed to create Fence-MR\n");
601 atomic_inc(&rdev->pd_count);
605 bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
611 /* Address Handles */
612 int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
614 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
615 struct bnxt_re_dev *rdev = ah->rdev;
617 bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
618 !(flags & RDMA_DESTROY_AH_SLEEPABLE));
619 atomic_dec(&rdev->ah_count);
624 static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
629 case RDMA_NETWORK_IPV4:
630 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
632 case RDMA_NETWORK_IPV6:
633 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
636 nw_type = CMDQ_CREATE_AH_TYPE_V1;
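/* Create an address handle. A GRH is mandatory for RoCE; the SGID table
 * index programmed into hardware comes from the GID's HW context (the
 * reference is already held by the caller), and for user AHs the resulting
 * AH id is published to the process through the shared page (AVID offset).
 */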
642 int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
643 struct ib_udata *udata)
645 struct ib_pd *ib_pd = ib_ah->pd;
646 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
647 struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
648 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
649 struct bnxt_re_dev *rdev = pd->rdev;
650 const struct ib_gid_attr *sgid_attr;
651 struct bnxt_re_gid_ctx *ctx;
652 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
656 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
657 ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
662 ah->qplib_ah.pd = &pd->qplib_pd;
664 /* Supply the configuration for the HW */
665 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
666 sizeof(union ib_gid));
667 sgid_attr = grh->sgid_attr;
668 /* Get the HW context of the GID. The reference
669 * of GID table entry is already taken by the caller.
671 ctx = rdma_read_gid_hw_context(sgid_attr);
672 ah->qplib_ah.sgid_index = ctx->idx;
673 ah->qplib_ah.host_sgid_index = grh->sgid_index;
674 ah->qplib_ah.traffic_class = grh->traffic_class;
675 ah->qplib_ah.flow_label = grh->flow_label;
676 ah->qplib_ah.hop_limit = grh->hop_limit;
677 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
679 /* Get network header type for this GID */
680 nw_type = rdma_gid_attr_network_type(sgid_attr);
681 ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
683 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
684 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
686 RDMA_CREATE_AH_SLEEPABLE));
688 ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
692 /* Write AVID to shared page. */
694 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
695 udata, struct bnxt_re_ucontext, ib_uctx);
699 spin_lock_irqsave(&uctx->sh_lock, flag);
700 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
701 *wrptr = ah->qplib_ah.id;
702 wmb(); /* make sure cache is updated. */
703 spin_unlock_irqrestore(&uctx->sh_lock, flag);
705 atomic_inc(&rdev->ah_count);
710 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
712 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
714 ah_attr->type = ib_ah->type;
715 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
716 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
717 rdma_ah_set_grh(ah_attr, NULL, 0,
718 ah->qplib_ah.host_sgid_index,
719 0, ah->qplib_ah.traffic_class);
720 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
721 rdma_ah_set_port_num(ah_attr, 1);
722 rdma_ah_set_static_rate(ah_attr, 0);
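/* Take both CQ locks attached to a QP, send CQ first and receive CQ only
 * when it is a distinct object, so the QP can be cleaned up or moved
 * on/off the flush list without racing against completion processing.
 */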
726 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
727 __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
731 spin_lock_irqsave(&qp->scq->cq_lock, flags);
732 if (qp->rcq != qp->scq)
733 spin_lock(&qp->rcq->cq_lock);
735 __acquire(&qp->rcq->cq_lock);
740 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
742 __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
744 if (qp->rcq != qp->scq)
745 spin_unlock(&qp->rcq->cq_lock);
747 __release(&qp->rcq->cq_lock);
748 spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
751 static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
753 struct bnxt_re_qp *gsi_sqp;
754 struct bnxt_re_ah *gsi_sah;
755 struct bnxt_re_dev *rdev;
759 gsi_sqp = rdev->gsi_ctx.gsi_sqp;
760 gsi_sah = rdev->gsi_ctx.gsi_sah;
762 ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
763 bnxt_qplib_destroy_ah(&rdev->qplib_res,
766 atomic_dec(&rdev->ah_count);
767 bnxt_qplib_clean_qp(&qp->qplib_qp);
769 ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
770 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
772 ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
775 bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
777 /* remove from active qp list */
778 mutex_lock(&rdev->qp_lock);
779 list_del(&gsi_sqp->list);
780 mutex_unlock(&rdev->qp_lock);
781 atomic_dec(&rdev->qp_count);
783 kfree(rdev->gsi_ctx.sqp_tbl);
786 rdev->gsi_ctx.gsi_sqp = NULL;
787 rdev->gsi_ctx.gsi_sah = NULL;
788 rdev->gsi_ctx.sqp_tbl = NULL;
796 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
798 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
799 struct bnxt_re_dev *rdev = qp->rdev;
803 bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
805 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
807 ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
811 if (rdma_is_kernel_res(&qp->ib_qp.res)) {
812 flags = bnxt_re_lock_cqs(qp);
813 bnxt_qplib_clean_qp(&qp->qplib_qp);
814 bnxt_re_unlock_cqs(qp, flags);
817 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
819 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
820 rc = bnxt_re_destroy_gsi_sqp(qp);
825 mutex_lock(&rdev->qp_lock);
827 mutex_unlock(&rdev->qp_lock);
828 atomic_dec(&rdev->qp_count);
830 ib_umem_release(qp->rumem);
831 ib_umem_release(qp->sumem);
836 static u8 __from_ib_qp_type(enum ib_qp_type type)
840 return CMDQ_CREATE_QP1_TYPE_GSI;
842 return CMDQ_CREATE_QP_TYPE_RC;
844 return CMDQ_CREATE_QP_TYPE_UD;
850 static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
853 if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
855 return bnxt_re_get_rwqe_size(rsge);
858 static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
860 u16 wqe_size, calc_ils;
862 wqe_size = bnxt_re_get_swqe_size(nsge);
864 calc_ils = sizeof(struct sq_send_hdr) + ilsize;
865 wqe_size = max_t(u16, calc_ils, wqe_size);
866 wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
871 static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
872 struct ib_qp_init_attr *init_attr)
874 struct bnxt_qplib_dev_attr *dev_attr;
875 struct bnxt_qplib_qp *qplqp;
876 struct bnxt_re_dev *rdev;
877 struct bnxt_qplib_q *sq;
881 qplqp = &qp->qplib_qp;
883 dev_attr = &rdev->dev_attr;
885 align = sizeof(struct sq_send_hdr);
886 ilsize = ALIGN(init_attr->cap.max_inline_data, align);
888 sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
889 if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
891 /* For gen p4 and gen p5 backward compatibility mode
892 * wqe size is fixed to 128 bytes
894 if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
895 qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
896 sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);
898 if (init_attr->cap.max_inline_data) {
899 qplqp->max_inline_data = sq->wqe_size -
900 sizeof(struct sq_send_hdr);
901 init_attr->cap.max_inline_data = qplqp->max_inline_data;
902 if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
903 sq->max_sge = qplqp->max_inline_data /
904 sizeof(struct sq_sge);
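/* Pin the userspace queue memory for a user QP. The SQ umem also covers
 * the PSN-search area for RC QPs: one entry per WQE in static WQE mode, or
 * one per SGE-sized slot in variable mode. The RQ umem is only mapped when
 * the QP is not attached to an SRQ.
 */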
910 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
911 struct bnxt_re_qp *qp, struct ib_udata *udata)
913 struct bnxt_qplib_qp *qplib_qp;
914 struct bnxt_re_ucontext *cntx;
915 struct bnxt_re_qp_req ureq;
916 int bytes = 0, psn_sz;
917 struct ib_umem *umem;
920 qplib_qp = &qp->qplib_qp;
921 cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
923 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
926 bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
927 /* Consider mapping PSN search memory only for RC QPs. */
928 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
929 psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
930 sizeof(struct sq_psn_search_ext) :
931 sizeof(struct sq_psn_search);
932 psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
933 qplib_qp->sq.max_wqe :
934 ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
935 sizeof(struct bnxt_qplib_sge));
936 bytes += (psn_nume * psn_sz);
939 bytes = PAGE_ALIGN(bytes);
940 umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
941 IB_ACCESS_LOCAL_WRITE);
943 return PTR_ERR(umem);
946 qplib_qp->sq.sg_info.umem = umem;
947 qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
948 qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
949 qplib_qp->qp_handle = ureq.qp_handle;
951 if (!qp->qplib_qp.srq) {
952 bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
953 bytes = PAGE_ALIGN(bytes);
954 umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
955 IB_ACCESS_LOCAL_WRITE);
959 qplib_qp->rq.sg_info.umem = umem;
960 qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
961 qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
964 qplib_qp->dpi = &cntx->dpi;
967 ib_umem_release(qp->sumem);
969 memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
971 return PTR_ERR(umem);
974 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
975 (struct bnxt_re_pd *pd,
976 struct bnxt_qplib_res *qp1_res,
977 struct bnxt_qplib_qp *qp1_qp)
979 struct bnxt_re_dev *rdev = pd->rdev;
980 struct bnxt_re_ah *ah;
984 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
989 ah->qplib_ah.pd = &pd->qplib_pd;
991 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
995 /* supply the dgid data same as sgid */
996 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
997 sizeof(union ib_gid));
998 ah->qplib_ah.sgid_index = 0;
1000 ah->qplib_ah.traffic_class = 0;
1001 ah->qplib_ah.flow_label = 0;
1002 ah->qplib_ah.hop_limit = 1;
1003 ah->qplib_ah.sl = 0;
1004 /* Have DMAC same as SMAC */
1005 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
1007 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
1009 ibdev_err(&rdev->ibdev,
1010 "Failed to allocate HW AH for Shadow QP");
1013 atomic_inc(&rdev->ah_count);
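/* Create the shadow UD QP used on older (non gen-P5) adapters to help
 * handle QP1 traffic. Its send queue is sized to QP1's receive queue, it
 * shares QP1's completion queues, and it is a kernel-only QP using the
 * privileged DPI.
 */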
1022 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
1023 (struct bnxt_re_pd *pd,
1024 struct bnxt_qplib_res *qp1_res,
1025 struct bnxt_qplib_qp *qp1_qp)
1027 struct bnxt_re_dev *rdev = pd->rdev;
1028 struct bnxt_re_qp *qp;
1031 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1037 /* Initialize the shadow QP structure from the QP1 values */
1038 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1040 qp->qplib_qp.pd = &pd->qplib_pd;
1041 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1042 qp->qplib_qp.type = IB_QPT_UD;
1044 qp->qplib_qp.max_inline_data = 0;
1045 qp->qplib_qp.sig_type = true;
1047 /* Shadow QP SQ depth should be same as QP1 RQ depth */
1048 qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
1049 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1050 qp->qplib_qp.sq.max_sge = 2;
1051 /* Q full delta can be 1 since it is internal QP */
1052 qp->qplib_qp.sq.q_full_delta = 1;
1053 qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
1054 qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;
1056 qp->qplib_qp.scq = qp1_qp->scq;
1057 qp->qplib_qp.rcq = qp1_qp->rcq;
1059 qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
1060 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1061 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1062 /* Q full delta can be 1 since it is internal QP */
1063 qp->qplib_qp.rq.q_full_delta = 1;
1064 qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
1065 qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;
1067 qp->qplib_qp.mtu = qp1_qp->mtu;
1069 qp->qplib_qp.sq_hdr_buf_size = 0;
1070 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1071 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1073 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1077 spin_lock_init(&qp->sq_lock);
1078 INIT_LIST_HEAD(&qp->list);
1079 mutex_lock(&rdev->qp_lock);
1080 list_add_tail(&qp->list, &rdev->qp_list);
1081 atomic_inc(&rdev->qp_count);
1082 mutex_unlock(&rdev->qp_lock);
1089 static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
1090 struct ib_qp_init_attr *init_attr)
1092 struct bnxt_qplib_dev_attr *dev_attr;
1093 struct bnxt_qplib_qp *qplqp;
1094 struct bnxt_re_dev *rdev;
1095 struct bnxt_qplib_q *rq;
1099 qplqp = &qp->qplib_qp;
1101 dev_attr = &rdev->dev_attr;
1103 if (init_attr->srq) {
1104 struct bnxt_re_srq *srq;
1106 srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
1107 qplqp->srq = &srq->qplib_srq;
1110 rq->max_sge = init_attr->cap.max_recv_sge;
1111 if (rq->max_sge > dev_attr->max_qp_sges)
1112 rq->max_sge = dev_attr->max_qp_sges;
1113 init_attr->cap.max_recv_sge = rq->max_sge;
1114 rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
1115 dev_attr->max_qp_sges);
1116 /* Allocate 1 more than what's provided so posting max doesn't
1117 * fail.
1118 */
1119 entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
1120 rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1121 rq->q_full_delta = 0;
1122 rq->sg_info.pgsize = PAGE_SIZE;
1123 rq->sg_info.pgshft = PAGE_SHIFT;
1129 static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
1131 struct bnxt_qplib_dev_attr *dev_attr;
1132 struct bnxt_qplib_qp *qplqp;
1133 struct bnxt_re_dev *rdev;
1136 qplqp = &qp->qplib_qp;
1137 dev_attr = &rdev->dev_attr;
1139 if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
1140 qplqp->rq.max_sge = dev_attr->max_qp_sges;
1141 if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
1142 qplqp->rq.max_sge = dev_attr->max_qp_sges;
1143 qplqp->rq.max_sge = 6;
1147 static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
1148 struct ib_qp_init_attr *init_attr,
1149 struct ib_udata *udata)
1151 struct bnxt_qplib_dev_attr *dev_attr;
1152 struct bnxt_qplib_qp *qplqp;
1153 struct bnxt_re_dev *rdev;
1154 struct bnxt_qplib_q *sq;
1160 qplqp = &qp->qplib_qp;
1162 dev_attr = &rdev->dev_attr;
1164 sq->max_sge = init_attr->cap.max_send_sge;
1165 if (sq->max_sge > dev_attr->max_qp_sges) {
1166 sq->max_sge = dev_attr->max_qp_sges;
1167 init_attr->cap.max_send_sge = sq->max_sge;
1170 rc = bnxt_re_setup_swqe_size(qp, init_attr);
1174 entries = init_attr->cap.max_send_wr;
1175 /* Allocate 128 + 1 more than what's provided */
1176 diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
1177 0 : BNXT_QPLIB_RESERVED_QP_WRS;
1178 entries = roundup_pow_of_two(entries + diff + 1);
1179 sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
1180 sq->q_full_delta = diff + 1;
1182 * Reserving one slot for the phantom WQE. The application can
1183 * post one extra entry in this case, but we allow it to avoid an
1184 * unexpected queue-full condition.
1186 qplqp->sq.q_full_delta -= 1;
1187 qplqp->sq.sg_info.pgsize = PAGE_SIZE;
1188 qplqp->sq.sg_info.pgshft = PAGE_SHIFT;
1193 static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
1194 struct ib_qp_init_attr *init_attr)
1196 struct bnxt_qplib_dev_attr *dev_attr;
1197 struct bnxt_qplib_qp *qplqp;
1198 struct bnxt_re_dev *rdev;
1202 qplqp = &qp->qplib_qp;
1203 dev_attr = &rdev->dev_attr;
1205 if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
1206 entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
1207 qplqp->sq.max_wqe = min_t(u32, entries,
1208 dev_attr->max_qp_wqes + 1);
1209 qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
1210 init_attr->cap.max_send_wr;
1211 qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
1212 if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
1213 qplqp->sq.max_sge = dev_attr->max_qp_sges;
1217 static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
1218 struct ib_qp_init_attr *init_attr)
1220 struct bnxt_qplib_chip_ctx *chip_ctx;
1223 chip_ctx = rdev->chip_ctx;
1225 qptype = __from_ib_qp_type(init_attr->qp_type);
1226 if (qptype == IB_QPT_MAX) {
1227 ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
1228 qptype = -EOPNOTSUPP;
1232 if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
1233 init_attr->qp_type == IB_QPT_GSI)
1234 qptype = CMDQ_CREATE_QP_TYPE_GSI;
1239 static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1240 struct ib_qp_init_attr *init_attr,
1241 struct ib_udata *udata)
1243 struct bnxt_qplib_dev_attr *dev_attr;
1244 struct bnxt_qplib_qp *qplqp;
1245 struct bnxt_re_dev *rdev;
1246 struct bnxt_re_cq *cq;
1250 qplqp = &qp->qplib_qp;
1251 dev_attr = &rdev->dev_attr;
1253 /* Setup misc params */
1254 ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
1255 qplqp->pd = &pd->qplib_pd;
1256 qplqp->qp_handle = (u64)qplqp;
1257 qplqp->max_inline_data = init_attr->cap.max_inline_data;
1258 qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
1260 qptype = bnxt_re_init_qp_type(rdev, init_attr);
1265 qplqp->type = (u8)qptype;
1266 qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode;
1268 if (init_attr->qp_type == IB_QPT_RC) {
1269 qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
1270 qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1272 qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1273 qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
1274 if (init_attr->create_flags) {
1275 ibdev_dbg(&rdev->ibdev,
1276 "QP create flags 0x%x not supported",
1277 init_attr->create_flags);
1282 if (init_attr->send_cq) {
1283 cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
1284 qplqp->scq = &cq->qplib_cq;
1288 if (init_attr->recv_cq) {
1289 cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
1290 qplqp->rcq = &cq->qplib_cq;
1295 rc = bnxt_re_init_rq_attr(qp, init_attr);
1298 if (init_attr->qp_type == IB_QPT_GSI)
1299 bnxt_re_adjust_gsi_rq_attr(qp);
1302 rc = bnxt_re_init_sq_attr(qp, init_attr, udata);
1305 if (init_attr->qp_type == IB_QPT_GSI)
1306 bnxt_re_adjust_gsi_sq_attr(qp, init_attr);
1308 if (udata) /* This will update DPI and qp_handle */
1309 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1314 static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
1315 struct bnxt_re_pd *pd)
1317 struct bnxt_re_sqp_entries *sqp_tbl;
1318 struct bnxt_re_dev *rdev;
1319 struct bnxt_re_qp *sqp;
1320 struct bnxt_re_ah *sah;
1324 /* Create a shadow QP to handle the QP1 traffic */
1325 sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
1329 rdev->gsi_ctx.sqp_tbl = sqp_tbl;
1331 sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
1334 ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
1337 rdev->gsi_ctx.gsi_sqp = sqp;
1341 sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1344 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1347 ibdev_err(&rdev->ibdev,
1348 "Failed to create AH entry for ShadowQP");
1351 rdev->gsi_ctx.gsi_sah = sah;
1359 static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1360 struct ib_qp_init_attr *init_attr)
1362 struct bnxt_re_dev *rdev;
1363 struct bnxt_qplib_qp *qplqp;
1367 qplqp = &qp->qplib_qp;
1369 qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1370 qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1372 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
1374 ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
1378 rc = bnxt_re_create_shadow_gsi(qp, pd);
1383 static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
1384 struct ib_qp_init_attr *init_attr,
1385 struct bnxt_qplib_dev_attr *dev_attr)
1389 if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
1390 init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
1391 init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
1392 init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
1393 init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
1394 ibdev_err(&rdev->ibdev,
1395 "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
1396 init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
1397 init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
1398 init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
1399 init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
1400 init_attr->cap.max_inline_data,
1401 dev_attr->max_inline_data);
1407 int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
1408 struct ib_udata *udata)
1410 struct ib_pd *ib_pd = ib_qp->pd;
1411 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1412 struct bnxt_re_dev *rdev = pd->rdev;
1413 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1414 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1417 rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
1424 rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
1428 if (qp_init_attr->qp_type == IB_QPT_GSI &&
1429 !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) {
1430 rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
1436 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1438 ibdev_err(&rdev->ibdev, "Failed to create HW QP");
1442 struct bnxt_re_qp_resp resp;
1444 resp.qpid = qp->qplib_qp.id;
1446 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1448 ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
1454 qp->ib_qp.qp_num = qp->qplib_qp.id;
1455 if (qp_init_attr->qp_type == IB_QPT_GSI)
1456 rdev->gsi_ctx.gsi_qp = qp;
1457 spin_lock_init(&qp->sq_lock);
1458 spin_lock_init(&qp->rq_lock);
1459 INIT_LIST_HEAD(&qp->list);
1460 mutex_lock(&rdev->qp_lock);
1461 list_add_tail(&qp->list, &rdev->qp_list);
1462 mutex_unlock(&rdev->qp_lock);
1463 atomic_inc(&rdev->qp_count);
1467 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1469 ib_umem_release(qp->rumem);
1470 ib_umem_release(qp->sumem);
1475 static u8 __from_ib_qp_state(enum ib_qp_state state)
1479 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1481 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1483 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1485 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1487 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1489 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1492 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1496 static enum ib_qp_state __to_ib_qp_state(u8 state)
1499 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1500 return IB_QPS_RESET;
1501 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1503 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1505 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1507 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1509 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1511 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1517 static u32 __from_ib_mtu(enum ib_mtu mtu)
1521 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1523 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1525 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1527 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1529 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1531 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1535 static enum ib_mtu __to_ib_mtu(u32 mtu)
1537 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1538 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1540 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1542 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1544 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1546 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1553 /* Shared Receive Queues */
1554 int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
1556 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1558 struct bnxt_re_dev *rdev = srq->rdev;
1559 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1560 struct bnxt_qplib_nq *nq = NULL;
1563 nq = qplib_srq->cq->nq;
1564 bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1565 ib_umem_release(srq->umem);
1566 atomic_dec(&rdev->srq_count);
1572 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1573 struct bnxt_re_pd *pd,
1574 struct bnxt_re_srq *srq,
1575 struct ib_udata *udata)
1577 struct bnxt_re_srq_req ureq;
1578 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1579 struct ib_umem *umem;
1581 struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
1582 udata, struct bnxt_re_ucontext, ib_uctx);
1584 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1587 bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
1588 bytes = PAGE_ALIGN(bytes);
1589 umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
1590 IB_ACCESS_LOCAL_WRITE);
1592 return PTR_ERR(umem);
1595 qplib_srq->sg_info.umem = umem;
1596 qplib_srq->sg_info.pgsize = PAGE_SIZE;
1597 qplib_srq->sg_info.pgshft = PAGE_SHIFT;
1598 qplib_srq->srq_handle = ureq.srq_handle;
1599 qplib_srq->dpi = &cntx->dpi;
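/* Create a shared receive queue. The SRQ is associated with NQ 0's ring for
 * events, kernel SRQs use the privileged DPI while user SRQs have their
 * memory pinned and DPI set in bnxt_re_init_user_srq(), and the SRQ id is
 * returned to userspace through udata.
 */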
1604 int bnxt_re_create_srq(struct ib_srq *ib_srq,
1605 struct ib_srq_init_attr *srq_init_attr,
1606 struct ib_udata *udata)
1608 struct bnxt_qplib_dev_attr *dev_attr;
1609 struct bnxt_qplib_nq *nq = NULL;
1610 struct bnxt_re_dev *rdev;
1611 struct bnxt_re_srq *srq;
1612 struct bnxt_re_pd *pd;
1613 struct ib_pd *ib_pd;
1617 pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1619 dev_attr = &rdev->dev_attr;
1620 srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
1622 if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1623 ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
1628 if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1634 srq->qplib_srq.pd = &pd->qplib_pd;
1635 srq->qplib_srq.dpi = &rdev->dpi_privileged;
1636 /* Allocate 1 more than what's provided so posting max doesn't
1637 * fail.
1638 */
1639 entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
1640 if (entries > dev_attr->max_srq_wqes + 1)
1641 entries = dev_attr->max_srq_wqes + 1;
1642 srq->qplib_srq.max_wqe = entries;
1644 srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1645 /* 128 byte wqe size for SRQ. So use max sges */
1646 srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
1647 srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1648 srq->srq_limit = srq_init_attr->attr.srq_limit;
1649 srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1653 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1658 rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1660 ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
1665 struct bnxt_re_srq_resp resp;
1667 resp.srqid = srq->qplib_srq.id;
1668 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1670 ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
1671 bnxt_qplib_destroy_srq(&rdev->qplib_res,
1678 atomic_inc(&rdev->srq_count);
1679 spin_lock_init(&srq->lock);
1684 ib_umem_release(srq->umem);
1689 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1690 enum ib_srq_attr_mask srq_attr_mask,
1691 struct ib_udata *udata)
1693 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1695 struct bnxt_re_dev *rdev = srq->rdev;
1698 switch (srq_attr_mask) {
1700 /* SRQ resize is not supported */
1703 /* Change the SRQ threshold */
1704 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1707 srq->qplib_srq.threshold = srq_attr->srq_limit;
1708 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1710 ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
1713 /* On success, update the shadow */
1714 srq->srq_limit = srq_attr->srq_limit;
1715 /* No need to build and send a response back to udata */
1718 ibdev_err(&rdev->ibdev,
1719 "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1725 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1727 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1729 struct bnxt_re_srq tsrq;
1730 struct bnxt_re_dev *rdev = srq->rdev;
1733 /* Get live SRQ attr */
1734 tsrq.qplib_srq.id = srq->qplib_srq.id;
1735 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1737 ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
1740 srq_attr->max_wr = srq->qplib_srq.max_wqe;
1741 srq_attr->max_sge = srq->qplib_srq.max_sge;
1742 srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1747 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1748 const struct ib_recv_wr **bad_wr)
1750 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1752 struct bnxt_qplib_swqe wqe;
1753 unsigned long flags;
1756 spin_lock_irqsave(&srq->lock, flags);
1758 /* Transcribe each ib_recv_wr to qplib_swqe */
1759 wqe.num_sge = wr->num_sge;
1760 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1761 wqe.wr_id = wr->wr_id;
1762 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1764 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1771 spin_unlock_irqrestore(&srq->lock, flags);
1775 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1776 struct bnxt_re_qp *qp1_qp,
1779 struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
1782 if (qp_attr_mask & IB_QP_STATE) {
1783 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1784 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1786 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1787 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1788 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1791 if (qp_attr_mask & IB_QP_QKEY) {
1792 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1793 /* Using a Random QKEY */
1794 qp->qplib_qp.qkey = 0x81818181;
1796 if (qp_attr_mask & IB_QP_SQ_PSN) {
1797 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1798 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1801 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1803 ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
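/* Modify QP attributes. The requested transition is validated with
 * ib_modify_qp_is_ok(), each IB_QP_* mask bit is translated into the
 * corresponding CMDQ modify flag, the QP is moved onto or off the driver
 * flush list when entering ERR or RESET, and for the GSI QP the relevant
 * attributes are also propagated to the shadow QP.
 */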
1807 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1808 int qp_attr_mask, struct ib_udata *udata)
1810 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1811 struct bnxt_re_dev *rdev = qp->rdev;
1812 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1813 enum ib_qp_state curr_qp_state, new_qp_state;
1818 if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1821 qp->qplib_qp.modify_flags = 0;
1822 if (qp_attr_mask & IB_QP_STATE) {
1823 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1824 new_qp_state = qp_attr->qp_state;
1825 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1826 ib_qp->qp_type, qp_attr_mask)) {
1827 ibdev_err(&rdev->ibdev,
1828 "Invalid attribute mask: %#x specified ",
1830 ibdev_err(&rdev->ibdev,
1831 "for qpn: %#x type: %#x",
1832 ib_qp->qp_num, ib_qp->qp_type);
1833 ibdev_err(&rdev->ibdev,
1834 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1835 curr_qp_state, new_qp_state);
1838 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1839 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1842 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1843 ibdev_dbg(&rdev->ibdev,
1844 "Move QP = %p to flush list\n", qp);
1845 flags = bnxt_re_lock_cqs(qp);
1846 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1847 bnxt_re_unlock_cqs(qp, flags);
1850 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1851 ibdev_dbg(&rdev->ibdev,
1852 "Move QP = %p out of flush list\n", qp);
1853 flags = bnxt_re_lock_cqs(qp);
1854 bnxt_qplib_clean_qp(&qp->qplib_qp);
1855 bnxt_re_unlock_cqs(qp, flags);
1858 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1859 qp->qplib_qp.modify_flags |=
1860 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1861 qp->qplib_qp.en_sqd_async_notify = true;
1863 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1864 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1865 qp->qplib_qp.access =
1866 __from_ib_access_flags(qp_attr->qp_access_flags);
1867 /* LOCAL_WRITE access must be set to allow RC receive */
1868 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1869 /* Temp: Set all params on QP as of now */
1870 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
1871 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
1873 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1874 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1875 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1877 if (qp_attr_mask & IB_QP_QKEY) {
1878 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1879 qp->qplib_qp.qkey = qp_attr->qkey;
1881 if (qp_attr_mask & IB_QP_AV) {
1882 const struct ib_global_route *grh =
1883 rdma_ah_read_grh(&qp_attr->ah_attr);
1884 const struct ib_gid_attr *sgid_attr;
1885 struct bnxt_re_gid_ctx *ctx;
1887 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1888 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1889 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1890 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1891 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1892 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1893 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1894 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1895 sizeof(qp->qplib_qp.ah.dgid.data));
1896 qp->qplib_qp.ah.flow_label = grh->flow_label;
1897 sgid_attr = grh->sgid_attr;
1898 /* Get the HW context of the GID. The reference
1899 * of GID table entry is already taken by the caller.
1901 ctx = rdma_read_gid_hw_context(sgid_attr);
1902 qp->qplib_qp.ah.sgid_index = ctx->idx;
1903 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1904 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1905 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1906 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1907 ether_addr_copy(qp->qplib_qp.ah.dmac,
1908 qp_attr->ah_attr.roce.dmac);
1910 rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
1911 &qp->qplib_qp.smac[0]);
1915 nw_type = rdma_gid_attr_network_type(sgid_attr);
1917 case RDMA_NETWORK_IPV4:
1918 qp->qplib_qp.nw_type =
1919 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1921 case RDMA_NETWORK_IPV6:
1922 qp->qplib_qp.nw_type =
1923 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1926 qp->qplib_qp.nw_type =
1927 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1932 if (qp_attr_mask & IB_QP_PATH_MTU) {
1933 qp->qplib_qp.modify_flags |=
1934 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1935 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1936 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1937 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1938 qp->qplib_qp.modify_flags |=
1939 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1940 qp->qplib_qp.path_mtu =
1941 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1943 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1946 if (qp_attr_mask & IB_QP_TIMEOUT) {
1947 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1948 qp->qplib_qp.timeout = qp_attr->timeout;
1950 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1951 qp->qplib_qp.modify_flags |=
1952 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1953 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1955 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1956 qp->qplib_qp.modify_flags |=
1957 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1958 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1960 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1961 qp->qplib_qp.modify_flags |=
1962 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1963 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1965 if (qp_attr_mask & IB_QP_RQ_PSN) {
1966 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1967 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1969 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1970 qp->qplib_qp.modify_flags |=
1971 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1972 /* Cap the max_rd_atomic to device max */
1973 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1974 dev_attr->max_qp_rd_atom);
1976 if (qp_attr_mask & IB_QP_SQ_PSN) {
1977 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1978 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1980 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1981 if (qp_attr->max_dest_rd_atomic >
1982 dev_attr->max_qp_init_rd_atom) {
1983 ibdev_err(&rdev->ibdev,
1984 "max_dest_rd_atomic requested%d is > dev_max%d",
1985 qp_attr->max_dest_rd_atomic,
1986 dev_attr->max_qp_init_rd_atom);
1990 qp->qplib_qp.modify_flags |=
1991 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1992 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1994 if (qp_attr_mask & IB_QP_CAP) {
1995 qp->qplib_qp.modify_flags |=
1996 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1997 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1998 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1999 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
2000 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
2001 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
2002 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
2003 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
2004 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
2005 (qp_attr->cap.max_inline_data >=
2006 dev_attr->max_inline_data)) {
2007 ibdev_err(&rdev->ibdev,
2008 "Create QP failed - max exceeded");
2011 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
2012 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
2013 dev_attr->max_qp_wqes + 1);
2014 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
2015 qp_attr->cap.max_send_wr;
2017 * Reserving one slot for the phantom WQE. Some applications can
2018 * post one extra entry in this case; allow it to avoid an
2019 * unexpected queue-full condition.
2021 qp->qplib_qp.sq.q_full_delta -= 1;
2022 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
2023 if (qp->qplib_qp.rq.max_wqe) {
2024 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
2025 qp->qplib_qp.rq.max_wqe =
2026 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
2027 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
2028 qp_attr->cap.max_recv_wr;
2029 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
2031 /* SRQ was used prior, just ignore the RQ caps */
2034 if (qp_attr_mask & IB_QP_DEST_QPN) {
2035 qp->qplib_qp.modify_flags |=
2036 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
2037 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
2039 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2041 ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
2044 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
2045 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2049 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2050 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
2052 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2053 struct bnxt_re_dev *rdev = qp->rdev;
2054 struct bnxt_qplib_qp *qplib_qp;
2057 qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
2061 qplib_qp->id = qp->qplib_qp.id;
2062 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2064 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2066 ibdev_err(&rdev->ibdev, "Failed to query HW QP");
2069 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2070 qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2071 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2072 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
2073 qp_attr->pkey_index = qplib_qp->pkey_index;
2074 qp_attr->qkey = qplib_qp->qkey;
2075 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2076 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
2077 qplib_qp->ah.host_sgid_index,
2078 qplib_qp->ah.hop_limit,
2079 qplib_qp->ah.traffic_class);
2080 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
2081 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
2082 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
2083 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2084 qp_attr->timeout = qplib_qp->timeout;
2085 qp_attr->retry_cnt = qplib_qp->retry_cnt;
2086 qp_attr->rnr_retry = qplib_qp->rnr_retry;
2087 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2088 qp_attr->rq_psn = qplib_qp->rq.psn;
2089 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2090 qp_attr->sq_psn = qplib_qp->sq.psn;
2091 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2092 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2094 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2096 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2097 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2098 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2099 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2100 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2101 qp_init_attr->cap = qp_attr->cap;
2108 /* Routine for sending QP1 packets for RoCE V1 and V2
2109 */
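/* The QP1 headers are assembled in qp->qp1_hdr with ib_ud_header_init()/
 * ib_ud_header_pack(): Ethernet (plus VLAN when present), then either a
 * GRH or IPv4+UDP depending on the GID type, followed by BTH/DETH. The
 * packed header is written into the dedicated SQ header buffer and
 * prepended to the work request as sg_list[0].
 */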
2110 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
2111 const struct ib_send_wr *wr,
2112 struct bnxt_qplib_swqe *wqe,
2115 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
2117 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2118 const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
2119 struct bnxt_qplib_sge sge;
2123 bool is_eth = false;
2124 bool is_vlan = false;
2125 bool is_grh = false;
2126 bool is_udp = false;
2128 u16 vlan_id = 0xFFFF;
2132 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2134 rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
2138 /* Get network header type for this GID */
2139 nw_type = rdma_gid_attr_network_type(sgid_attr);
2141 case RDMA_NETWORK_IPV4:
2142 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
2144 case RDMA_NETWORK_IPV6:
2145 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
2148 nw_type = BNXT_RE_ROCE_V1_PACKET;
2151 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
2152 is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
2154 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
2156 ether_type = ETH_P_IP;
2159 ether_type = ETH_P_IPV6;
2163 ether_type = ETH_P_IBOE;
2168 is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
2170 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
2171 ip_version, is_udp, 0, &qp->qp1_hdr);
2174 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
2175 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
2177 /* For vlan, check the sgid for vlan existence */
2180 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
2182 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
2183 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
2186 if (is_grh || (ip_version == 6)) {
2187 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
2188 sizeof(sgid_attr->gid));
2189 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2190 sizeof(sgid_attr->gid));
2191 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
2194 if (ip_version == 4) {
2195 qp->qp1_hdr.ip4.tos = 0;
2196 qp->qp1_hdr.ip4.id = 0;
2197 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
2198 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
2200 memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
2201 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2202 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2206 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2207 qp->qp1_hdr.udp.sport = htons(0x8CD1);
2208 qp->qp1_hdr.udp.csum = 0;
2212 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2213 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2214 qp->qp1_hdr.immediate_present = 1;
2216 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2218 if (wr->send_flags & IB_SEND_SOLICITED)
2219 qp->qp1_hdr.bth.solicited_event = 1;
2221 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2223 /* P_key for QP1 is for all members */
2224 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2225 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2226 qp->qp1_hdr.bth.ack_req = 0;
2228 qp->send_psn &= BTH_PSN_MASK;
2229 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2231 /* Use the privileged Q_Key for QP1 */
2232 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2233 qp->qp1_hdr.deth.source_qpn = IB_QP1;
2235 /* Pack the QP1 to the transmit buffer */
2236 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2238 ib_ud_header_pack(&qp->qp1_hdr, buf);
2239 for (i = wqe->num_sge; i; i--) {
2240 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2241 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2242 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2246 * Max Header buf size for IPV6 RoCE V2 is 86,
2247 * which is the same as the QP1 SQ header buffer.
2248 * Header buf size for IPV4 RoCE V2 can be 66.
2249 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
2250 * Subtract 20 bytes from QP1 SQ header buf size
2252 if (is_udp && ip_version == 4)
2255 * Max Header buf size for RoCE V1 is 78.
2256 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2257 * Subtract 8 bytes from QP1 SQ header buf size
2262 /* Subtract 4 bytes for non-VLAN packets */
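/*
 * Resulting header sizes, assuming the 86-byte IPv6 RoCE v2 maximum
 * quoted above is the allocated QP1 SQ header buffer size:
 * RoCE v2/IPv4 trims 20 bytes (86 - 20 = 66), RoCE v1 trims 8 bytes
 * (86 - 8 = 78), and untagged frames trim a further 4 bytes.
 */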
2266 wqe->sg_list[0].addr = sge.addr;
2267 wqe->sg_list[0].lkey = sge.lkey;
2268 wqe->sg_list[0].size = sge.size;
2272 ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
2278 /* The MAD layer only provides a recv SGE sized for
2279 * ib_grh + the MAD datagram - no Ethernet headers, Ethertype, BTH, DETH,
2280 * nor RoCE iCRC. The Cu+ solution must provide a buffer for the entire
2281 * receive packet (334 bytes) with no VLAN and then copy the GRH
2282 * and the MAD datagram out to the provided SGE.
2284 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2285 const struct ib_recv_wr *wr,
2286 struct bnxt_qplib_swqe *wqe,
2289 struct bnxt_re_sqp_entries *sqp_entry;
2290 struct bnxt_qplib_sge ref, sge;
2291 struct bnxt_re_dev *rdev;
2296 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2298 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2301 /* Create 1 SGE to receive the entire QP1 packet */
2304 /* Save the reference from ULP */
2305 ref.addr = wqe->sg_list[0].addr;
2306 ref.lkey = wqe->sg_list[0].lkey;
2307 ref.size = wqe->sg_list[0].size;
2309 sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
2312 wqe->sg_list[0].addr = sge.addr;
2313 wqe->sg_list[0].lkey = sge.lkey;
2314 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2315 sge.size -= wqe->sg_list[0].size;
2317 sqp_entry->sge.addr = ref.addr;
2318 sqp_entry->sge.lkey = ref.lkey;
2319 sqp_entry->sge.size = ref.size;
2320 /* Store the wrid for reporting completion */
2321 sqp_entry->wrid = wqe->wr_id;
2322 /* change the wqe->wrid to table index */
2323 wqe->wr_id = rq_prod_index;
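/*
 * The ULP's original SGE is parked in sqp_tbl[rq_prod_index] and the
 * wr_id is swapped for that table index, so the raw QP1 completion
 * path can later find the caller's buffer and copy the GRH plus MAD
 * datagram back into it.
 */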
2327 static int is_ud_qp(struct bnxt_re_qp *qp)
2329 return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2330 qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
2333 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2334 const struct ib_send_wr *wr,
2335 struct bnxt_qplib_swqe *wqe)
2337 struct bnxt_re_ah *ah = NULL;
2340 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2341 wqe->send.q_key = ud_wr(wr)->remote_qkey;
2342 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2343 wqe->send.avid = ah->qplib_ah.id;
2345 switch (wr->opcode) {
2347 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2349 case IB_WR_SEND_WITH_IMM:
2350 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2351 wqe->send.imm_data = wr->ex.imm_data;
2353 case IB_WR_SEND_WITH_INV:
2354 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2355 wqe->send.inv_key = wr->ex.invalidate_rkey;
2360 if (wr->send_flags & IB_SEND_SIGNALED)
2361 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2362 if (wr->send_flags & IB_SEND_FENCE)
2363 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2364 if (wr->send_flags & IB_SEND_SOLICITED)
2365 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2366 if (wr->send_flags & IB_SEND_INLINE)
2367 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2372 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2373 struct bnxt_qplib_swqe *wqe)
2375 switch (wr->opcode) {
2376 case IB_WR_RDMA_WRITE:
2377 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2379 case IB_WR_RDMA_WRITE_WITH_IMM:
2380 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2381 wqe->rdma.imm_data = wr->ex.imm_data;
2383 case IB_WR_RDMA_READ:
2384 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2385 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2390 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2391 wqe->rdma.r_key = rdma_wr(wr)->rkey;
2392 if (wr->send_flags & IB_SEND_SIGNALED)
2393 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2394 if (wr->send_flags & IB_SEND_FENCE)
2395 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2396 if (wr->send_flags & IB_SEND_SOLICITED)
2397 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2398 if (wr->send_flags & IB_SEND_INLINE)
2399 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2404 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2405 struct bnxt_qplib_swqe *wqe)
2407 switch (wr->opcode) {
2408 case IB_WR_ATOMIC_CMP_AND_SWP:
2409 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2410 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2411 wqe->atomic.swap_data = atomic_wr(wr)->swap;
2413 case IB_WR_ATOMIC_FETCH_AND_ADD:
2414 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2415 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2420 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2421 wqe->atomic.r_key = atomic_wr(wr)->rkey;
2422 if (wr->send_flags & IB_SEND_SIGNALED)
2423 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2424 if (wr->send_flags & IB_SEND_FENCE)
2425 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2426 if (wr->send_flags & IB_SEND_SOLICITED)
2427 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2431 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2432 struct bnxt_qplib_swqe *wqe)
2434 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2435 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2437 /* Need unconditional fence for local invalidate
2438 * opcode to work as expected.
2440 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2442 if (wr->send_flags & IB_SEND_SIGNALED)
2443 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2444 if (wr->send_flags & IB_SEND_SOLICITED)
2445 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2450 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2451 struct bnxt_qplib_swqe *wqe)
2453 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2454 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2455 int access = wr->access;
2457 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2458 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2459 wqe->frmr.page_list = mr->pages;
2460 wqe->frmr.page_list_len = mr->npages;
2461 wqe->frmr.levels = qplib_frpl->hwq.level;
2462 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2464 /* Need unconditional fence for reg_mr
2465 * opcode to function as expected.
2468 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2470 if (wr->wr.send_flags & IB_SEND_SIGNALED)
2471 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2473 if (access & IB_ACCESS_LOCAL_WRITE)
2474 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2475 if (access & IB_ACCESS_REMOTE_READ)
2476 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2477 if (access & IB_ACCESS_REMOTE_WRITE)
2478 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2479 if (access & IB_ACCESS_REMOTE_ATOMIC)
2480 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2481 if (access & IB_ACCESS_MW_BIND)
2482 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2484 wqe->frmr.l_key = wr->key;
2485 wqe->frmr.length = wr->mr->length;
2486 wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
2487 wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
2488 wqe->frmr.va = wr->mr->iova;
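/*
 * Copies all SGEs of an inline send into wqe->inline_data and returns
 * the accumulated wqe->inline_len; bnxt_re_copy_wr_payload() below
 * treats a negative return as an error.
 */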
2492 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2493 const struct ib_send_wr *wr,
2494 struct bnxt_qplib_swqe *wqe)
2496 /* Copy the inline data to the data field */
2501 in_data = wqe->inline_data;
2502 for (i = 0; i < wr->num_sge; i++) {
2503 sge_addr = (void *)(unsigned long)
2504 wr->sg_list[i].addr;
2505 sge_len = wr->sg_list[i].length;
2507 if ((sge_len + wqe->inline_len) >
2508 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2509 ibdev_err(&rdev->ibdev,
2510 "Inline data size requested > supported value");
2513 sge_len = wr->sg_list[i].length;
2515 memcpy(in_data, sge_addr, sge_len);
2516 in_data += wr->sg_list[i].length;
2517 wqe->inline_len += wr->sg_list[i].length;
2519 return wqe->inline_len;
2522 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2523 const struct ib_send_wr *wr,
2524 struct bnxt_qplib_swqe *wqe)
2528 if (wr->send_flags & IB_SEND_INLINE)
2529 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2531 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
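/*
 * For UD, GSI and raw-Ethertype QPs, once wqe_cnt reaches
 * BNXT_RE_UD_QP_HW_STALL the QP is re-modified to RTS and the counter
 * is cleared - presumably a no-op state transition used only to nudge
 * the hardware out of a send-queue stall on these QP types.
 */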
2537 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2539 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2540 qp->ib_qp.qp_type == IB_QPT_GSI ||
2541 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2542 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2544 struct ib_qp_attr qp_attr;
2546 qp_attr_mask = IB_QP_STATE;
2547 qp_attr.qp_state = IB_QPS_RTS;
2548 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2549 qp->qplib_qp.wqe_cnt = 0;
2553 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2554 struct bnxt_re_qp *qp,
2555 const struct ib_send_wr *wr)
2557 int rc = 0, payload_sz = 0;
2558 unsigned long flags;
2560 spin_lock_irqsave(&qp->sq_lock, flags);
2562 struct bnxt_qplib_swqe wqe = {};
2565 wqe.num_sge = wr->num_sge;
2566 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2567 ibdev_err(&rdev->ibdev,
2568 "Limit exceeded for Send SGEs");
2573 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2574 if (payload_sz < 0) {
2578 wqe.wr_id = wr->wr_id;
2580 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2582 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2584 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2587 ibdev_err(&rdev->ibdev,
2588 "Post send failed opcode = %#x rc = %d",
2594 bnxt_qplib_post_send_db(&qp->qplib_qp);
2595 bnxt_ud_qp_hw_stall_workaround(qp);
2596 spin_unlock_irqrestore(&qp->sq_lock, flags);
2600 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2601 const struct ib_send_wr **bad_wr)
2603 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2604 struct bnxt_qplib_swqe wqe;
2605 int rc = 0, payload_sz = 0;
2606 unsigned long flags;
2608 spin_lock_irqsave(&qp->sq_lock, flags);
2611 memset(&wqe, 0, sizeof(wqe));
2614 wqe.num_sge = wr->num_sge;
2615 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2616 ibdev_err(&qp->rdev->ibdev,
2617 "Limit exceeded for Send SGEs");
2622 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2623 if (payload_sz < 0) {
2627 wqe.wr_id = wr->wr_id;
2629 switch (wr->opcode) {
2631 case IB_WR_SEND_WITH_IMM:
2632 if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2633 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2637 wqe.rawqp1.lflags |=
2638 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2640 switch (wr->send_flags) {
2641 case IB_SEND_IP_CSUM:
2642 wqe.rawqp1.lflags |=
2643 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2649 case IB_WR_SEND_WITH_INV:
2650 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2652 case IB_WR_RDMA_WRITE:
2653 case IB_WR_RDMA_WRITE_WITH_IMM:
2654 case IB_WR_RDMA_READ:
2655 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2657 case IB_WR_ATOMIC_CMP_AND_SWP:
2658 case IB_WR_ATOMIC_FETCH_AND_ADD:
2659 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2661 case IB_WR_RDMA_READ_WITH_INV:
2662 ibdev_err(&qp->rdev->ibdev,
2663 "RDMA Read with Invalidate is not supported");
2666 case IB_WR_LOCAL_INV:
2667 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2670 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2673 /* Unsupported WRs */
2674 ibdev_err(&qp->rdev->ibdev,
2675 "WR (%#x) is not supported", wr->opcode);
2680 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2683 ibdev_err(&qp->rdev->ibdev,
2684 "post_send failed op:%#x qps = %#x rc = %d\n",
2685 wr->opcode, qp->qplib_qp.state, rc);
2691 bnxt_qplib_post_send_db(&qp->qplib_qp);
2692 bnxt_ud_qp_hw_stall_workaround(qp);
2693 spin_unlock_irqrestore(&qp->sq_lock, flags);
2698 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2699 struct bnxt_re_qp *qp,
2700 const struct ib_recv_wr *wr)
2702 struct bnxt_qplib_swqe wqe;
2705 memset(&wqe, 0, sizeof(wqe));
2708 memset(&wqe, 0, sizeof(wqe));
2711 wqe.num_sge = wr->num_sge;
2712 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2713 ibdev_err(&rdev->ibdev,
2714 "Limit exceeded for Receive SGEs");
2718 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2719 wqe.wr_id = wr->wr_id;
2720 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2722 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2729 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2733 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
2734 const struct ib_recv_wr **bad_wr)
2736 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2737 struct bnxt_qplib_swqe wqe;
2738 int rc = 0, payload_sz = 0;
2739 unsigned long flags;
2742 spin_lock_irqsave(&qp->rq_lock, flags);
2745 memset(&wqe, 0, sizeof(wqe));
2748 wqe.num_sge = wr->num_sge;
2749 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2750 ibdev_err(&qp->rdev->ibdev,
2751 "Limit exceeded for Receive SGEs");
2757 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2759 wqe.wr_id = wr->wr_id;
2760 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2762 if (ib_qp->qp_type == IB_QPT_GSI &&
2763 qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
2764 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2767 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2773 /* Ring the DB if the number of RQEs posted reaches a threshold */
2774 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2775 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2783 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2785 spin_unlock_irqrestore(&qp->rq_lock, flags);
2790 /* Completion Queues */
2791 int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
2793 struct bnxt_re_cq *cq;
2794 struct bnxt_qplib_nq *nq;
2795 struct bnxt_re_dev *rdev;
2797 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2799 nq = cq->qplib_cq.nq;
2801 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2802 ib_umem_release(cq->umem);
2804 atomic_dec(&rdev->cq_count);
2810 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
2811 struct ib_udata *udata)
2813 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
2814 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2815 struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
2817 int cqe = attr->cqe;
2818 struct bnxt_qplib_nq *nq = NULL;
2819 unsigned int nq_alloc_cnt;
2824 /* Validate CQ fields */
2825 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2826 ibdev_err(&rdev->ibdev, "Failed to create CQ - max exceeded");
2831 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2833 entries = roundup_pow_of_two(cqe + 1);
2834 if (entries > dev_attr->max_cq_wqes + 1)
2835 entries = dev_attr->max_cq_wqes + 1;
2837 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
2838 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
2840 struct bnxt_re_cq_req req;
2841 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
2842 udata, struct bnxt_re_ucontext, ib_uctx);
2843 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2848 cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
2849 entries * sizeof(struct cq_base),
2850 IB_ACCESS_LOCAL_WRITE);
2851 if (IS_ERR(cq->umem)) {
2852 rc = PTR_ERR(cq->umem);
2855 cq->qplib_cq.sg_info.umem = cq->umem;
2856 cq->qplib_cq.dpi = &uctx->dpi;
2858 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2859 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2866 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2869 * Allocating the NQ in a round robin fashion. nq_alloc_cnt is
2870 * used for getting the NQ index.
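 * The NQ chosen below is nq[nq_alloc_cnt % (num_msix - 1)]; since the
 * counter only ever increments, CQs are spread evenly across the
 * notification queues (one MSI-X vector is presumably reserved for the
 * control path, hence the "- 1").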
2872 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2873 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2874 cq->qplib_cq.max_wqe = entries;
2875 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2876 cq->qplib_cq.nq = nq;
2878 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2880 ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
2884 cq->ib_cq.cqe = entries;
2885 cq->cq_period = cq->qplib_cq.period;
2888 atomic_inc(&rdev->cq_count);
2889 spin_lock_init(&cq->cq_lock);
2892 struct bnxt_re_cq_resp resp;
2894 resp.cqid = cq->qplib_cq.id;
2895 resp.tail = cq->qplib_cq.hwq.cons;
2896 resp.phase = cq->qplib_cq.period;
2898 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2900 ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
2901 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2909 ib_umem_release(cq->umem);
2915 static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
2917 struct bnxt_re_dev *rdev = cq->rdev;
2919 bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
2921 cq->qplib_cq.max_wqe = cq->resize_cqe;
2922 if (cq->resize_umem) {
2923 ib_umem_release(cq->umem);
2924 cq->umem = cq->resize_umem;
2925 cq->resize_umem = NULL;
2930 int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
2932 struct bnxt_qplib_sg_info sg_info = {};
2933 struct bnxt_qplib_dpi *orig_dpi = NULL;
2934 struct bnxt_qplib_dev_attr *dev_attr;
2935 struct bnxt_re_ucontext *uctx = NULL;
2936 struct bnxt_re_resize_cq_req req;
2937 struct bnxt_re_dev *rdev;
2938 struct bnxt_re_cq *cq;
2941 cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
2943 dev_attr = &rdev->dev_attr;
2944 if (!ibcq->uobject) {
2945 ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
2949 if (cq->resize_umem) {
2950 ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy",
2955 /* Check whether the requested CQ depth is within the supported range */
2956 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2957 ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
2958 cq->qplib_cq.id, cqe);
2962 entries = roundup_pow_of_two(cqe + 1);
2963 if (entries > dev_attr->max_cq_wqes + 1)
2964 entries = dev_attr->max_cq_wqes + 1;
2966 uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
2968 /* uverbs consumer */
2969 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2974 cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va,
2975 entries * sizeof(struct cq_base),
2976 IB_ACCESS_LOCAL_WRITE);
2977 if (IS_ERR(cq->resize_umem)) {
2978 rc = PTR_ERR(cq->resize_umem);
2979 cq->resize_umem = NULL;
2980 ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
2984 cq->resize_cqe = entries;
2985 memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info));
2986 orig_dpi = cq->qplib_cq.dpi;
2988 cq->qplib_cq.sg_info.umem = cq->resize_umem;
2989 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
2990 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
2991 cq->qplib_cq.dpi = &uctx->dpi;
2993 rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
2995 ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!",
3000 cq->ib_cq.cqe = cq->resize_cqe;
3005 if (cq->resize_umem) {
3006 ib_umem_release(cq->resize_umem);
3007 cq->resize_umem = NULL;
3009 memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info));
3010 cq->qplib_cq.dpi = orig_dpi;
3015 static u8 __req_to_ib_wc_status(u8 qstatus)
3018 case CQ_REQ_STATUS_OK:
3019 return IB_WC_SUCCESS;
3020 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
3021 return IB_WC_BAD_RESP_ERR;
3022 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
3023 return IB_WC_LOC_LEN_ERR;
3024 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
3025 return IB_WC_LOC_QP_OP_ERR;
3026 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
3027 return IB_WC_LOC_PROT_ERR;
3028 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
3029 return IB_WC_GENERAL_ERR;
3030 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
3031 return IB_WC_REM_INV_REQ_ERR;
3032 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
3033 return IB_WC_REM_ACCESS_ERR;
3034 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
3035 return IB_WC_REM_OP_ERR;
3036 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
3037 return IB_WC_RNR_RETRY_EXC_ERR;
3038 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
3039 return IB_WC_RETRY_EXC_ERR;
3040 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
3041 return IB_WC_WR_FLUSH_ERR;
3043 return IB_WC_GENERAL_ERR;
3048 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
3051 case CQ_RES_RAWETH_QP1_STATUS_OK:
3052 return IB_WC_SUCCESS;
3053 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
3054 return IB_WC_LOC_ACCESS_ERR;
3055 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
3056 return IB_WC_LOC_LEN_ERR;
3057 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
3058 return IB_WC_LOC_PROT_ERR;
3059 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
3060 return IB_WC_LOC_QP_OP_ERR;
3061 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
3062 return IB_WC_GENERAL_ERR;
3063 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
3064 return IB_WC_WR_FLUSH_ERR;
3065 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
3066 return IB_WC_WR_FLUSH_ERR;
3068 return IB_WC_GENERAL_ERR;
3072 static u8 __rc_to_ib_wc_status(u8 qstatus)
3075 case CQ_RES_RC_STATUS_OK:
3076 return IB_WC_SUCCESS;
3077 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
3078 return IB_WC_LOC_ACCESS_ERR;
3079 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
3080 return IB_WC_LOC_LEN_ERR;
3081 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
3082 return IB_WC_LOC_PROT_ERR;
3083 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
3084 return IB_WC_LOC_QP_OP_ERR;
3085 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
3086 return IB_WC_GENERAL_ERR;
3087 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
3088 return IB_WC_REM_INV_REQ_ERR;
3089 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
3090 return IB_WC_WR_FLUSH_ERR;
3091 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
3092 return IB_WC_WR_FLUSH_ERR;
3094 return IB_WC_GENERAL_ERR;
3098 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
3100 switch (cqe->type) {
3101 case BNXT_QPLIB_SWQE_TYPE_SEND:
3102 wc->opcode = IB_WC_SEND;
3104 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
3105 wc->opcode = IB_WC_SEND;
3106 wc->wc_flags |= IB_WC_WITH_IMM;
3108 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
3109 wc->opcode = IB_WC_SEND;
3110 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3112 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
3113 wc->opcode = IB_WC_RDMA_WRITE;
3115 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
3116 wc->opcode = IB_WC_RDMA_WRITE;
3117 wc->wc_flags |= IB_WC_WITH_IMM;
3119 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
3120 wc->opcode = IB_WC_RDMA_READ;
3122 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
3123 wc->opcode = IB_WC_COMP_SWAP;
3125 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
3126 wc->opcode = IB_WC_FETCH_ADD;
3128 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
3129 wc->opcode = IB_WC_LOCAL_INV;
3131 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
3132 wc->opcode = IB_WC_REG_MR;
3135 wc->opcode = IB_WC_SEND;
3139 wc->status = __req_to_ib_wc_status(cqe->status);
3142 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
3143 u16 raweth_qp1_flags2)
3145 bool is_ipv6 = false, is_ipv4 = false;
3147 /* raweth_qp1_flags Bit 9-6 indicates itype */
3148 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3149 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3152 if (raweth_qp1_flags2 &
3153 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
3155 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
3156 /* raweth_qp1_flags2 bit 8 indicates ip_type: 0 - v4, 1 - v6 */
3157 (raweth_qp1_flags2 &
3158 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
3159 (is_ipv6 = true) : (is_ipv4 = true);
3161 BNXT_RE_ROCEV2_IPV6_PACKET :
3162 BNXT_RE_ROCEV2_IPV4_PACKET);
3164 return BNXT_RE_ROCE_V1_PACKET;
3168 static int bnxt_re_to_ib_nw_type(int nw_type)
3170 u8 nw_hdr_type = 0xFF;
3173 case BNXT_RE_ROCE_V1_PACKET:
3174 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
3176 case BNXT_RE_ROCEV2_IPV4_PACKET:
3177 nw_hdr_type = RDMA_NETWORK_IPV4;
3179 case BNXT_RE_ROCEV2_IPV6_PACKET:
3180 nw_hdr_type = RDMA_NETWORK_IPV6;
3186 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
3190 struct ethhdr *eth_hdr;
3194 tmp_buf = (u8 *)rq_hdr_buf;
3196 * If the dest mac is not the same as the I/F mac, this could be a
3197 * loopback address or a multicast address; check whether
3198 * it is a loopback packet.
3200 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
3202 /* Check the ether type */
3203 eth_hdr = (struct ethhdr *)tmp_buf;
3204 eth_type = ntohs(eth_hdr->h_proto);
3212 struct udphdr *udp_hdr;
3214 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
3215 sizeof(struct ipv6hdr));
3216 tmp_buf += sizeof(struct ethhdr) + len;
3217 udp_hdr = (struct udphdr *)tmp_buf;
3218 if (ntohs(udp_hdr->dest) ==
3231 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
3232 struct bnxt_qplib_cqe *cqe)
3234 struct bnxt_re_dev *rdev = gsi_qp->rdev;
3235 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3236 struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
3237 struct bnxt_re_ah *gsi_sah;
3238 struct ib_send_wr *swr;
3239 struct ib_ud_wr udwr;
3240 struct ib_recv_wr rwr;
3244 dma_addr_t rq_hdr_buf_map;
3245 dma_addr_t shrq_hdr_buf_map;
3248 struct ib_sge s_sge[2];
3249 struct ib_sge r_sge[2];
3252 memset(&udwr, 0, sizeof(udwr));
3253 memset(&rwr, 0, sizeof(rwr));
3254 memset(&s_sge, 0, sizeof(s_sge));
3255 memset(&r_sge, 0, sizeof(r_sge));
3258 tbl_idx = cqe->wr_id;
3260 rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
3261 (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
3262 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3265 /* Shadow QP header buffer */
3266 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3268 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3270 /* Store this cqe */
3271 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
3272 sqp_entry->qp1_qp = gsi_qp;
3274 /* Find packet type from the cqe */
3276 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
3277 cqe->raweth_qp1_flags2);
3279 ibdev_err(&rdev->ibdev, "Invalid packet\n");
3283 /* Adjust the offset for the user buffer and post in the rq */
3285 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
3289 * QP1 loopback packet has 4 bytes of internal header before
3290 * ether header. Skip these four bytes.
3292 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3295 /* First send SGE. Skip the ether header */
3296 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3298 s_sge[0].lkey = 0xFFFFFFFF;
3299 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3300 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3302 /* Second Send SGE */
3303 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3304 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3305 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3307 s_sge[1].lkey = 0xFFFFFFFF;
3308 s_sge[1].length = 256;
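/*
 * The loopback replay uses two send SGEs: s_sge[0] points back into the
 * original QP1 header buffer just past the Ethernet header and covers
 * the GRH (RoCE v1) or IP-header (RoCE v2) region, while s_sge[1]
 * covers up to 256 bytes of payload following the BTH/DETH headers.
 */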
3310 /* First recv SGE */
3312 r_sge[0].addr = shrq_hdr_buf_map;
3313 r_sge[0].lkey = 0xFFFFFFFF;
3314 r_sge[0].length = 40;
3316 r_sge[1].addr = sqp_entry->sge.addr + offset;
3317 r_sge[1].lkey = sqp_entry->sge.lkey;
3318 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3320 /* Create receive work request */
3322 rwr.sg_list = r_sge;
3323 rwr.wr_id = tbl_idx;
3326 rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
3328 ibdev_err(&rdev->ibdev,
3329 "Failed to post Rx buffers to shadow QP");
3334 swr->sg_list = s_sge;
3335 swr->wr_id = tbl_idx;
3336 swr->opcode = IB_WR_SEND;
3338 gsi_sah = rdev->gsi_ctx.gsi_sah;
3339 udwr.ah = &gsi_sah->ib_ah;
3340 udwr.remote_qpn = gsi_sqp->qplib_qp.id;
3341 udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
3343 /* post data received in the send queue */
3344 rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
3349 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3350 struct bnxt_qplib_cqe *cqe)
3352 wc->opcode = IB_WC_RECV;
3353 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3354 wc->wc_flags |= IB_WC_GRH;
3357 static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
3361 * Check if the vlan is configured in the host. If not configured, it
3362 * can be a transparent VLAN. So don't report the vlan id.
3364 if (!__vlan_find_dev_deep_rcu(rdev->netdev,
3365 htons(ETH_P_8021Q), vlan_id))
3370 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3377 metadata = orig_cqe->raweth_qp1_metadata;
3378 if (orig_cqe->raweth_qp1_flags2 &
3379 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3381 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3382 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3383 if (tpid == ETH_P_8021Q) {
3385 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3387 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3388 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3396 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3397 struct bnxt_qplib_cqe *cqe)
3399 wc->opcode = IB_WC_RECV;
3400 wc->status = __rc_to_ib_wc_status(cqe->status);
3402 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3403 wc->wc_flags |= IB_WC_WITH_IMM;
3404 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3405 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3406 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3407 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3408 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3411 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
3413 struct bnxt_qplib_cqe *cqe)
3415 struct bnxt_re_dev *rdev = gsi_sqp->rdev;
3416 struct bnxt_re_qp *gsi_qp = NULL;
3417 struct bnxt_qplib_cqe *orig_cqe = NULL;
3418 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3424 tbl_idx = cqe->wr_id;
3426 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3427 gsi_qp = sqp_entry->qp1_qp;
3428 orig_cqe = &sqp_entry->cqe;
3430 wc->wr_id = sqp_entry->wrid;
3431 wc->byte_len = orig_cqe->length;
3432 wc->qp = &gsi_qp->ib_qp;
3434 wc->ex.imm_data = orig_cqe->immdata;
3435 wc->src_qp = orig_cqe->src_qp;
3436 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3437 if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3438 if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3439 wc->vlan_id = vlan_id;
3441 wc->wc_flags |= IB_WC_WITH_VLAN;
3445 wc->vendor_err = orig_cqe->status;
3447 wc->opcode = IB_WC_RECV;
3448 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3449 wc->wc_flags |= IB_WC_GRH;
3451 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3452 orig_cqe->raweth_qp1_flags2);
3454 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3455 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3459 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3461 struct bnxt_qplib_cqe *cqe)
3463 struct bnxt_re_dev *rdev;
3468 wc->opcode = IB_WC_RECV;
3469 wc->status = __rc_to_ib_wc_status(cqe->status);
3471 if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3472 wc->wc_flags |= IB_WC_WITH_IMM;
3473 /* report only on GSI QP for Thor */
3474 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3475 wc->wc_flags |= IB_WC_GRH;
3476 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3477 wc->wc_flags |= IB_WC_WITH_SMAC;
3478 if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
3479 vlan_id = (cqe->cfa_meta & 0xFFF);
3481 /* Mark only if vlan_id is non-zero */
3482 if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3483 wc->vlan_id = vlan_id;
3484 wc->wc_flags |= IB_WC_WITH_VLAN;
3486 nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3487 CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3488 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3489 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
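/*
 * send_phantom_wqe() posts a fence memory-window bind on the SQ as a
 * "phantom" WQE; bnxt_re_poll_cq() below issues it whenever the qplib
 * layer flags sq->send_phantom, and retries on the next poll if the
 * post fails with -ENOMEM.
 */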
3494 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3496 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3497 unsigned long flags;
3500 spin_lock_irqsave(&qp->sq_lock, flags);
3502 rc = bnxt_re_bind_fence_mw(lib_qp);
3504 lib_qp->sq.phantom_wqe_cnt++;
3505 ibdev_dbg(&qp->rdev->ibdev,
3506 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3507 lib_qp->id, lib_qp->sq.hwq.prod,
3508 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3509 lib_qp->sq.phantom_wqe_cnt);
3512 spin_unlock_irqrestore(&qp->sq_lock, flags);
3516 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3518 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3519 struct bnxt_re_qp *qp, *sh_qp;
3520 struct bnxt_qplib_cqe *cqe;
3521 int i, ncqe, budget;
3522 struct bnxt_qplib_q *sq;
3523 struct bnxt_qplib_qp *lib_qp;
3525 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3526 unsigned long flags;
3528 /* User CQ; the only processing we do is to
3529 * complete any pending CQ resize operation.
3532 if (cq->resize_umem)
3533 bnxt_re_resize_cq_complete(cq);
3537 spin_lock_irqsave(&cq->cq_lock, flags);
3538 budget = min_t(u32, num_entries, cq->max_cql);
3539 num_entries = budget;
3541 ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
3547 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3550 if (sq->send_phantom) {
3551 qp = container_of(lib_qp,
3552 struct bnxt_re_qp, qplib_qp);
3553 if (send_phantom_wqe(qp) == -ENOMEM)
3554 ibdev_err(&cq->rdev->ibdev,
3555 "Phantom failed! Scheduled to send again\n");
3557 sq->send_phantom = false;
3561 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3568 for (i = 0; i < ncqe; i++, cqe++) {
3569 /* Transcribe each qplib_wqe back to ib_wc */
3570 memset(wc, 0, sizeof(*wc));
3572 wc->wr_id = cqe->wr_id;
3573 wc->byte_len = cqe->length;
3575 ((struct bnxt_qplib_qp *)
3576 (unsigned long)(cqe->qp_handle),
3577 struct bnxt_re_qp, qplib_qp);
3578 wc->qp = &qp->ib_qp;
3579 wc->ex.imm_data = cqe->immdata;
3580 wc->src_qp = cqe->src_qp;
3581 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3583 wc->vendor_err = cqe->status;
3585 switch (cqe->opcode) {
3586 case CQ_BASE_CQE_TYPE_REQ:
3587 sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3589 qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3590 /* Handle this completion with
3591 * the stored completion
3593 memset(wc, 0, sizeof(*wc));
3596 bnxt_re_process_req_wc(wc, cqe);
3598 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3602 rc = bnxt_re_process_raw_qp_pkt_rx
3605 memset(wc, 0, sizeof(*wc));
3610 /* Errors need not be looped back.
3611 * But change the wr_id to the one
3612 * stored in the table
3614 tbl_idx = cqe->wr_id;
3615 sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
3616 wc->wr_id = sqp_entry->wrid;
3617 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3619 case CQ_BASE_CQE_TYPE_RES_RC:
3620 bnxt_re_process_res_rc_wc(wc, cqe);
3622 case CQ_BASE_CQE_TYPE_RES_UD:
3623 sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3625 qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3626 /* Handle this completion with
3627 * the stored completion
3632 bnxt_re_process_res_shadow_qp_wc
3637 bnxt_re_process_res_ud_wc(qp, wc, cqe);
3640 ibdev_err(&cq->rdev->ibdev,
3641 "POLL CQ : type 0x%x not handled",
3650 spin_unlock_irqrestore(&cq->cq_lock, flags);
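/* num_entries was clamped to the budget above, so this is the number of
 * CQEs actually written to the caller's wc array.
 */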
3651 return num_entries - budget;
3654 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3655 enum ib_cq_notify_flags ib_cqn_flags)
3657 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3658 int type = 0, rc = 0;
3659 unsigned long flags;
3661 spin_lock_irqsave(&cq->cq_lock, flags);
3662 /* Trigger on the very next completion */
3663 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3664 type = DBC_DBC_TYPE_CQ_ARMALL;
3665 /* Trigger on the next solicited completion */
3666 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3667 type = DBC_DBC_TYPE_CQ_ARMSE;
3669 /* Poll to see if there are missed events */
3670 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3671 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3675 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3678 spin_unlock_irqrestore(&cq->cq_lock, flags);
3682 /* Memory Regions */
3683 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3685 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3686 struct bnxt_re_dev *rdev = pd->rdev;
3687 struct bnxt_re_mr *mr;
3690 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3692 return ERR_PTR(-ENOMEM);
3695 mr->qplib_mr.pd = &pd->qplib_pd;
3696 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3697 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3699 /* Allocate and register 0 as the address */
3700 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3704 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3705 mr->qplib_mr.total_size = -1; /* Infinite length */
3706 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
3711 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3712 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3713 IB_ACCESS_REMOTE_ATOMIC))
3714 mr->ib_mr.rkey = mr->ib_mr.lkey;
3715 atomic_inc(&rdev->mr_count);
3720 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3726 int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3728 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3729 struct bnxt_re_dev *rdev = mr->rdev;
3732 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3734 ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
3739 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3745 ib_umem_release(mr->ib_umem);
3748 atomic_dec(&rdev->mr_count);
3752 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3754 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3756 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3759 mr->pages[mr->npages++] = addr;
3763 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3764 unsigned int *sg_offset)
3766 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3769 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3772 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3775 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3776 struct bnxt_re_dev *rdev = pd->rdev;
3777 struct bnxt_re_mr *mr = NULL;
3780 if (type != IB_MR_TYPE_MEM_REG) {
3781 ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
3782 return ERR_PTR(-EINVAL);
3784 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3785 return ERR_PTR(-EINVAL);
3787 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3789 return ERR_PTR(-ENOMEM);
3792 mr->qplib_mr.pd = &pd->qplib_pd;
3793 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3794 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3796 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3800 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3801 mr->ib_mr.rkey = mr->ib_mr.lkey;
3803 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3808 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3809 &mr->qplib_frpl, max_num_sg);
3811 ibdev_err(&rdev->ibdev,
3812 "Failed to allocate HW FR page list");
3816 atomic_inc(&rdev->mr_count);
3822 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3828 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3829 struct ib_udata *udata)
3831 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3832 struct bnxt_re_dev *rdev = pd->rdev;
3833 struct bnxt_re_mw *mw;
3836 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3838 return ERR_PTR(-ENOMEM);
3840 mw->qplib_mw.pd = &pd->qplib_pd;
3842 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3843 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3844 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3845 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3847 ibdev_err(&rdev->ibdev, "Allocate MW failed!");
3850 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3852 atomic_inc(&rdev->mw_count);
3860 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3862 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3863 struct bnxt_re_dev *rdev = mw->rdev;
3866 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3868 ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
3873 atomic_dec(&rdev->mw_count);
3878 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3879 u64 virt_addr, int mr_access_flags,
3880 struct ib_udata *udata)
3882 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3883 struct bnxt_re_dev *rdev = pd->rdev;
3884 struct bnxt_re_mr *mr;
3885 struct ib_umem *umem;
3886 unsigned long page_size;
3889 if (length > BNXT_RE_MAX_MR_SIZE) {
3890 ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
3891 length, BNXT_RE_MAX_MR_SIZE);
3892 return ERR_PTR(-ENOMEM);
3895 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3897 return ERR_PTR(-ENOMEM);
3900 mr->qplib_mr.pd = &pd->qplib_pd;
3901 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3902 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3904 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3906 ibdev_err(&rdev->ibdev, "Failed to allocate MR");
3909 /* The fixed portion of the rkey is the same as the lkey */
3910 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3912 umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
3914 ibdev_err(&rdev->ibdev, "Failed to get umem");
3920 mr->qplib_mr.va = virt_addr;
3921 page_size = ib_umem_find_best_pgsz(
3922 umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
3924 ibdev_err(&rdev->ibdev, "umem page size unsupported!");
3928 mr->qplib_mr.total_size = length;
3930 umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
3931 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
3932 umem_pgs, page_size);
3934 ibdev_err(&rdev->ibdev, "Failed to register user MR");
3938 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3939 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3940 atomic_inc(&rdev->mr_count);
3944 ib_umem_release(umem);
3946 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3952 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
3954 struct ib_device *ibdev = ctx->device;
3955 struct bnxt_re_ucontext *uctx =
3956 container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
3957 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3958 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3959 struct bnxt_re_uctx_resp resp = {};
3960 u32 chip_met_rev_num = 0;
3963 ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);
3965 if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3966 ibdev_dbg(ibdev, " is different from the device %d ",
3967 BNXT_RE_ABI_VERSION);
3973 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3978 spin_lock_init(&uctx->sh_lock);
3980 resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
3981 chip_met_rev_num = rdev->chip_ctx->chip_num;
3982 chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
3983 BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
3984 chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
3985 BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
3986 resp.chip_id0 = chip_met_rev_num;
3987 /* Temp, use xa_alloc instead */
3988 resp.dev_id = rdev->en_dev->pdev->devfn;
3989 resp.max_qp = rdev->qplib_ctx.qpc_count;
3990 resp.pg_size = PAGE_SIZE;
3991 resp.cqe_sz = sizeof(struct cq_base);
3992 resp.max_cqd = dev_attr->max_cq_wqes;
3994 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
3995 resp.mode = rdev->chip_ctx->modes.wqe_mode;
3997 rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
3999 ibdev_err(ibdev, "Failed to copy user context");
4006 free_page((unsigned long)uctx->shpg);
4012 void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
4014 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
4015 struct bnxt_re_ucontext,
4018 struct bnxt_re_dev *rdev = uctx->rdev;
4021 free_page((unsigned long)uctx->shpg);
4023 if (uctx->dpi.dbr) {
4024 /* Free the DPI that was allocated when the application created its
4025 * first PD, and mark the context dpi as NULL
4027 bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
4028 &rdev->qplib_res.dpi_tbl, &uctx->dpi);
4029 uctx->dpi.dbr = NULL;
4033 /* Map the doorbell (DPI) page (non-zero pgoff) or the shared page into user space */
4034 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
4036 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
4037 struct bnxt_re_ucontext,
4039 struct bnxt_re_dev *rdev = uctx->rdev;
4042 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
4045 if (vma->vm_pgoff) {
4046 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
4047 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
4048 PAGE_SIZE, vma->vm_page_prot)) {
4049 ibdev_err(&rdev->ibdev, "Failed to map DPI");
4053 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
4054 if (remap_pfn_range(vma, vma->vm_start,
4055 pfn, PAGE_SIZE, vma->vm_page_prot)) {
4056 ibdev_err(&rdev->ibdev, "Failed to map shared page");