/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <net/addrconf.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "ib_verbs.h"

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>

#include <rdma/ib_user_ioctl_cmds.h>

#define UVERBS_MODULE_NAME bnxt_re
#include <rdma/uverbs_named_ioctl.h>

#include <rdma/bnxt_re-abi.h>
static int __from_ib_access_flags(int iflags)
{
	int qflags = 0;

	if (iflags & IB_ACCESS_LOCAL_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	if (iflags & IB_ACCESS_REMOTE_READ)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
	if (iflags & IB_ACCESS_REMOTE_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
	if (iflags & IB_ACCESS_MW_BIND)
		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
	if (iflags & IB_ZERO_BASED)
		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
	if (iflags & IB_ACCESS_ON_DEMAND)
		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
	return qflags;
}
static enum ib_access_flags __to_ib_access_flags(int qflags)
{
	enum ib_access_flags iflags = 0;

	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
		iflags |= IB_ACCESS_LOCAL_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
		iflags |= IB_ACCESS_REMOTE_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
		iflags |= IB_ACCESS_REMOTE_READ;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
		iflags |= IB_ACCESS_REMOTE_ATOMIC;
	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
		iflags |= IB_ACCESS_MW_BIND;
	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
		iflags |= IB_ZERO_BASED;
	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
		iflags |= IB_ACCESS_ON_DEMAND;
	return iflags;
}
static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
			     struct bnxt_qplib_sge *sg_list, int num)
{
	int i, total = 0;

	for (i = 0; i < num; i++) {
		sg_list[i].addr = ib_sg_list[i].addr;
		sg_list[i].lkey = ib_sg_list[i].lkey;
		sg_list[i].size = ib_sg_list[i].length;
		total += sg_list[i].size;
	}

	return total;
}
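/* bnxt_re_query_device() - report device attributes to the IB core.
 * Everything is answered from the device attributes cached in rdev->dev_attr;
 * the system image GUID is derived from the netdev MAC address via
 * addrconf_addr_eui48(), and atomics are advertised only when the qplib
 * layer reports HW atomic support.
 */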
132 int bnxt_re_query_device(struct ib_device *ibdev,
133 struct ib_device_attr *ib_attr,
134 struct ib_udata *udata)
136 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
137 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
139 memset(ib_attr, 0, sizeof(*ib_attr));
140 memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
141 min(sizeof(dev_attr->fw_ver),
142 sizeof(ib_attr->fw_ver)));
143 addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid,
144 rdev->netdev->dev_addr);
145 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
146 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED;
148 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
149 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
150 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
151 ib_attr->max_qp = dev_attr->max_qp;
152 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
153 ib_attr->device_cap_flags =
154 IB_DEVICE_CURR_QP_STATE_MOD
155 | IB_DEVICE_RC_RNR_NAK_GEN
156 | IB_DEVICE_SHUTDOWN_PORT
157 | IB_DEVICE_SYS_IMAGE_GUID
158 | IB_DEVICE_RESIZE_MAX_WR
159 | IB_DEVICE_PORT_ACTIVE_EVENT
160 | IB_DEVICE_N_NOTIFY_CQ
161 | IB_DEVICE_MEM_WINDOW
162 | IB_DEVICE_MEM_WINDOW_TYPE_2B
163 | IB_DEVICE_MEM_MGT_EXTENSIONS;
164 ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
165 ib_attr->max_send_sge = dev_attr->max_qp_sges;
166 ib_attr->max_recv_sge = dev_attr->max_qp_sges;
167 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
168 ib_attr->max_cq = dev_attr->max_cq;
169 ib_attr->max_cqe = dev_attr->max_cq_wqes;
170 ib_attr->max_mr = dev_attr->max_mr;
171 ib_attr->max_pd = dev_attr->max_pd;
172 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
173 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
174 ib_attr->atomic_cap = IB_ATOMIC_NONE;
175 ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
176 if (dev_attr->is_atomic) {
177 ib_attr->atomic_cap = IB_ATOMIC_GLOB;
178 ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
181 ib_attr->max_ee_rd_atom = 0;
182 ib_attr->max_res_rd_atom = 0;
183 ib_attr->max_ee_init_rd_atom = 0;
185 ib_attr->max_rdd = 0;
186 ib_attr->max_mw = dev_attr->max_mw;
187 ib_attr->max_raw_ipv6_qp = 0;
188 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
189 ib_attr->max_mcast_grp = 0;
190 ib_attr->max_mcast_qp_attach = 0;
191 ib_attr->max_total_mcast_qp_attach = 0;
192 ib_attr->max_ah = dev_attr->max_ah;
194 ib_attr->max_srq = dev_attr->max_srq;
195 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
196 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
198 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
200 ib_attr->max_pkeys = 1;
201 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
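/* bnxt_re_query_port() - report port attributes. The logical port state
 * mirrors the netdev running/carrier state; MTU, GID table size and link
 * speed/width are derived from the underlying Ethernet device.
 */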
206 int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
207 struct ib_port_attr *port_attr)
209 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
210 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
213 memset(port_attr, 0, sizeof(*port_attr));
215 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
216 port_attr->state = IB_PORT_ACTIVE;
217 port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
219 port_attr->state = IB_PORT_DOWN;
220 port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
222 port_attr->max_mtu = IB_MTU_4096;
223 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
224 port_attr->gid_tbl_len = dev_attr->max_sgid;
225 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
226 IB_PORT_DEVICE_MGMT_SUP |
227 IB_PORT_VENDOR_CLASS_SUP;
228 port_attr->ip_gids = true;
230 port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
231 port_attr->bad_pkey_cntr = 0;
232 port_attr->qkey_viol_cntr = 0;
233 port_attr->pkey_tbl_len = dev_attr->max_pkey;
235 port_attr->sm_lid = 0;
237 port_attr->max_vl_num = 4;
238 port_attr->sm_sl = 0;
239 port_attr->subnet_timeout = 0;
240 port_attr->init_type_reply = 0;
241 rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed,
242 &port_attr->active_width);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr port_attr;

	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
		return -EINVAL;

	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
	immutable->gid_tbl_len = port_attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}
void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
}
int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
		       u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = IB_DEFAULT_PKEY_FULL;

	return 0;
}
int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
		      int index, union ib_gid *gid)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	int rc;

	/* Ignore port_num */
	memset(gid, 0, sizeof(*gid));
	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
				 &rdev->qplib_res.sgid_tbl, index,
				 (struct bnxt_qplib_gid *)gid);
	return rc;
}
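/* bnxt_re_del_gid() - remove a GID from the HW SGID table. Each table index
 * carries a refcounted bnxt_re_gid_ctx (duplicate adds only bump the count),
 * so the firmware entry is torn down only when the last reference goes away.
 */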
297 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
300 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
301 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
302 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
303 struct bnxt_qplib_gid *gid_to_del;
304 u16 vlan_id = 0xFFFF;
306 /* Delete the entry from the hardware */
311 if (sgid_tbl && sgid_tbl->active) {
312 if (ctx->idx >= sgid_tbl->max)
314 gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
315 vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
		/* DEL_GID is called in WQ context(netdevice_event_work_handler)
		 * or via the ib_unregister_device path. In the former case QP1
		 * may not be destroyed yet, in which case just return as FW
		 * needs that entry to be present and will fail its deletion.
		 * We could get invoked again after QP1 is destroyed OR get an
		 * ADD_GID call with a different GID value for the same index
		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
		 */
325 rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
326 ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
327 ibdev_dbg(&rdev->ibdev,
328 "Trying to delete GID0 while QP1 is alive\n");
333 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
336 ibdev_err(&rdev->ibdev,
337 "Failed to remove GID: %#x", rc);
339 ctx_tbl = sgid_tbl->ctx;
340 ctx_tbl[ctx->idx] = NULL;
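/* bnxt_re_add_gid() - program a GID into the HW SGID table. If the qplib
 * layer reports -EALREADY the GID is already present; only the refcount on
 * the existing per-index context is incremented instead of allocating a new
 * firmware entry.
 */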
350 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
354 u16 vlan_id = 0xFFFF;
355 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
356 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
357 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
359 rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
363 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
364 rdev->qplib_res.netdev->dev_addr,
365 vlan_id, true, &tbl_idx);
366 if (rc == -EALREADY) {
367 ctx_tbl = sgid_tbl->ctx;
368 ctx_tbl[tbl_idx]->refcnt++;
369 *context = ctx_tbl[tbl_idx];
374 ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
378 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
381 ctx_tbl = sgid_tbl->ctx;
384 ctx_tbl[tbl_idx] = ctx;
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
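/* Fence MR/MW machinery (descriptive note; the exact HW motivation is an
 * assumption): the driver keeps a small DMA-mapped buffer, an MR and a
 * type-1 MW per PD. bnxt_re_create_fence_wqe() prebuilds a BIND_MW work
 * request with the unconditional-fence flag set, and bnxt_re_bind_fence_mw()
 * posts it (with a freshly incremented rkey) on a QP's send queue whenever a
 * fenced operation has to be enforced on this HW.
 */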
396 #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
398 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
400 struct bnxt_re_fence_data *fence = &pd->fence;
401 struct ib_mr *ib_mr = &fence->mr->ib_mr;
402 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
404 memset(wqe, 0, sizeof(*wqe));
405 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
406 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
407 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
408 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
409 wqe->bind.zero_based = false;
410 wqe->bind.parent_l_key = ib_mr->lkey;
411 wqe->bind.va = (u64)(unsigned long)fence->va;
412 wqe->bind.length = fence->size;
413 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
414 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
	/* Save the initial rkey in fence structure for now;
	 * wqe->bind.r_key will be set at (re)bind time.
	 */
419 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
422 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
424 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
426 struct ib_pd *ib_pd = qp->ib_qp.pd;
427 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
428 struct bnxt_re_fence_data *fence = &pd->fence;
429 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
430 struct bnxt_qplib_swqe wqe;
433 memcpy(&wqe, fence_wqe, sizeof(wqe));
434 wqe.bind.r_key = fence->bind_rkey;
435 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
437 ibdev_dbg(&qp->rdev->ibdev,
438 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
439 wqe.bind.r_key, qp->qplib_qp.id, pd);
440 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
442 ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
445 bnxt_qplib_post_send_db(&qp->qplib_qp);
450 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
452 struct bnxt_re_fence_data *fence = &pd->fence;
453 struct bnxt_re_dev *rdev = pd->rdev;
454 struct device *dev = &rdev->en_dev->pdev->dev;
455 struct bnxt_re_mr *mr = fence->mr;
458 bnxt_re_dealloc_mw(fence->mw);
463 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
466 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
470 if (fence->dma_addr) {
471 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
477 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
479 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
480 struct bnxt_re_fence_data *fence = &pd->fence;
481 struct bnxt_re_dev *rdev = pd->rdev;
482 struct device *dev = &rdev->en_dev->pdev->dev;
483 struct bnxt_re_mr *mr = NULL;
484 dma_addr_t dma_addr = 0;
488 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
490 rc = dma_mapping_error(dev, dma_addr);
492 ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
497 fence->dma_addr = dma_addr;
500 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
507 mr->qplib_mr.pd = &pd->qplib_pd;
508 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
509 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
510 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
512 ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
517 mr->ib_mr.lkey = mr->qplib_mr.lkey;
518 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
519 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
520 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
521 BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
523 ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
526 mr->ib_mr.rkey = mr->qplib_mr.rkey;
528 /* Create a fence MW only for kernel consumers */
529 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
531 ibdev_err(&rdev->ibdev,
532 "Failed to create fence-MW for PD: %p\n", pd);
538 bnxt_re_create_fence_wqe(pd);
542 bnxt_re_destroy_fence_mr(pd);
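/* bnxt_re_mmap_entry_insert() - register an rdma_user_mmap entry for the
 * shared page or one of the doorbell regions and, when requested, report the
 * mmap offset through *offset so user space can pass it back via mmap().
 */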
546 static struct bnxt_re_user_mmap_entry*
547 bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
548 enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
550 struct bnxt_re_user_mmap_entry *entry;
553 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
557 entry->mem_offset = mem_offset;
558 entry->mmap_flag = mmap_flag;
562 case BNXT_RE_MMAP_SH_PAGE:
563 ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
564 &entry->rdma_entry, PAGE_SIZE, 0);
566 case BNXT_RE_MMAP_UC_DB:
567 case BNXT_RE_MMAP_WC_DB:
568 case BNXT_RE_MMAP_DBR_BAR:
569 case BNXT_RE_MMAP_DBR_PAGE:
570 ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
571 &entry->rdma_entry, PAGE_SIZE);
583 *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
588 /* Protection Domains */
589 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
591 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
592 struct bnxt_re_dev *rdev = pd->rdev;
595 rdma_user_mmap_entry_remove(pd->pd_db_mmap);
596 pd->pd_db_mmap = NULL;
599 bnxt_re_destroy_fence_mr(pd);
601 if (pd->qplib_pd.id) {
602 if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
603 &rdev->qplib_res.pd_tbl,
605 atomic_dec(&rdev->stats.res.pd_count);
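/* bnxt_re_alloc_pd() - allocate a HW protection domain. For user contexts a
 * doorbell page (DPI) is allocated on first use, and the PD id, DPI id and
 * doorbell mmap offset are returned to user space in struct bnxt_re_pd_resp.
 * A per-PD fence MR/MW is created for kernel consumers.
 */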
610 int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
612 struct ib_device *ibdev = ibpd->device;
613 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
614 struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
615 udata, struct bnxt_re_ucontext, ib_uctx);
616 struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
617 struct bnxt_re_user_mmap_entry *entry = NULL;
622 if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
623 ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
629 struct bnxt_re_pd_resp resp = {};
631 if (!ucntx->dpi.dbr) {
			/* Allocate DPI in alloc_pd to avoid failing of
			 * ibv_devinfo and family of application when DPIs
			 * are depleted.
			 */
636 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res,
637 &ucntx->dpi, ucntx, BNXT_QPLIB_DPI_TYPE_UC)) {
643 resp.pdid = pd->qplib_pd.id;
644 /* Still allow mapping this DBR to the new user PD. */
645 resp.dpi = ucntx->dpi.dpi;
647 entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
648 BNXT_RE_MMAP_UC_DB, &resp.dbr);
655 pd->pd_db_mmap = &entry->rdma_entry;
657 rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
659 rdma_user_mmap_entry_remove(pd->pd_db_mmap);
666 if (bnxt_re_create_fence_mr(pd))
667 ibdev_warn(&rdev->ibdev,
668 "Failed to create Fence-MR\n");
669 active_pds = atomic_inc_return(&rdev->stats.res.pd_count);
670 if (active_pds > rdev->stats.res.pd_watermark)
671 rdev->stats.res.pd_watermark = active_pds;
675 bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
681 /* Address Handles */
682 int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
684 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
685 struct bnxt_re_dev *rdev = ah->rdev;
689 block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
690 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
691 if (BNXT_RE_CHECK_RC(rc)) {
692 if (rc == -ETIMEDOUT)
697 atomic_dec(&rdev->stats.res.ah_count);
static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
{
	u8 nw_type;

	switch (ntype) {
	case RDMA_NETWORK_IPV4:
		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
		break;
	case RDMA_NETWORK_IPV6:
		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
		break;
	default:
		nw_type = CMDQ_CREATE_AH_TYPE_V1;
		break;
	}
	return nw_type;
}
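/* bnxt_re_create_ah() - create an address handle. A GRH is mandatory on
 * RoCE; the HW SGID index comes from the GID table context cached at
 * add_gid time, and for user consumers the new AH id (AVID) is published
 * through the shared page.
 */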
720 int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
721 struct ib_udata *udata)
723 struct ib_pd *ib_pd = ib_ah->pd;
724 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
725 struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
726 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
727 struct bnxt_re_dev *rdev = pd->rdev;
728 const struct ib_gid_attr *sgid_attr;
729 struct bnxt_re_gid_ctx *ctx;
730 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
735 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
736 ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
741 ah->qplib_ah.pd = &pd->qplib_pd;
743 /* Supply the configuration for the HW */
744 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
745 sizeof(union ib_gid));
746 sgid_attr = grh->sgid_attr;
	/* Get the HW context of the GID. The reference
	 * of GID table entry is already taken by the caller.
	 */
750 ctx = rdma_read_gid_hw_context(sgid_attr);
751 ah->qplib_ah.sgid_index = ctx->idx;
752 ah->qplib_ah.host_sgid_index = grh->sgid_index;
753 ah->qplib_ah.traffic_class = grh->traffic_class;
754 ah->qplib_ah.flow_label = grh->flow_label;
755 ah->qplib_ah.hop_limit = grh->hop_limit;
756 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
758 /* Get network header type for this GID */
759 nw_type = rdma_gid_attr_network_type(sgid_attr);
760 ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
762 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
763 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
765 RDMA_CREATE_AH_SLEEPABLE));
767 ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
771 /* Write AVID to shared page. */
773 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
774 udata, struct bnxt_re_ucontext, ib_uctx);
778 spin_lock_irqsave(&uctx->sh_lock, flag);
779 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
780 *wrptr = ah->qplib_ah.id;
781 wmb(); /* make sure cache is updated. */
782 spin_unlock_irqrestore(&uctx->sh_lock, flag);
784 active_ahs = atomic_inc_return(&rdev->stats.res.ah_count);
785 if (active_ahs > rdev->stats.res.ah_watermark)
786 rdev->stats.res.ah_watermark = active_ahs;
791 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
793 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
795 ah_attr->type = ib_ah->type;
796 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
797 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
798 rdma_ah_set_grh(ah_attr, NULL, 0,
799 ah->qplib_ah.host_sgid_index,
800 0, ah->qplib_ah.traffic_class);
801 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
802 rdma_ah_set_port_num(ah_attr, 1);
803 rdma_ah_set_static_rate(ah_attr, 0);
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->scq->cq_lock, flags);
	if (qp->rcq != qp->scq)
		spin_lock(&qp->rcq->cq_lock);
	else
		__acquire(&qp->rcq->cq_lock);

	return flags;
}

void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
			unsigned long flags)
	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
{
	if (qp->rcq != qp->scq)
		spin_unlock(&qp->rcq->cq_lock);
	else
		__release(&qp->rcq->cq_lock);
	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}
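/* bnxt_re_destroy_gsi_sqp() - tear down the shadow QP, its AH and the SQP
 * table that were created to relay QP1 (GSI) traffic on chips that rely on
 * a software shadow queue.
 */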
832 static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
834 struct bnxt_re_qp *gsi_sqp;
835 struct bnxt_re_ah *gsi_sah;
836 struct bnxt_re_dev *rdev;
840 gsi_sqp = rdev->gsi_ctx.gsi_sqp;
841 gsi_sah = rdev->gsi_ctx.gsi_sah;
843 ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
844 bnxt_qplib_destroy_ah(&rdev->qplib_res,
847 atomic_dec(&rdev->stats.res.ah_count);
848 bnxt_qplib_clean_qp(&qp->qplib_qp);
850 ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
851 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
853 ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
856 bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
858 /* remove from active qp list */
859 mutex_lock(&rdev->qp_lock);
860 list_del(&gsi_sqp->list);
861 mutex_unlock(&rdev->qp_lock);
862 atomic_dec(&rdev->stats.res.qp_count);
864 kfree(rdev->gsi_ctx.sqp_tbl);
867 rdev->gsi_ctx.gsi_sqp = NULL;
868 rdev->gsi_ctx.gsi_sah = NULL;
869 rdev->gsi_ctx.sqp_tbl = NULL;
877 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
879 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
880 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
881 struct bnxt_re_dev *rdev = qp->rdev;
882 struct bnxt_qplib_nq *scq_nq = NULL;
883 struct bnxt_qplib_nq *rcq_nq = NULL;
887 bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
889 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
891 ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
895 if (rdma_is_kernel_res(&qp->ib_qp.res)) {
896 flags = bnxt_re_lock_cqs(qp);
897 bnxt_qplib_clean_qp(&qp->qplib_qp);
898 bnxt_re_unlock_cqs(qp, flags);
901 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
903 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
904 rc = bnxt_re_destroy_gsi_sqp(qp);
909 mutex_lock(&rdev->qp_lock);
911 mutex_unlock(&rdev->qp_lock);
912 atomic_dec(&rdev->stats.res.qp_count);
914 ib_umem_release(qp->rumem);
915 ib_umem_release(qp->sumem);
	/* Flush all the entries of notification queue associated with
	 * this qp.
	 */
920 scq_nq = qplib_qp->scq->nq;
921 rcq_nq = qplib_qp->rcq->nq;
922 bnxt_re_synchronize_nq(scq_nq);
923 if (scq_nq != rcq_nq)
924 bnxt_re_synchronize_nq(rcq_nq);
static u8 __from_ib_qp_type(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_GSI:
		return CMDQ_CREATE_QP1_TYPE_GSI;
	case IB_QPT_RC:
		return CMDQ_CREATE_QP_TYPE_RC;
	case IB_QPT_UD:
		return CMDQ_CREATE_QP_TYPE_UD;
	default:
		return IB_QPT_MAX;
	}
}
static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
				   int rsge, int max)
{
	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
		rsge = max;

	return bnxt_re_get_rwqe_size(rsge);
}
static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
{
	u16 wqe_size, calc_ils;

	wqe_size = bnxt_re_get_swqe_size(nsge);
	if (ilsize) {
		calc_ils = sizeof(struct sq_send_hdr) + ilsize;
		wqe_size = max_t(u16, calc_ils, wqe_size);
		wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
	}
	return wqe_size;
}
964 static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
965 struct ib_qp_init_attr *init_attr)
967 struct bnxt_qplib_dev_attr *dev_attr;
968 struct bnxt_qplib_qp *qplqp;
969 struct bnxt_re_dev *rdev;
970 struct bnxt_qplib_q *sq;
974 qplqp = &qp->qplib_qp;
976 dev_attr = &rdev->dev_attr;
978 align = sizeof(struct sq_send_hdr);
979 ilsize = ALIGN(init_attr->cap.max_inline_data, align);
981 sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
982 if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
	/* For gen p4 and gen p5 backward compatibility mode
	 * wqe size is fixed to 128 bytes.
	 */
987 if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
988 qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
989 sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);
991 if (init_attr->cap.max_inline_data) {
992 qplqp->max_inline_data = sq->wqe_size -
993 sizeof(struct sq_send_hdr);
994 init_attr->cap.max_inline_data = qplqp->max_inline_data;
995 if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
996 sq->max_sge = qplqp->max_inline_data /
997 sizeof(struct sq_sge);
1003 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
1004 struct bnxt_re_qp *qp, struct ib_udata *udata)
1006 struct bnxt_qplib_qp *qplib_qp;
1007 struct bnxt_re_ucontext *cntx;
1008 struct bnxt_re_qp_req ureq;
1009 int bytes = 0, psn_sz;
1010 struct ib_umem *umem;
1013 qplib_qp = &qp->qplib_qp;
1014 cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
1016 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1019 bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
1020 /* Consider mapping PSN search memory only for RC QPs. */
1021 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
1022 psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
1023 sizeof(struct sq_psn_search_ext) :
1024 sizeof(struct sq_psn_search);
1025 psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1026 qplib_qp->sq.max_wqe :
1027 ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
1028 sizeof(struct bnxt_qplib_sge));
1029 bytes += (psn_nume * psn_sz);
1032 bytes = PAGE_ALIGN(bytes);
1033 umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
1034 IB_ACCESS_LOCAL_WRITE);
1036 return PTR_ERR(umem);
1039 qplib_qp->sq.sg_info.umem = umem;
1040 qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
1041 qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
1042 qplib_qp->qp_handle = ureq.qp_handle;
1044 if (!qp->qplib_qp.srq) {
1045 bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
1046 bytes = PAGE_ALIGN(bytes);
1047 umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
1048 IB_ACCESS_LOCAL_WRITE);
1052 qplib_qp->rq.sg_info.umem = umem;
1053 qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
1054 qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
1057 qplib_qp->dpi = &cntx->dpi;
1060 ib_umem_release(qp->sumem);
1062 memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
1064 return PTR_ERR(umem);
1067 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
1068 (struct bnxt_re_pd *pd,
1069 struct bnxt_qplib_res *qp1_res,
1070 struct bnxt_qplib_qp *qp1_qp)
1072 struct bnxt_re_dev *rdev = pd->rdev;
1073 struct bnxt_re_ah *ah;
1077 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
1082 ah->qplib_ah.pd = &pd->qplib_pd;
1084 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
1088 /* supply the dgid data same as sgid */
1089 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
1090 sizeof(union ib_gid));
1091 ah->qplib_ah.sgid_index = 0;
1093 ah->qplib_ah.traffic_class = 0;
1094 ah->qplib_ah.flow_label = 0;
1095 ah->qplib_ah.hop_limit = 1;
1096 ah->qplib_ah.sl = 0;
1097 /* Have DMAC same as SMAC */
1098 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
1100 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
1102 ibdev_err(&rdev->ibdev,
1103 "Failed to allocate HW AH for Shadow QP");
1106 atomic_inc(&rdev->stats.res.ah_count);
1115 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
1116 (struct bnxt_re_pd *pd,
1117 struct bnxt_qplib_res *qp1_res,
1118 struct bnxt_qplib_qp *qp1_qp)
1120 struct bnxt_re_dev *rdev = pd->rdev;
1121 struct bnxt_re_qp *qp;
1124 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1130 /* Initialize the shadow QP structure from the QP1 values */
1131 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1133 qp->qplib_qp.pd = &pd->qplib_pd;
1134 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1135 qp->qplib_qp.type = IB_QPT_UD;
1137 qp->qplib_qp.max_inline_data = 0;
1138 qp->qplib_qp.sig_type = true;
1140 /* Shadow QP SQ depth should be same as QP1 RQ depth */
1141 qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
1142 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1143 qp->qplib_qp.sq.max_sge = 2;
1144 /* Q full delta can be 1 since it is internal QP */
1145 qp->qplib_qp.sq.q_full_delta = 1;
1146 qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
1147 qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;
1149 qp->qplib_qp.scq = qp1_qp->scq;
1150 qp->qplib_qp.rcq = qp1_qp->rcq;
1152 qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
1153 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1154 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1155 /* Q full delta can be 1 since it is internal QP */
1156 qp->qplib_qp.rq.q_full_delta = 1;
1157 qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
1158 qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;
1160 qp->qplib_qp.mtu = qp1_qp->mtu;
1162 qp->qplib_qp.sq_hdr_buf_size = 0;
1163 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1164 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1166 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1170 spin_lock_init(&qp->sq_lock);
1171 INIT_LIST_HEAD(&qp->list);
1172 mutex_lock(&rdev->qp_lock);
1173 list_add_tail(&qp->list, &rdev->qp_list);
1174 atomic_inc(&rdev->stats.res.qp_count);
1175 mutex_unlock(&rdev->qp_lock);
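/* bnxt_re_init_rq_attr() - size the receive queue from the ib_qp_init_attr
 * caps, or attach the qplib SRQ when one is supplied. One extra WQE beyond
 * the requested depth is provisioned so posting the advertised maximum never
 * trips the queue-full check.
 */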
1182 static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
1183 struct ib_qp_init_attr *init_attr)
1185 struct bnxt_qplib_dev_attr *dev_attr;
1186 struct bnxt_qplib_qp *qplqp;
1187 struct bnxt_re_dev *rdev;
1188 struct bnxt_qplib_q *rq;
1192 qplqp = &qp->qplib_qp;
1194 dev_attr = &rdev->dev_attr;
1196 if (init_attr->srq) {
1197 struct bnxt_re_srq *srq;
1199 srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
1200 qplqp->srq = &srq->qplib_srq;
1203 rq->max_sge = init_attr->cap.max_recv_sge;
1204 if (rq->max_sge > dev_attr->max_qp_sges)
1205 rq->max_sge = dev_attr->max_qp_sges;
1206 init_attr->cap.max_recv_sge = rq->max_sge;
1207 rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
1208 dev_attr->max_qp_sges);
	/* Allocate 1 more than what's provided so posting max doesn't
	 * fail.
	 */
1212 entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
1213 rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1214 rq->q_full_delta = 0;
1215 rq->sg_info.pgsize = PAGE_SIZE;
1216 rq->sg_info.pgshft = PAGE_SHIFT;
1222 static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
1224 struct bnxt_qplib_dev_attr *dev_attr;
1225 struct bnxt_qplib_qp *qplqp;
1226 struct bnxt_re_dev *rdev;
1229 qplqp = &qp->qplib_qp;
1230 dev_attr = &rdev->dev_attr;
1232 if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
1233 qplqp->rq.max_sge = dev_attr->max_qp_sges;
1234 if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
1235 qplqp->rq.max_sge = dev_attr->max_qp_sges;
1236 qplqp->rq.max_sge = 6;
1240 static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
1241 struct ib_qp_init_attr *init_attr,
1242 struct ib_udata *udata)
1244 struct bnxt_qplib_dev_attr *dev_attr;
1245 struct bnxt_qplib_qp *qplqp;
1246 struct bnxt_re_dev *rdev;
1247 struct bnxt_qplib_q *sq;
1253 qplqp = &qp->qplib_qp;
1255 dev_attr = &rdev->dev_attr;
1257 sq->max_sge = init_attr->cap.max_send_sge;
1258 if (sq->max_sge > dev_attr->max_qp_sges) {
1259 sq->max_sge = dev_attr->max_qp_sges;
1260 init_attr->cap.max_send_sge = sq->max_sge;
1263 rc = bnxt_re_setup_swqe_size(qp, init_attr);
1267 entries = init_attr->cap.max_send_wr;
1268 /* Allocate 128 + 1 more than what's provided */
1269 diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
1270 0 : BNXT_QPLIB_RESERVED_QP_WRS;
1271 entries = roundup_pow_of_two(entries + diff + 1);
1272 sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
1273 sq->q_full_delta = diff + 1;
	/* Reserving one slot for Phantom WQE. Application can
	 * post one extra entry in this case. But allowing this to avoid
	 * unexpected Queue full condition.
	 */
1279 qplqp->sq.q_full_delta -= 1;
1280 qplqp->sq.sg_info.pgsize = PAGE_SIZE;
1281 qplqp->sq.sg_info.pgshft = PAGE_SHIFT;
1286 static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
1287 struct ib_qp_init_attr *init_attr)
1289 struct bnxt_qplib_dev_attr *dev_attr;
1290 struct bnxt_qplib_qp *qplqp;
1291 struct bnxt_re_dev *rdev;
1295 qplqp = &qp->qplib_qp;
1296 dev_attr = &rdev->dev_attr;
1298 if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
1299 entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
1300 qplqp->sq.max_wqe = min_t(u32, entries,
1301 dev_attr->max_qp_wqes + 1);
1302 qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
1303 init_attr->cap.max_send_wr;
1304 qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
1305 if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
1306 qplqp->sq.max_sge = dev_attr->max_qp_sges;
1310 static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
1311 struct ib_qp_init_attr *init_attr)
1313 struct bnxt_qplib_chip_ctx *chip_ctx;
1316 chip_ctx = rdev->chip_ctx;
1318 qptype = __from_ib_qp_type(init_attr->qp_type);
1319 if (qptype == IB_QPT_MAX) {
1320 ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
1321 qptype = -EOPNOTSUPP;
1325 if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
1326 init_attr->qp_type == IB_QPT_GSI)
1327 qptype = CMDQ_CREATE_QP_TYPE_GSI;
1332 static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1333 struct ib_qp_init_attr *init_attr,
1334 struct ib_udata *udata)
1336 struct bnxt_qplib_dev_attr *dev_attr;
1337 struct bnxt_qplib_qp *qplqp;
1338 struct bnxt_re_dev *rdev;
1339 struct bnxt_re_cq *cq;
1343 qplqp = &qp->qplib_qp;
1344 dev_attr = &rdev->dev_attr;
1346 /* Setup misc params */
1347 ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
1348 qplqp->pd = &pd->qplib_pd;
1349 qplqp->qp_handle = (u64)qplqp;
1350 qplqp->max_inline_data = init_attr->cap.max_inline_data;
1351 qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1352 qptype = bnxt_re_init_qp_type(rdev, init_attr);
1357 qplqp->type = (u8)qptype;
1358 qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode;
1360 if (init_attr->qp_type == IB_QPT_RC) {
1361 qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
1362 qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1364 qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1365 qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
1366 if (init_attr->create_flags) {
1367 ibdev_dbg(&rdev->ibdev,
1368 "QP create flags 0x%x not supported",
1369 init_attr->create_flags);
1374 if (init_attr->send_cq) {
1375 cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
1376 qplqp->scq = &cq->qplib_cq;
1380 if (init_attr->recv_cq) {
1381 cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
1382 qplqp->rcq = &cq->qplib_cq;
1387 rc = bnxt_re_init_rq_attr(qp, init_attr);
1390 if (init_attr->qp_type == IB_QPT_GSI)
1391 bnxt_re_adjust_gsi_rq_attr(qp);
1394 rc = bnxt_re_init_sq_attr(qp, init_attr, udata);
1397 if (init_attr->qp_type == IB_QPT_GSI)
1398 bnxt_re_adjust_gsi_sq_attr(qp, init_attr);
1400 if (udata) /* This will update DPI and qp_handle */
1401 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
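/* bnxt_re_create_shadow_gsi() - on chips before the gen P5 family, QP1
 * receive traffic is relayed through a shadow UD QP and AH; allocate the
 * sqp_tbl and those shadow resources here.
 */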
1406 static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
1407 struct bnxt_re_pd *pd)
1409 struct bnxt_re_sqp_entries *sqp_tbl;
1410 struct bnxt_re_dev *rdev;
1411 struct bnxt_re_qp *sqp;
1412 struct bnxt_re_ah *sah;
1416 /* Create a shadow QP to handle the QP1 traffic */
1417 sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
1421 rdev->gsi_ctx.sqp_tbl = sqp_tbl;
1423 sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
1426 ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
1429 rdev->gsi_ctx.gsi_sqp = sqp;
1433 sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1436 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1439 ibdev_err(&rdev->ibdev,
1440 "Failed to create AH entry for ShadowQP");
1443 rdev->gsi_ctx.gsi_sah = sah;
1451 static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1452 struct ib_qp_init_attr *init_attr)
1454 struct bnxt_re_dev *rdev;
1455 struct bnxt_qplib_qp *qplqp;
1459 qplqp = &qp->qplib_qp;
1461 qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1462 qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1464 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
1466 ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
1470 rc = bnxt_re_create_shadow_gsi(qp, pd);
1475 static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
1476 struct ib_qp_init_attr *init_attr,
1477 struct bnxt_qplib_dev_attr *dev_attr)
1481 if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
1482 init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
1483 init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
1484 init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
1485 init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
1486 ibdev_err(&rdev->ibdev,
1487 "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
1488 init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
1489 init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
1490 init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
1491 init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
1492 init_attr->cap.max_inline_data,
1493 dev_attr->max_inline_data);
1499 int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
1500 struct ib_udata *udata)
1502 struct ib_pd *ib_pd = ib_qp->pd;
1503 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1504 struct bnxt_re_dev *rdev = pd->rdev;
1505 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1506 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1510 rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
1517 rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
1521 if (qp_init_attr->qp_type == IB_QPT_GSI &&
1522 !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) {
1523 rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
1529 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1531 ibdev_err(&rdev->ibdev, "Failed to create HW QP");
1535 struct bnxt_re_qp_resp resp;
1537 resp.qpid = qp->qplib_qp.id;
1539 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1541 ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
1547 qp->ib_qp.qp_num = qp->qplib_qp.id;
1548 if (qp_init_attr->qp_type == IB_QPT_GSI)
1549 rdev->gsi_ctx.gsi_qp = qp;
1550 spin_lock_init(&qp->sq_lock);
1551 spin_lock_init(&qp->rq_lock);
1552 INIT_LIST_HEAD(&qp->list);
1553 mutex_lock(&rdev->qp_lock);
1554 list_add_tail(&qp->list, &rdev->qp_list);
1555 mutex_unlock(&rdev->qp_lock);
1556 active_qps = atomic_inc_return(&rdev->stats.res.qp_count);
1557 if (active_qps > rdev->stats.res.qp_watermark)
1558 rdev->stats.res.qp_watermark = active_qps;
1559 if (qp_init_attr->qp_type == IB_QPT_RC) {
1560 active_qps = atomic_inc_return(&rdev->stats.res.rc_qp_count);
1561 if (active_qps > rdev->stats.res.rc_qp_watermark)
1562 rdev->stats.res.rc_qp_watermark = active_qps;
1563 } else if (qp_init_attr->qp_type == IB_QPT_UD) {
1564 active_qps = atomic_inc_return(&rdev->stats.res.ud_qp_count);
1565 if (active_qps > rdev->stats.res.ud_qp_watermark)
1566 rdev->stats.res.ud_qp_watermark = active_qps;
1571 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1573 ib_umem_release(qp->rumem);
1574 ib_umem_release(qp->sumem);
static u8 __from_ib_qp_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
	case IB_QPS_INIT:
		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
	case IB_QPS_RTR:
		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
	case IB_QPS_RTS:
		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
	case IB_QPS_SQD:
		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
	case IB_QPS_SQE:
		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
	case IB_QPS_ERR:
	default:
		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
	}
}
static enum ib_qp_state __to_ib_qp_state(u8 state)
{
	switch (state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		return IB_QPS_RESET;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		return IB_QPS_INIT;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		return IB_QPS_RTR;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		return IB_QPS_RTS;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		return IB_QPS_SQD;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		return IB_QPS_SQE;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
	default:
		return IB_QPS_ERR;
	}
}
static u32 __from_ib_mtu(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
	case IB_MTU_512:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
	case IB_MTU_1024:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
	case IB_MTU_2048:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	case IB_MTU_4096:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
	default:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	}
}
static enum ib_mtu __to_ib_mtu(u32 mtu)
{
	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
		return IB_MTU_256;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
		return IB_MTU_512;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
		return IB_MTU_1024;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
		return IB_MTU_2048;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_2048;
	}
}
1657 /* Shared Receive Queues */
1658 int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
1660 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1662 struct bnxt_re_dev *rdev = srq->rdev;
1663 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1664 struct bnxt_qplib_nq *nq = NULL;
1667 nq = qplib_srq->cq->nq;
1668 bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1669 ib_umem_release(srq->umem);
1670 atomic_dec(&rdev->stats.res.srq_count);
1676 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1677 struct bnxt_re_pd *pd,
1678 struct bnxt_re_srq *srq,
1679 struct ib_udata *udata)
1681 struct bnxt_re_srq_req ureq;
1682 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1683 struct ib_umem *umem;
1685 struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
1686 udata, struct bnxt_re_ucontext, ib_uctx);
1688 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1691 bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
1692 bytes = PAGE_ALIGN(bytes);
1693 umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
1694 IB_ACCESS_LOCAL_WRITE);
1696 return PTR_ERR(umem);
1699 qplib_srq->sg_info.umem = umem;
1700 qplib_srq->sg_info.pgsize = PAGE_SIZE;
1701 qplib_srq->sg_info.pgshft = PAGE_SHIFT;
1702 qplib_srq->srq_handle = ureq.srq_handle;
1703 qplib_srq->dpi = &cntx->dpi;
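/* bnxt_re_create_srq() - create a shared receive queue. The WQE size is
 * fixed to the device-maximum SGE layout, and one entry beyond the requested
 * max_wr is allocated so a full posting never hits the queue-full condition.
 */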
1708 int bnxt_re_create_srq(struct ib_srq *ib_srq,
1709 struct ib_srq_init_attr *srq_init_attr,
1710 struct ib_udata *udata)
1712 struct bnxt_qplib_dev_attr *dev_attr;
1713 struct bnxt_qplib_nq *nq = NULL;
1714 struct bnxt_re_dev *rdev;
1715 struct bnxt_re_srq *srq;
1716 struct bnxt_re_pd *pd;
1717 struct ib_pd *ib_pd;
1722 pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1724 dev_attr = &rdev->dev_attr;
1725 srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
1727 if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
		ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
1733 if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1739 srq->qplib_srq.pd = &pd->qplib_pd;
1740 srq->qplib_srq.dpi = &rdev->dpi_privileged;
	/* Allocate 1 more than what's provided so posting max doesn't
	 * fail.
	 */
1744 entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
1745 if (entries > dev_attr->max_srq_wqes + 1)
1746 entries = dev_attr->max_srq_wqes + 1;
1747 srq->qplib_srq.max_wqe = entries;
1749 srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
	/* 128 byte wqe size for SRQ, so use max sges */
1751 srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
1752 srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1753 srq->srq_limit = srq_init_attr->attr.srq_limit;
1754 srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1758 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1763 rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1765 ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
1770 struct bnxt_re_srq_resp resp;
1772 resp.srqid = srq->qplib_srq.id;
1773 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1775 ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
1776 bnxt_qplib_destroy_srq(&rdev->qplib_res,
1783 active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
1784 if (active_srqs > rdev->stats.res.srq_watermark)
1785 rdev->stats.res.srq_watermark = active_srqs;
1786 spin_lock_init(&srq->lock);
1791 ib_umem_release(srq->umem);
1796 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1797 enum ib_srq_attr_mask srq_attr_mask,
1798 struct ib_udata *udata)
1800 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1802 struct bnxt_re_dev *rdev = srq->rdev;
1805 switch (srq_attr_mask) {
1807 /* SRQ resize is not supported */
1810 /* Change the SRQ threshold */
1811 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1814 srq->qplib_srq.threshold = srq_attr->srq_limit;
1815 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1817 ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
1820 /* On success, update the shadow */
1821 srq->srq_limit = srq_attr->srq_limit;
1822 /* No need to Build and send response back to udata */
1825 ibdev_err(&rdev->ibdev,
1826 "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1832 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1834 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1836 struct bnxt_re_srq tsrq;
1837 struct bnxt_re_dev *rdev = srq->rdev;
1840 /* Get live SRQ attr */
1841 tsrq.qplib_srq.id = srq->qplib_srq.id;
1842 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1844 ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
1847 srq_attr->max_wr = srq->qplib_srq.max_wqe;
1848 srq_attr->max_sge = srq->qplib_srq.max_sge;
1849 srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1854 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1855 const struct ib_recv_wr **bad_wr)
1857 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1859 struct bnxt_qplib_swqe wqe;
1860 unsigned long flags;
1863 spin_lock_irqsave(&srq->lock, flags);
1865 /* Transcribe each ib_recv_wr to qplib_swqe */
1866 wqe.num_sge = wr->num_sge;
1867 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1868 wqe.wr_id = wr->wr_id;
1869 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1871 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1878 spin_unlock_irqrestore(&srq->lock, flags);
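/* bnxt_re_modify_shadow_qp() - mirror the relevant QP1 attribute changes
 * (state, pkey index and SQ PSN, plus a locally chosen qkey) onto the shadow
 * GSI QP so the two stay in step.
 */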
1882 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1883 struct bnxt_re_qp *qp1_qp,
1886 struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
1889 if (qp_attr_mask & IB_QP_STATE) {
1890 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1891 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1893 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1894 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1895 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1898 if (qp_attr_mask & IB_QP_QKEY) {
1899 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1900 /* Using a Random QKEY */
1901 qp->qplib_qp.qkey = 0x81818181;
1903 if (qp_attr_mask & IB_QP_SQ_PSN) {
1904 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1905 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1908 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1910 ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
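/* bnxt_re_modify_qp() - translate ib_qp_attr/qp_attr_mask into qplib modify
 * flags, validate the state transition and queue sizes, and issue the
 * firmware MODIFY_QP; for GSI, changes are also propagated to the shadow QP
 * when one exists.
 */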
1914 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1915 int qp_attr_mask, struct ib_udata *udata)
1917 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1918 struct bnxt_re_dev *rdev = qp->rdev;
1919 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1920 enum ib_qp_state curr_qp_state, new_qp_state;
1925 if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1928 qp->qplib_qp.modify_flags = 0;
1929 if (qp_attr_mask & IB_QP_STATE) {
1930 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1931 new_qp_state = qp_attr->qp_state;
1932 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1933 ib_qp->qp_type, qp_attr_mask)) {
1934 ibdev_err(&rdev->ibdev,
1935 "Invalid attribute mask: %#x specified ",
1937 ibdev_err(&rdev->ibdev,
1938 "for qpn: %#x type: %#x",
1939 ib_qp->qp_num, ib_qp->qp_type);
1940 ibdev_err(&rdev->ibdev,
1941 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1942 curr_qp_state, new_qp_state);
1945 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1946 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1949 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1950 ibdev_dbg(&rdev->ibdev,
1951 "Move QP = %p to flush list\n", qp);
1952 flags = bnxt_re_lock_cqs(qp);
1953 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1954 bnxt_re_unlock_cqs(qp, flags);
1957 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1958 ibdev_dbg(&rdev->ibdev,
1959 "Move QP = %p out of flush list\n", qp);
1960 flags = bnxt_re_lock_cqs(qp);
1961 bnxt_qplib_clean_qp(&qp->qplib_qp);
1962 bnxt_re_unlock_cqs(qp, flags);
1965 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1966 qp->qplib_qp.modify_flags |=
1967 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1968 qp->qplib_qp.en_sqd_async_notify = true;
1970 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1971 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1972 qp->qplib_qp.access =
1973 __from_ib_access_flags(qp_attr->qp_access_flags);
1974 /* LOCAL_WRITE access must be set to allow RC receive */
1975 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1976 /* Temp: Set all params on QP as of now */
1977 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
1978 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
1980 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1981 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1982 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1984 if (qp_attr_mask & IB_QP_QKEY) {
1985 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1986 qp->qplib_qp.qkey = qp_attr->qkey;
1988 if (qp_attr_mask & IB_QP_AV) {
1989 const struct ib_global_route *grh =
1990 rdma_ah_read_grh(&qp_attr->ah_attr);
1991 const struct ib_gid_attr *sgid_attr;
1992 struct bnxt_re_gid_ctx *ctx;
1994 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1995 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1996 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1997 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1998 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1999 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
2000 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
2001 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
2002 sizeof(qp->qplib_qp.ah.dgid.data));
2003 qp->qplib_qp.ah.flow_label = grh->flow_label;
2004 sgid_attr = grh->sgid_attr;
		/* Get the HW context of the GID. The reference
		 * of GID table entry is already taken by the caller.
		 */
2008 ctx = rdma_read_gid_hw_context(sgid_attr);
2009 qp->qplib_qp.ah.sgid_index = ctx->idx;
2010 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
2011 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
2012 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
2013 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
2014 ether_addr_copy(qp->qplib_qp.ah.dmac,
2015 qp_attr->ah_attr.roce.dmac);
2017 rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
2018 &qp->qplib_qp.smac[0]);
2022 nw_type = rdma_gid_attr_network_type(sgid_attr);
2024 case RDMA_NETWORK_IPV4:
2025 qp->qplib_qp.nw_type =
2026 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
2028 case RDMA_NETWORK_IPV6:
2029 qp->qplib_qp.nw_type =
2030 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
2033 qp->qplib_qp.nw_type =
2034 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
2039 if (qp_attr_mask & IB_QP_PATH_MTU) {
2040 qp->qplib_qp.modify_flags |=
2041 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
2042 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
2043 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
2044 } else if (qp_attr->qp_state == IB_QPS_RTR) {
2045 qp->qplib_qp.modify_flags |=
2046 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
2047 qp->qplib_qp.path_mtu =
2048 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
2050 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
2053 if (qp_attr_mask & IB_QP_TIMEOUT) {
2054 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
2055 qp->qplib_qp.timeout = qp_attr->timeout;
2057 if (qp_attr_mask & IB_QP_RETRY_CNT) {
2058 qp->qplib_qp.modify_flags |=
2059 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
2060 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
2062 if (qp_attr_mask & IB_QP_RNR_RETRY) {
2063 qp->qplib_qp.modify_flags |=
2064 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
2065 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
2067 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
2068 qp->qplib_qp.modify_flags |=
2069 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
2070 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
2072 if (qp_attr_mask & IB_QP_RQ_PSN) {
2073 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
2074 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
2076 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2077 qp->qplib_qp.modify_flags |=
2078 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
2079 /* Cap the max_rd_atomic to device max */
2080 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
2081 dev_attr->max_qp_rd_atom);
2083 if (qp_attr_mask & IB_QP_SQ_PSN) {
2084 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
2085 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
2087 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2088 if (qp_attr->max_dest_rd_atomic >
2089 dev_attr->max_qp_init_rd_atom) {
2090 ibdev_err(&rdev->ibdev,
2091 "max_dest_rd_atomic requested%d is > dev_max%d",
2092 qp_attr->max_dest_rd_atomic,
2093 dev_attr->max_qp_init_rd_atom);
2097 qp->qplib_qp.modify_flags |=
2098 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
2099 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
2101 if (qp_attr_mask & IB_QP_CAP) {
2102 qp->qplib_qp.modify_flags |=
2103 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
2104 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
2105 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
2106 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
2107 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
2108 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
2109 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
2110 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
2111 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
2112 (qp_attr->cap.max_inline_data >=
2113 dev_attr->max_inline_data)) {
2114 ibdev_err(&rdev->ibdev,
2115 "Create QP failed - max exceeded");
2118 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
2119 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
2120 dev_attr->max_qp_wqes + 1);
2121 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
2122 qp_attr->cap.max_send_wr;
		/* Reserving one slot for Phantom WQE. Some application can
		 * post one extra entry in this case. Allowing this to avoid
		 * unexpected Queue full condition.
		 */
2128 qp->qplib_qp.sq.q_full_delta -= 1;
2129 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
2130 if (qp->qplib_qp.rq.max_wqe) {
2131 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
2132 qp->qplib_qp.rq.max_wqe =
2133 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
2134 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
2135 qp_attr->cap.max_recv_wr;
2136 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
2138 /* SRQ was used prior, just ignore the RQ caps */
2141 if (qp_attr_mask & IB_QP_DEST_QPN) {
2142 qp->qplib_qp.modify_flags |=
2143 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
2144 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
2146 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2148 ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
2151 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
2152 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2156 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2157 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
2159 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2160 struct bnxt_re_dev *rdev = qp->rdev;
2161 struct bnxt_qplib_qp *qplib_qp;
2164 qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
2168 qplib_qp->id = qp->qplib_qp.id;
2169 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2171 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2173 ibdev_err(&rdev->ibdev, "Failed to query HW QP");
2176 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2177 qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2178 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2179 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
2180 qp_attr->pkey_index = qplib_qp->pkey_index;
2181 qp_attr->qkey = qplib_qp->qkey;
2182 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2183 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
2184 qplib_qp->ah.host_sgid_index,
2185 qplib_qp->ah.hop_limit,
2186 qplib_qp->ah.traffic_class);
2187 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
2188 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
2189 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
2190 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2191 qp_attr->timeout = qplib_qp->timeout;
2192 qp_attr->retry_cnt = qplib_qp->retry_cnt;
2193 qp_attr->rnr_retry = qplib_qp->rnr_retry;
2194 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2195 qp_attr->rq_psn = qplib_qp->rq.psn;
2196 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2197 qp_attr->sq_psn = qplib_qp->sq.psn;
2198 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2199 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2201 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2203 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2204 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2205 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2206 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2207 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2208 qp_init_attr->cap = qp_attr->cap;
2215 /* Routine for sending QP1 packets for RoCE V1 and V2 */
2217 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
2218 const struct ib_send_wr *wr,
2219 struct bnxt_qplib_swqe *wqe,
2222 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
2224 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2225 const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
2226 struct bnxt_qplib_sge sge;
2230 bool is_eth = false;
2231 bool is_vlan = false;
2232 bool is_grh = false;
2233 bool is_udp = false;
2235 u16 vlan_id = 0xFFFF;
2239 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2241 rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
2245 /* Get network header type for this GID */
2246 nw_type = rdma_gid_attr_network_type(sgid_attr);
2248 case RDMA_NETWORK_IPV4:
2249 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
2251 case RDMA_NETWORK_IPV6:
2252 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
2255 nw_type = BNXT_RE_ROCE_V1_PACKET;
2258 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
2259 is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
2261 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
2263 ether_type = ETH_P_IP;
2266 ether_type = ETH_P_IPV6;
2270 ether_type = ETH_P_IBOE;
2275 is_vlan = vlan_id && (vlan_id < 0x1000);
2277 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
2278 ip_version, is_udp, 0, &qp->qp1_hdr);
2281 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
2282 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
2284 /* For vlan, check the sgid for vlan existence */
2287 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
2289 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
2290 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
2293 if (is_grh || (ip_version == 6)) {
2294 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
2295 sizeof(sgid_attr->gid));
2296 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2297 sizeof(sgid_attr->gid));
2298 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
2301 if (ip_version == 4) {
2302 qp->qp1_hdr.ip4.tos = 0;
2303 qp->qp1_hdr.ip4.id = 0;
2304 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
2305 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
2307 memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
2308 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2309 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2313 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2314 qp->qp1_hdr.udp.sport = htons(0x8CD1);
2315 qp->qp1_hdr.udp.csum = 0;
2319 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2320 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2321 qp->qp1_hdr.immediate_present = 1;
2323 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2325 if (wr->send_flags & IB_SEND_SOLICITED)
2326 qp->qp1_hdr.bth.solicited_event = 1;
2328 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2330 /* P_key for QP1 is for all members */
2331 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2332 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2333 qp->qp1_hdr.bth.ack_req = 0;
2335 qp->send_psn &= BTH_PSN_MASK;
2336 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2338 /* Use the privileged Q_Key for QP1 */
2339 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2340 qp->qp1_hdr.deth.source_qpn = IB_QP1;
2342 /* Pack the QP1 to the transmit buffer */
2343 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2345 ib_ud_header_pack(&qp->qp1_hdr, buf);
2346 for (i = wqe->num_sge; i; i--) {
2347 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2348 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2349 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2353 /* Max header buf size for IPV6 RoCE V2 is 86,
2354 * which is the same as the QP1 SQ header buffer.
2355 * Header buf size for IPV4 RoCE V2 can be 66:
2356 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
2357 * Subtract 20 bytes from the QP1 SQ header buf size. */
2359 if (is_udp && ip_version == 4)
2362 /* Max header buf size for RoCE V1 is 78:
2363 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2364 * Subtract 8 bytes from the QP1 SQ header buf size. */
2369 /* Subtract 4 bytes for non-VLAN packets */
2373 wqe->sg_list[0].addr = sge.addr;
2374 wqe->sg_list[0].lkey = sge.lkey;
2375 wqe->sg_list[0].size = sge.size;
2379 ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
2385 /* For the MAD layer, it only provides the recv SGE the size of
2386 * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH,
2387 * nor RoCE iCRC. The Cu+ solution must provide a buffer for the entire
2388 * receive packet (334 bytes) with no VLAN and then copy the GRH
2389 * and the MAD datagram out to the provided SGE. */
2391 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2392 const struct ib_recv_wr *wr,
2393 struct bnxt_qplib_swqe *wqe,
2396 struct bnxt_re_sqp_entries *sqp_entry;
2397 struct bnxt_qplib_sge ref, sge;
2398 struct bnxt_re_dev *rdev;
2403 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2405 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2408 /* Create 1 SGE to receive the entire ethernet packet */
2411 /* Save the reference from ULP */
2412 ref.addr = wqe->sg_list[0].addr;
2413 ref.lkey = wqe->sg_list[0].lkey;
2414 ref.size = wqe->sg_list[0].size;
2416 sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
2419 wqe->sg_list[0].addr = sge.addr;
2420 wqe->sg_list[0].lkey = sge.lkey;
2421 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2422 sge.size -= wqe->sg_list[0].size;
2424 sqp_entry->sge.addr = ref.addr;
2425 sqp_entry->sge.lkey = ref.lkey;
2426 sqp_entry->sge.size = ref.size;
2427 /* Store the wrid for reporting completion */
2428 sqp_entry->wrid = wqe->wr_id;
2429 /* Change the wqe->wr_id to the table index */
2430 wqe->wr_id = rq_prod_index;
2434 static int is_ud_qp(struct bnxt_re_qp *qp)
2436 return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2437 qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
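
/* Build a SEND/SEND_WITH_IMM/SEND_WITH_INV SWQE from an IB work request.
 * For UD and GSI QPs the destination QP, Q_Key and AH id are taken from
 * the UD WR.
 */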
2440 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2441 const struct ib_send_wr *wr,
2442 struct bnxt_qplib_swqe *wqe)
2444 struct bnxt_re_ah *ah = NULL;
2447 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2448 wqe->send.q_key = ud_wr(wr)->remote_qkey;
2449 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2450 wqe->send.avid = ah->qplib_ah.id;
2452 switch (wr->opcode) {
2454 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2456 case IB_WR_SEND_WITH_IMM:
2457 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2458 wqe->send.imm_data = wr->ex.imm_data;
2460 case IB_WR_SEND_WITH_INV:
2461 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2462 wqe->send.inv_key = wr->ex.invalidate_rkey;
2467 if (wr->send_flags & IB_SEND_SIGNALED)
2468 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2469 if (wr->send_flags & IB_SEND_FENCE)
2470 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2471 if (wr->send_flags & IB_SEND_SOLICITED)
2472 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2473 if (wr->send_flags & IB_SEND_INLINE)
2474 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
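
/* Build an RDMA WRITE/WRITE_WITH_IMM/READ SWQE, copying the remote VA,
 * rkey and send flags from the IB work request.
 */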
2479 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2480 struct bnxt_qplib_swqe *wqe)
2482 switch (wr->opcode) {
2483 case IB_WR_RDMA_WRITE:
2484 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2486 case IB_WR_RDMA_WRITE_WITH_IMM:
2487 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2488 wqe->rdma.imm_data = wr->ex.imm_data;
2490 case IB_WR_RDMA_READ:
2491 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2492 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2497 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2498 wqe->rdma.r_key = rdma_wr(wr)->rkey;
2499 if (wr->send_flags & IB_SEND_SIGNALED)
2500 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2501 if (wr->send_flags & IB_SEND_FENCE)
2502 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2503 if (wr->send_flags & IB_SEND_SOLICITED)
2504 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2505 if (wr->send_flags & IB_SEND_INLINE)
2506 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
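
/* Build an ATOMIC_CMP_AND_SWP or ATOMIC_FETCH_AND_ADD SWQE from an IB
 * atomic work request.
 */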
2511 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2512 struct bnxt_qplib_swqe *wqe)
2514 switch (wr->opcode) {
2515 case IB_WR_ATOMIC_CMP_AND_SWP:
2516 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2517 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2518 wqe->atomic.swap_data = atomic_wr(wr)->swap;
2520 case IB_WR_ATOMIC_FETCH_AND_ADD:
2521 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2522 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2527 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2528 wqe->atomic.r_key = atomic_wr(wr)->rkey;
2529 if (wr->send_flags & IB_SEND_SIGNALED)
2530 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2531 if (wr->send_flags & IB_SEND_FENCE)
2532 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2533 if (wr->send_flags & IB_SEND_SOLICITED)
2534 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2538 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2539 struct bnxt_qplib_swqe *wqe)
2541 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2542 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2544 /* Need unconditional fence for local invalidate
2545 * opcode to work as expected. */
2547 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2549 if (wr->send_flags & IB_SEND_SIGNALED)
2550 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2551 if (wr->send_flags & IB_SEND_SOLICITED)
2552 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
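
/* Build a REG_MR SWQE from a fast-register WR: program the PBL, page
 * list, access flags and key into the qplib FRMR fields.
 */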
2557 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2558 struct bnxt_qplib_swqe *wqe)
2560 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2561 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2562 int access = wr->access;
2564 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2565 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2566 wqe->frmr.page_list = mr->pages;
2567 wqe->frmr.page_list_len = mr->npages;
2568 wqe->frmr.levels = qplib_frpl->hwq.level;
2569 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2571 /* Need unconditional fence for reg_mr
2572 * opcode to function as expected. */
2575 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2577 if (wr->wr.send_flags & IB_SEND_SIGNALED)
2578 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2580 if (access & IB_ACCESS_LOCAL_WRITE)
2581 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2582 if (access & IB_ACCESS_REMOTE_READ)
2583 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2584 if (access & IB_ACCESS_REMOTE_WRITE)
2585 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2586 if (access & IB_ACCESS_REMOTE_ATOMIC)
2587 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2588 if (access & IB_ACCESS_MW_BIND)
2589 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2591 wqe->frmr.l_key = wr->key;
2592 wqe->frmr.length = wr->mr->length;
2593 wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
2594 wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
2595 wqe->frmr.va = wr->mr->iova;
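
/* Copy the payload of an inline send WR into the SWQE inline buffer.
 * Returns the total inline length, or a negative value if the data
 * exceeds the supported inline size.
 */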
2599 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2600 const struct ib_send_wr *wr,
2601 struct bnxt_qplib_swqe *wqe)
2603 /* Copy the inline data to the data field */
2608 in_data = wqe->inline_data;
2609 for (i = 0; i < wr->num_sge; i++) {
2610 sge_addr = (void *)(unsigned long)
2611 wr->sg_list[i].addr;
2612 sge_len = wr->sg_list[i].length;
2614 if ((sge_len + wqe->inline_len) >
2615 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2616 ibdev_err(&rdev->ibdev,
2617 "Inline data size requested > supported value");
2620 sge_len = wr->sg_list[i].length;
2622 memcpy(in_data, sge_addr, sge_len);
2623 in_data += wr->sg_list[i].length;
2624 wqe->inline_len += wr->sg_list[i].length;
2626 return wqe->inline_len;
2629 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2630 const struct ib_send_wr *wr,
2631 struct bnxt_qplib_swqe *wqe)
2635 if (wr->send_flags & IB_SEND_INLINE)
2636 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2638 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
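
/* HW stall workaround for UD/GSI/raw-Ethertype QPs: once
 * BNXT_RE_UD_QP_HW_STALL WQEs have been posted, move the QP back to RTS
 * and reset the WQE counter.
 */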
2644 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2646 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2647 qp->ib_qp.qp_type == IB_QPT_GSI ||
2648 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2649 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2651 struct ib_qp_attr qp_attr;
2653 qp_attr_mask = IB_QP_STATE;
2654 qp_attr.qp_state = IB_QPS_RTS;
2655 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2656 qp->qplib_qp.wqe_cnt = 0;
2660 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2661 struct bnxt_re_qp *qp,
2662 const struct ib_send_wr *wr)
2664 int rc = 0, payload_sz = 0;
2665 unsigned long flags;
2667 spin_lock_irqsave(&qp->sq_lock, flags);
2669 struct bnxt_qplib_swqe wqe = {};
2672 wqe.num_sge = wr->num_sge;
2673 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2674 ibdev_err(&rdev->ibdev,
2675 "Limit exceeded for Send SGEs");
2680 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2681 if (payload_sz < 0) {
2685 wqe.wr_id = wr->wr_id;
2687 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2689 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2691 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2694 ibdev_err(&rdev->ibdev,
2695 "Post send failed opcode = %#x rc = %d",
2701 bnxt_qplib_post_send_db(&qp->qplib_qp);
2702 bnxt_ud_qp_hw_stall_workaround(qp);
2703 spin_unlock_irqrestore(&qp->sq_lock, flags);
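
/* post_send verb: walk the WR chain under the SQ lock, build a qplib SWQE
 * for each supported opcode and post it, then ring the SQ doorbell.
 */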
2707 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2708 const struct ib_send_wr **bad_wr)
2710 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2711 struct bnxt_qplib_swqe wqe;
2712 int rc = 0, payload_sz = 0;
2713 unsigned long flags;
2715 spin_lock_irqsave(&qp->sq_lock, flags);
2718 memset(&wqe, 0, sizeof(wqe));
2721 wqe.num_sge = wr->num_sge;
2722 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2723 ibdev_err(&qp->rdev->ibdev,
2724 "Limit exceeded for Send SGEs");
2729 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2730 if (payload_sz < 0) {
2734 wqe.wr_id = wr->wr_id;
2736 switch (wr->opcode) {
2738 case IB_WR_SEND_WITH_IMM:
2739 if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2740 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2744 wqe.rawqp1.lflags |=
2745 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2747 switch (wr->send_flags) {
2748 case IB_SEND_IP_CSUM:
2749 wqe.rawqp1.lflags |=
2750 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2756 case IB_WR_SEND_WITH_INV:
2757 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2759 case IB_WR_RDMA_WRITE:
2760 case IB_WR_RDMA_WRITE_WITH_IMM:
2761 case IB_WR_RDMA_READ:
2762 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2764 case IB_WR_ATOMIC_CMP_AND_SWP:
2765 case IB_WR_ATOMIC_FETCH_AND_ADD:
2766 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2768 case IB_WR_RDMA_READ_WITH_INV:
2769 ibdev_err(&qp->rdev->ibdev,
2770 "RDMA Read with Invalidate is not supported");
2773 case IB_WR_LOCAL_INV:
2774 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2777 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2780 /* Unsupported WRs */
2781 ibdev_err(&qp->rdev->ibdev,
2782 "WR (%#x) is not supported", wr->opcode);
2787 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2790 ibdev_err(&qp->rdev->ibdev,
2791 "post_send failed op:%#x qps = %#x rc = %d\n",
2792 wr->opcode, qp->qplib_qp.state, rc);
2798 bnxt_qplib_post_send_db(&qp->qplib_qp);
2799 bnxt_ud_qp_hw_stall_workaround(qp);
2800 spin_unlock_irqrestore(&qp->sq_lock, flags);
2805 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2806 struct bnxt_re_qp *qp,
2807 const struct ib_recv_wr *wr)
2809 struct bnxt_qplib_swqe wqe;
2814 memset(&wqe, 0, sizeof(wqe));
2817 wqe.num_sge = wr->num_sge;
2818 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2819 ibdev_err(&rdev->ibdev,
2820 "Limit exceeded for Receive SGEs");
2824 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2825 wqe.wr_id = wr->wr_id;
2826 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2828 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2835 bnxt_qplib_post_recv_db(&qp->qplib_qp);
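
/* post_recv verb: build RECV SWQEs under the RQ lock, diverting GSI
 * receives to the shadow-QP path when needed, and ring the RQ doorbell
 * once a batch threshold of RQEs has been posted.
 */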
2839 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
2840 const struct ib_recv_wr **bad_wr)
2842 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2843 struct bnxt_qplib_swqe wqe;
2844 int rc = 0, payload_sz = 0;
2845 unsigned long flags;
2848 spin_lock_irqsave(&qp->rq_lock, flags);
2851 memset(&wqe, 0, sizeof(wqe));
2854 wqe.num_sge = wr->num_sge;
2855 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2856 ibdev_err(&qp->rdev->ibdev,
2857 "Limit exceeded for Receive SGEs");
2863 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2865 wqe.wr_id = wr->wr_id;
2866 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2868 if (ib_qp->qp_type == IB_QPT_GSI &&
2869 qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
2870 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2873 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2879 /* Ring the DB if the number of RQEs posted reaches a threshold */
2880 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2881 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2889 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2891 spin_unlock_irqrestore(&qp->rq_lock, flags);
2896 /* Completion Queues */
2897 int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
2899 struct bnxt_re_cq *cq;
2900 struct bnxt_qplib_nq *nq;
2901 struct bnxt_re_dev *rdev;
2903 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2905 nq = cq->qplib_cq.nq;
2907 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2908 ib_umem_release(cq->umem);
2910 atomic_dec(&rdev->stats.res.cq_count);
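
/* Create a CQ: validate the requested depth, set up the user umem or the
 * kernel CQL buffer, pick an NQ in round-robin fashion and create the HW CQ.
 */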
2916 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
2917 struct ib_udata *udata)
2919 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
2920 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2921 struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
2923 int cqe = attr->cqe;
2924 struct bnxt_qplib_nq *nq = NULL;
2925 unsigned int nq_alloc_cnt;
2931 /* Validate CQ fields */
2932 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2933 ibdev_err(&rdev->ibdev, "Failed to create CQ - max exceeded");
2938 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2940 entries = roundup_pow_of_two(cqe + 1);
2941 if (entries > dev_attr->max_cq_wqes + 1)
2942 entries = dev_attr->max_cq_wqes + 1;
2944 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
2945 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
2947 struct bnxt_re_cq_req req;
2948 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
2949 udata, struct bnxt_re_ucontext, ib_uctx);
2950 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2955 cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
2956 entries * sizeof(struct cq_base),
2957 IB_ACCESS_LOCAL_WRITE);
2958 if (IS_ERR(cq->umem)) {
2959 rc = PTR_ERR(cq->umem);
2962 cq->qplib_cq.sg_info.umem = cq->umem;
2963 cq->qplib_cq.dpi = &uctx->dpi;
2965 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2966 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2973 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2976 /* Allocating the NQ in round-robin fashion. nq_alloc_cnt is a counter
2977 * used for getting the NQ index. */
2979 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2980 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2981 cq->qplib_cq.max_wqe = entries;
2982 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2983 cq->qplib_cq.nq = nq;
2985 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2987 ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
2991 cq->ib_cq.cqe = entries;
2992 cq->cq_period = cq->qplib_cq.period;
2995 active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
2996 if (active_cqs > rdev->stats.res.cq_watermark)
2997 rdev->stats.res.cq_watermark = active_cqs;
2998 spin_lock_init(&cq->cq_lock);
3001 struct bnxt_re_cq_resp resp;
3003 resp.cqid = cq->qplib_cq.id;
3004 resp.tail = cq->qplib_cq.hwq.cons;
3005 resp.phase = cq->qplib_cq.period;
3007 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3009 ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
3010 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
3018 ib_umem_release(cq->umem);
3024 static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
3026 struct bnxt_re_dev *rdev = cq->rdev;
3028 bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
3030 cq->qplib_cq.max_wqe = cq->resize_cqe;
3031 if (cq->resize_umem) {
3032 ib_umem_release(cq->umem);
3033 cq->umem = cq->resize_umem;
3034 cq->resize_umem = NULL;
3039 int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
3041 struct bnxt_qplib_sg_info sg_info = {};
3042 struct bnxt_qplib_dpi *orig_dpi = NULL;
3043 struct bnxt_qplib_dev_attr *dev_attr;
3044 struct bnxt_re_ucontext *uctx = NULL;
3045 struct bnxt_re_resize_cq_req req;
3046 struct bnxt_re_dev *rdev;
3047 struct bnxt_re_cq *cq;
3050 cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
3052 dev_attr = &rdev->dev_attr;
3053 if (!ibcq->uobject) {
3054 ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
3058 if (cq->resize_umem) {
3059 ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy",
3064 /* Check that the requested CQ depth is within the supported range */
3065 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
3066 ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
3067 cq->qplib_cq.id, cqe);
3071 entries = roundup_pow_of_two(cqe + 1);
3072 if (entries > dev_attr->max_cq_wqes + 1)
3073 entries = dev_attr->max_cq_wqes + 1;
3075 uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
3077 /* uverbs consumer */
3078 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
3083 cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va,
3084 entries * sizeof(struct cq_base),
3085 IB_ACCESS_LOCAL_WRITE);
3086 if (IS_ERR(cq->resize_umem)) {
3087 rc = PTR_ERR(cq->resize_umem);
3088 cq->resize_umem = NULL;
3089 ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
3093 cq->resize_cqe = entries;
3094 memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info));
3095 orig_dpi = cq->qplib_cq.dpi;
3097 cq->qplib_cq.sg_info.umem = cq->resize_umem;
3098 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
3099 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
3100 cq->qplib_cq.dpi = &uctx->dpi;
3102 rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
3104 ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!",
3109 cq->ib_cq.cqe = cq->resize_cqe;
3110 atomic_inc(&rdev->stats.res.resize_count);
3115 if (cq->resize_umem) {
3116 ib_umem_release(cq->resize_umem);
3117 cq->resize_umem = NULL;
3119 memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info));
3120 cq->qplib_cq.dpi = orig_dpi;
3125 static u8 __req_to_ib_wc_status(u8 qstatus)
3128 case CQ_REQ_STATUS_OK:
3129 return IB_WC_SUCCESS;
3130 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
3131 return IB_WC_BAD_RESP_ERR;
3132 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
3133 return IB_WC_LOC_LEN_ERR;
3134 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
3135 return IB_WC_LOC_QP_OP_ERR;
3136 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
3137 return IB_WC_LOC_PROT_ERR;
3138 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
3139 return IB_WC_GENERAL_ERR;
3140 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
3141 return IB_WC_REM_INV_REQ_ERR;
3142 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
3143 return IB_WC_REM_ACCESS_ERR;
3144 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
3145 return IB_WC_REM_OP_ERR;
3146 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
3147 return IB_WC_RNR_RETRY_EXC_ERR;
3148 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
3149 return IB_WC_RETRY_EXC_ERR;
3150 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
3151 return IB_WC_WR_FLUSH_ERR;
3153 return IB_WC_GENERAL_ERR;
3158 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
3161 case CQ_RES_RAWETH_QP1_STATUS_OK:
3162 return IB_WC_SUCCESS;
3163 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
3164 return IB_WC_LOC_ACCESS_ERR;
3165 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
3166 return IB_WC_LOC_LEN_ERR;
3167 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
3168 return IB_WC_LOC_PROT_ERR;
3169 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
3170 return IB_WC_LOC_QP_OP_ERR;
3171 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
3172 return IB_WC_GENERAL_ERR;
3173 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
3174 return IB_WC_WR_FLUSH_ERR;
3175 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
3176 return IB_WC_WR_FLUSH_ERR;
3178 return IB_WC_GENERAL_ERR;
3182 static u8 __rc_to_ib_wc_status(u8 qstatus)
3185 case CQ_RES_RC_STATUS_OK:
3186 return IB_WC_SUCCESS;
3187 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
3188 return IB_WC_LOC_ACCESS_ERR;
3189 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
3190 return IB_WC_LOC_LEN_ERR;
3191 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
3192 return IB_WC_LOC_PROT_ERR;
3193 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
3194 return IB_WC_LOC_QP_OP_ERR;
3195 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
3196 return IB_WC_GENERAL_ERR;
3197 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
3198 return IB_WC_REM_INV_REQ_ERR;
3199 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
3200 return IB_WC_WR_FLUSH_ERR;
3201 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
3202 return IB_WC_WR_FLUSH_ERR;
3204 return IB_WC_GENERAL_ERR;
3208 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
3210 switch (cqe->type) {
3211 case BNXT_QPLIB_SWQE_TYPE_SEND:
3212 wc->opcode = IB_WC_SEND;
3214 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
3215 wc->opcode = IB_WC_SEND;
3216 wc->wc_flags |= IB_WC_WITH_IMM;
3218 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
3219 wc->opcode = IB_WC_SEND;
3220 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3222 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
3223 wc->opcode = IB_WC_RDMA_WRITE;
3225 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
3226 wc->opcode = IB_WC_RDMA_WRITE;
3227 wc->wc_flags |= IB_WC_WITH_IMM;
3229 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
3230 wc->opcode = IB_WC_RDMA_READ;
3232 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
3233 wc->opcode = IB_WC_COMP_SWAP;
3235 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
3236 wc->opcode = IB_WC_FETCH_ADD;
3238 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
3239 wc->opcode = IB_WC_LOCAL_INV;
3241 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
3242 wc->opcode = IB_WC_REG_MR;
3245 wc->opcode = IB_WC_SEND;
3249 wc->status = __req_to_ib_wc_status(cqe->status);
3252 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
3253 u16 raweth_qp1_flags2)
3255 bool is_ipv6 = false, is_ipv4 = false;
3257 /* raweth_qp1_flags Bit 9-6 indicates itype */
3258 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3259 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3262 if (raweth_qp1_flags2 &
3263 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
3265 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
3266 /* raweth_qp1_flags2 Bit 8 indicates ip_type: 0 - v4, 1 - v6 */
3267 (raweth_qp1_flags2 &
3268 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
3269 (is_ipv6 = true) : (is_ipv4 = true);
3271 BNXT_RE_ROCEV2_IPV6_PACKET :
3272 BNXT_RE_ROCEV2_IPV4_PACKET);
3274 return BNXT_RE_ROCE_V1_PACKET;
3278 static int bnxt_re_to_ib_nw_type(int nw_type)
3280 u8 nw_hdr_type = 0xFF;
3283 case BNXT_RE_ROCE_V1_PACKET:
3284 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
3286 case BNXT_RE_ROCEV2_IPV4_PACKET:
3287 nw_hdr_type = RDMA_NETWORK_IPV4;
3289 case BNXT_RE_ROCEV2_IPV6_PACKET:
3290 nw_hdr_type = RDMA_NETWORK_IPV6;
3296 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
3300 struct ethhdr *eth_hdr;
3304 tmp_buf = (u8 *)rq_hdr_buf;
3306 /* If the dest MAC is not the same as the I/F MAC, this could be a
3307 * loopback address or a multicast address; check whether
3308 * it is a loopback packet. */
3310 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
3312 /* Check the ether type */
3313 eth_hdr = (struct ethhdr *)tmp_buf;
3314 eth_type = ntohs(eth_hdr->h_proto);
3322 struct udphdr *udp_hdr;
3324 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
3325 sizeof(struct ipv6hdr));
3326 tmp_buf += sizeof(struct ethhdr) + len;
3327 udp_hdr = (struct udphdr *)tmp_buf;
3328 if (ntohs(udp_hdr->dest) ==
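
/* Handle a raw QP1 (GSI) receive completion: save the CQE, repost receive
 * buffers to the shadow QP and forward the packet to it with a UD send.
 */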
3341 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
3342 struct bnxt_qplib_cqe *cqe)
3344 struct bnxt_re_dev *rdev = gsi_qp->rdev;
3345 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3346 struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
3347 dma_addr_t shrq_hdr_buf_map;
3348 struct ib_sge s_sge[2] = {};
3349 struct ib_sge r_sge[2] = {};
3350 struct bnxt_re_ah *gsi_sah;
3351 struct ib_recv_wr rwr = {};
3352 dma_addr_t rq_hdr_buf_map;
3353 struct ib_ud_wr udwr = {};
3354 struct ib_send_wr *swr;
3363 tbl_idx = cqe->wr_id;
3365 rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
3366 (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
3367 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3370 /* Shadow QP header buffer */
3371 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3373 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3375 /* Store this cqe */
3376 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
3377 sqp_entry->qp1_qp = gsi_qp;
3379 /* Find packet type from the cqe */
3381 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
3382 cqe->raweth_qp1_flags2);
3384 ibdev_err(&rdev->ibdev, "Invalid packet\n");
3388 /* Adjust the offset for the user buffer and post in the rq */
3390 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
3394 /* A QP1 loopback packet has 4 bytes of internal header before the
3395 * Ethernet header. Skip these four bytes. */
3397 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3400 /* First send SGE. Skip the ether header */
3401 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3403 s_sge[0].lkey = 0xFFFFFFFF;
3404 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3405 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3407 /* Second Send SGE */
3408 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3409 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3410 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3412 s_sge[1].lkey = 0xFFFFFFFF;
3413 s_sge[1].length = 256;
3415 /* First recv SGE */
3417 r_sge[0].addr = shrq_hdr_buf_map;
3418 r_sge[0].lkey = 0xFFFFFFFF;
3419 r_sge[0].length = 40;
3421 r_sge[1].addr = sqp_entry->sge.addr + offset;
3422 r_sge[1].lkey = sqp_entry->sge.lkey;
3423 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3425 /* Create receive work request */
3427 rwr.sg_list = r_sge;
3428 rwr.wr_id = tbl_idx;
3431 rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
3433 ibdev_err(&rdev->ibdev,
3434 "Failed to post Rx buffers to shadow QP");
3439 swr->sg_list = s_sge;
3440 swr->wr_id = tbl_idx;
3441 swr->opcode = IB_WR_SEND;
3443 gsi_sah = rdev->gsi_ctx.gsi_sah;
3444 udwr.ah = &gsi_sah->ib_ah;
3445 udwr.remote_qpn = gsi_sqp->qplib_qp.id;
3446 udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
3448 /* post data received in the send queue */
3449 return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
3452 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3453 struct bnxt_qplib_cqe *cqe)
3455 wc->opcode = IB_WC_RECV;
3456 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3457 wc->wc_flags |= IB_WC_GRH;
3460 static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
3464 /* Check if the VLAN is configured in the host. If not configured, it
3465 * can be a transparent VLAN, so don't report the VLAN ID. */
3467 if (!__vlan_find_dev_deep_rcu(rdev->netdev,
3468 htons(ETH_P_8021Q), vlan_id))
3473 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3480 metadata = orig_cqe->raweth_qp1_metadata;
3481 if (orig_cqe->raweth_qp1_flags2 &
3482 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3484 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3485 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3486 if (tpid == ETH_P_8021Q) {
3488 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3490 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3491 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3499 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3500 struct bnxt_qplib_cqe *cqe)
3502 wc->opcode = IB_WC_RECV;
3503 wc->status = __rc_to_ib_wc_status(cqe->status);
3505 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3506 wc->wc_flags |= IB_WC_WITH_IMM;
3507 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3508 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3509 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3510 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3511 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3514 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
3516 struct bnxt_qplib_cqe *cqe)
3518 struct bnxt_re_dev *rdev = gsi_sqp->rdev;
3519 struct bnxt_re_qp *gsi_qp = NULL;
3520 struct bnxt_qplib_cqe *orig_cqe = NULL;
3521 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3527 tbl_idx = cqe->wr_id;
3529 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3530 gsi_qp = sqp_entry->qp1_qp;
3531 orig_cqe = &sqp_entry->cqe;
3533 wc->wr_id = sqp_entry->wrid;
3534 wc->byte_len = orig_cqe->length;
3535 wc->qp = &gsi_qp->ib_qp;
3537 wc->ex.imm_data = orig_cqe->immdata;
3538 wc->src_qp = orig_cqe->src_qp;
3539 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3540 if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3541 if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3542 wc->vlan_id = vlan_id;
3544 wc->wc_flags |= IB_WC_WITH_VLAN;
3548 wc->vendor_err = orig_cqe->status;
3550 wc->opcode = IB_WC_RECV;
3551 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3552 wc->wc_flags |= IB_WC_GRH;
3554 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3555 orig_cqe->raweth_qp1_flags2);
3557 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3558 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3562 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3564 struct bnxt_qplib_cqe *cqe)
3566 struct bnxt_re_dev *rdev;
3571 wc->opcode = IB_WC_RECV;
3572 wc->status = __rc_to_ib_wc_status(cqe->status);
3574 if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3575 wc->wc_flags |= IB_WC_WITH_IMM;
3576 /* report only on GSI QP for Thor */
3577 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3578 wc->wc_flags |= IB_WC_GRH;
3579 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3580 wc->wc_flags |= IB_WC_WITH_SMAC;
3581 if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
3582 vlan_id = (cqe->cfa_meta & 0xFFF);
3584 /* Mark only if vlan_id is non-zero */
3585 if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3586 wc->vlan_id = vlan_id;
3587 wc->wc_flags |= IB_WC_WITH_VLAN;
3589 nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3590 CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3591 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3592 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
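
/* Post a phantom WQE (a fence MW bind) on the SQ when the qplib layer
 * requests one while the CQ is being polled.
 */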
3597 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3599 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3600 unsigned long flags;
3603 spin_lock_irqsave(&qp->sq_lock, flags);
3605 rc = bnxt_re_bind_fence_mw(lib_qp);
3607 lib_qp->sq.phantom_wqe_cnt++;
3608 ibdev_dbg(&qp->rdev->ibdev,
3609 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3610 lib_qp->id, lib_qp->sq.hwq.prod,
3611 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3612 lib_qp->sq.phantom_wqe_cnt);
3615 spin_unlock_irqrestore(&qp->sq_lock, flags);
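
/* poll_cq verb: pull up to num_entries CQEs from the qplib CQ, transcribe
 * them into ib_wc entries and apply the special handling needed for GSI
 * and shadow-QP completions.
 */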
3619 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3621 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3622 struct bnxt_re_qp *qp, *sh_qp;
3623 struct bnxt_qplib_cqe *cqe;
3624 int i, ncqe, budget;
3625 struct bnxt_qplib_q *sq;
3626 struct bnxt_qplib_qp *lib_qp;
3628 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3629 unsigned long flags;
3631 /* User CQ; the only processing we do is to
3632 * complete any pending CQ resize operation. */
3635 if (cq->resize_umem)
3636 bnxt_re_resize_cq_complete(cq);
3640 spin_lock_irqsave(&cq->cq_lock, flags);
3641 budget = min_t(u32, num_entries, cq->max_cql);
3642 num_entries = budget;
3644 ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
3650 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3653 if (sq->send_phantom) {
3654 qp = container_of(lib_qp,
3655 struct bnxt_re_qp, qplib_qp);
3656 if (send_phantom_wqe(qp) == -ENOMEM)
3657 ibdev_err(&cq->rdev->ibdev,
3658 "Phantom failed! Scheduled to send again\n");
3660 sq->send_phantom = false;
3664 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3671 for (i = 0; i < ncqe; i++, cqe++) {
3672 /* Transcribe each qplib CQE back to an ib_wc */
3673 memset(wc, 0, sizeof(*wc));
3675 wc->wr_id = cqe->wr_id;
3676 wc->byte_len = cqe->length;
3678 ((struct bnxt_qplib_qp *)
3679 (unsigned long)(cqe->qp_handle),
3680 struct bnxt_re_qp, qplib_qp);
3681 wc->qp = &qp->ib_qp;
3682 wc->ex.imm_data = cqe->immdata;
3683 wc->src_qp = cqe->src_qp;
3684 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3686 wc->vendor_err = cqe->status;
3688 switch (cqe->opcode) {
3689 case CQ_BASE_CQE_TYPE_REQ:
3690 sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3692 qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3693 /* Handle this completion with
3694 * the stored completion */
3696 memset(wc, 0, sizeof(*wc));
3699 bnxt_re_process_req_wc(wc, cqe);
3701 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3705 rc = bnxt_re_process_raw_qp_pkt_rx
3708 memset(wc, 0, sizeof(*wc));
3713 /* Errors need not be looped back.
3714 * But change the wr_id to the one
3715 * stored in the table. */
3717 tbl_idx = cqe->wr_id;
3718 sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
3719 wc->wr_id = sqp_entry->wrid;
3720 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3722 case CQ_BASE_CQE_TYPE_RES_RC:
3723 bnxt_re_process_res_rc_wc(wc, cqe);
3725 case CQ_BASE_CQE_TYPE_RES_UD:
3726 sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3728 qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3729 /* Handle this completion with
3730 * the stored completion */
3735 bnxt_re_process_res_shadow_qp_wc
3740 bnxt_re_process_res_ud_wc(qp, wc, cqe);
3743 ibdev_err(&cq->rdev->ibdev,
3744 "POLL CQ : type 0x%x not handled",
3753 spin_unlock_irqrestore(&cq->cq_lock, flags);
3754 return num_entries - budget;
3757 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3758 enum ib_cq_notify_flags ib_cqn_flags)
3760 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3761 int type = 0, rc = 0;
3762 unsigned long flags;
3764 spin_lock_irqsave(&cq->cq_lock, flags);
3765 /* Trigger on the very next completion */
3766 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3767 type = DBC_DBC_TYPE_CQ_ARMALL;
3768 /* Trigger on the next solicited completion */
3769 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3770 type = DBC_DBC_TYPE_CQ_ARMSE;
3772 /* Poll to see if there are missed events */
3773 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3774 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3778 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3781 spin_unlock_irqrestore(&cq->cq_lock, flags);
3785 /* Memory Regions */
3786 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3788 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3789 struct bnxt_re_dev *rdev = pd->rdev;
3790 struct bnxt_re_mr *mr;
3794 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3796 return ERR_PTR(-ENOMEM);
3799 mr->qplib_mr.pd = &pd->qplib_pd;
3800 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3801 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3803 /* Allocate and register 0 as the address */
3804 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3808 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3809 mr->qplib_mr.total_size = -1; /* Infinite length */
3810 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
3815 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3816 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3817 IB_ACCESS_REMOTE_ATOMIC))
3818 mr->ib_mr.rkey = mr->ib_mr.lkey;
3819 active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
3820 if (active_mrs > rdev->stats.res.mr_watermark)
3821 rdev->stats.res.mr_watermark = active_mrs;
3826 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3832 int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3834 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3835 struct bnxt_re_dev *rdev = mr->rdev;
3838 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3840 ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
3845 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3851 ib_umem_release(mr->ib_umem);
3854 atomic_dec(&rdev->stats.res.mr_count);
3858 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3860 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3862 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3865 mr->pages[mr->npages++] = addr;
3869 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3870 unsigned int *sg_offset)
3872 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3875 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3878 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3881 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3882 struct bnxt_re_dev *rdev = pd->rdev;
3883 struct bnxt_re_mr *mr = NULL;
3887 if (type != IB_MR_TYPE_MEM_REG) {
3888 ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
3889 return ERR_PTR(-EINVAL);
3891 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3892 return ERR_PTR(-EINVAL);
3894 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3896 return ERR_PTR(-ENOMEM);
3899 mr->qplib_mr.pd = &pd->qplib_pd;
3900 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3901 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3903 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3907 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3908 mr->ib_mr.rkey = mr->ib_mr.lkey;
3910 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3915 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3916 &mr->qplib_frpl, max_num_sg);
3918 ibdev_err(&rdev->ibdev,
3919 "Failed to allocate HW FR page list");
3923 active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
3924 if (active_mrs > rdev->stats.res.mr_watermark)
3925 rdev->stats.res.mr_watermark = active_mrs;
3931 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3937 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3938 struct ib_udata *udata)
3940 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3941 struct bnxt_re_dev *rdev = pd->rdev;
3942 struct bnxt_re_mw *mw;
3946 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3948 return ERR_PTR(-ENOMEM);
3950 mw->qplib_mw.pd = &pd->qplib_pd;
3952 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3953 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3954 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3955 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3957 ibdev_err(&rdev->ibdev, "Allocate MW failed!");
3960 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3962 active_mws = atomic_inc_return(&rdev->stats.res.mw_count);
3963 if (active_mws > rdev->stats.res.mw_watermark)
3964 rdev->stats.res.mw_watermark = active_mws;
3972 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3974 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3975 struct bnxt_re_dev *rdev = mw->rdev;
3978 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3980 ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
3985 atomic_dec(&rdev->stats.res.mw_count);
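
/* Common helper for user MR registration: validate the length, find the
 * best supported umem page size and register the MR with the HW.
 */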
3989 static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
3990 int mr_access_flags, struct ib_umem *umem)
3992 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3993 struct bnxt_re_dev *rdev = pd->rdev;
3994 unsigned long page_size;
3995 struct bnxt_re_mr *mr;
3999 if (length > BNXT_RE_MAX_MR_SIZE) {
4000 ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
4001 length, BNXT_RE_MAX_MR_SIZE);
4002 return ERR_PTR(-ENOMEM);
4005 page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
4007 ibdev_err(&rdev->ibdev, "umem page size unsupported!");
4008 return ERR_PTR(-EINVAL);
4011 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
4013 return ERR_PTR(-ENOMEM);
4016 mr->qplib_mr.pd = &pd->qplib_pd;
4017 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
4018 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
4020 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4022 ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
4026 /* The fixed portion of the rkey is the same as the lkey */
4027 mr->ib_mr.rkey = mr->qplib_mr.rkey;
4029 mr->qplib_mr.va = virt_addr;
4030 mr->qplib_mr.total_size = length;
4032 umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
4033 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
4034 umem_pgs, page_size);
4036 ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
4041 mr->ib_mr.lkey = mr->qplib_mr.lkey;
4042 mr->ib_mr.rkey = mr->qplib_mr.lkey;
4043 active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
4044 if (active_mrs > rdev->stats.res.mr_watermark)
4045 rdev->stats.res.mr_watermark = active_mrs;
4050 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4056 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
4057 u64 virt_addr, int mr_access_flags,
4058 struct ib_udata *udata)
4060 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4061 struct bnxt_re_dev *rdev = pd->rdev;
4062 struct ib_umem *umem;
4063 struct ib_mr *ib_mr;
4065 umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
4067 return ERR_CAST(umem);
4069 ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
4071 ib_umem_release(umem);
4075 struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
4076 u64 length, u64 virt_addr, int fd,
4077 int mr_access_flags, struct ib_udata *udata)
4079 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4080 struct bnxt_re_dev *rdev = pd->rdev;
4081 struct ib_umem_dmabuf *umem_dmabuf;
4082 struct ib_umem *umem;
4083 struct ib_mr *ib_mr;
4085 umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
4086 fd, mr_access_flags);
4087 if (IS_ERR(umem_dmabuf))
4088 return ERR_CAST(umem_dmabuf);
4090 umem = &umem_dmabuf->umem;
4092 ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
4094 ib_umem_release(umem);
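
/* Allocate a user context: check the ABI version, allocate the shared
 * page, report chip and resource limits in the uverbs response and insert
 * the shared-page mmap entry.
 */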
4098 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
4100 struct ib_device *ibdev = ctx->device;
4101 struct bnxt_re_ucontext *uctx =
4102 container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
4103 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
4104 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
4105 struct bnxt_re_user_mmap_entry *entry;
4106 struct bnxt_re_uctx_resp resp = {};
4107 u32 chip_met_rev_num = 0;
4110 ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);
4112 if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
4113 ibdev_dbg(ibdev, "requested ABI version is different from the device's %d",
4114 BNXT_RE_ABI_VERSION);
4120 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
4125 spin_lock_init(&uctx->sh_lock);
4127 resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
4128 chip_met_rev_num = rdev->chip_ctx->chip_num;
4129 chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
4130 BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
4131 chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
4132 BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
4133 resp.chip_id0 = chip_met_rev_num;
4134 /* Temp, use xa_alloc instead */
4135 resp.dev_id = rdev->en_dev->pdev->devfn;
4136 resp.max_qp = rdev->qplib_ctx.qpc_count;
4137 resp.pg_size = PAGE_SIZE;
4138 resp.cqe_sz = sizeof(struct cq_base);
4139 resp.max_cqd = dev_attr->max_cq_wqes;
4141 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
4142 resp.mode = rdev->chip_ctx->modes.wqe_mode;
4144 if (rdev->chip_ctx->modes.db_push)
4145 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
4147 entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
4152 uctx->shpage_mmap = &entry->rdma_entry;
4153 if (rdev->pacing.dbr_pacing)
4154 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
4156 rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
4158 ibdev_err(ibdev, "Failed to copy user context");
4165 free_page((unsigned long)uctx->shpg);
4171 void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
4173 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
4174 struct bnxt_re_ucontext,
4177 struct bnxt_re_dev *rdev = uctx->rdev;
4179 rdma_user_mmap_entry_remove(uctx->shpage_mmap);
4180 uctx->shpage_mmap = NULL;
4182 free_page((unsigned long)uctx->shpg);
4184 if (uctx->dpi.dbr) {
4185 /* Free DPI only if this is the first PD allocated by the
4186 * application and mark the context dpi as NULL. */
4188 bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi);
4189 uctx->dpi.dbr = NULL;
4193 /* Helper function to mmap the virtual memory from user app */
4194 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
4196 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
4197 struct bnxt_re_ucontext,
4199 struct bnxt_re_user_mmap_entry *bnxt_entry;
4200 struct rdma_user_mmap_entry *rdma_entry;
4204 rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
4208 bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
4211 switch (bnxt_entry->mmap_flag) {
4212 case BNXT_RE_MMAP_WC_DB:
4213 pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4214 ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4215 pgprot_writecombine(vma->vm_page_prot),
4218 case BNXT_RE_MMAP_UC_DB:
4219 pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4220 ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4221 pgprot_noncached(vma->vm_page_prot),
4224 case BNXT_RE_MMAP_SH_PAGE:
4225 ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
4227 case BNXT_RE_MMAP_DBR_BAR:
4228 pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4229 ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4230 pgprot_noncached(vma->vm_page_prot),
4233 case BNXT_RE_MMAP_DBR_PAGE:
4234 /* Driver doesn't expect write access for user space */
4235 if (vma->vm_flags & VM_WRITE)
4237 ret = vm_insert_page(vma, vma->vm_start,
4238 virt_to_page((void *)bnxt_entry->mem_offset));
4245 rdma_user_mmap_entry_put(rdma_entry);
4249 void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
4251 struct bnxt_re_user_mmap_entry *bnxt_entry;
4253 bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
4259 static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs)
4261 struct bnxt_re_ucontext *uctx;
4263 uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
4264 bnxt_re_pacing_alert(uctx->rdev);
4268 static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
4270 struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
4271 enum bnxt_re_alloc_page_type alloc_type;
4272 struct bnxt_re_user_mmap_entry *entry;
4273 enum bnxt_re_mmap_flag mmap_flag;
4274 struct bnxt_qplib_chip_ctx *cctx;
4275 struct bnxt_re_ucontext *uctx;
4276 struct bnxt_re_dev *rdev;
4283 uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
4285 return PTR_ERR(uctx);
4287 err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE);
4292 cctx = rdev->chip_ctx;
4294 switch (alloc_type) {
4295 case BNXT_RE_ALLOC_WC_PAGE:
4296 if (cctx->modes.db_push) {
4297 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi,
4298 uctx, BNXT_QPLIB_DPI_TYPE_WC))
4301 dpi = uctx->wcdpi.dpi;
4302 addr = (u64)uctx->wcdpi.umdbr;
4303 mmap_flag = BNXT_RE_MMAP_WC_DB;
4309 case BNXT_RE_ALLOC_DBR_BAR_PAGE:
4311 addr = (u64)rdev->pacing.dbr_bar_addr;
4312 mmap_flag = BNXT_RE_MMAP_DBR_BAR;
4315 case BNXT_RE_ALLOC_DBR_PAGE:
4317 addr = (u64)rdev->pacing.dbr_page;
4318 mmap_flag = BNXT_RE_MMAP_DBR_PAGE;
4325 entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mmap_offset);
4329 uobj->object = entry;
4330 uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
4331 err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
4332 &mmap_offset, sizeof(mmap_offset));
4336 err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
4337 &length, sizeof(length));
4341 err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
4342 &dpi, sizeof(length));
4349 static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
4350 enum rdma_remove_reason why,
4351 struct uverbs_attr_bundle *attrs)
4353 struct bnxt_re_user_mmap_entry *entry = uobject->object;
4354 struct bnxt_re_ucontext *uctx = entry->uctx;
4356 switch (entry->mmap_flag) {
4357 case BNXT_RE_MMAP_WC_DB:
4358 if (uctx && uctx->wcdpi.dbr) {
4359 struct bnxt_re_dev *rdev = uctx->rdev;
4361 bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi);
4362 uctx->wcdpi.dbr = NULL;
4365 case BNXT_RE_MMAP_DBR_BAR:
4366 case BNXT_RE_MMAP_DBR_PAGE:
4371 rdma_user_mmap_entry_remove(&entry->rdma_entry);
4376 DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE,
4377 UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE,
4378 BNXT_RE_OBJECT_ALLOC_PAGE,
4381 UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE,
4382 enum bnxt_re_alloc_page_type,
4384 UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
4385 UVERBS_ATTR_TYPE(u64),
4387 UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
4388 UVERBS_ATTR_TYPE(u32),
4390 UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI,
4391 UVERBS_ATTR_TYPE(u32),
4394 DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE,
4395 UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE,
4396 BNXT_RE_OBJECT_ALLOC_PAGE,
4397 UVERBS_ACCESS_DESTROY,
4400 DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
4401 UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup),
4402 &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
4403 &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));
4405 DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV);
4407 DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV,
4408 &UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV));
4410 const struct uapi_definition bnxt_re_uapi_defs[] = {
4411 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
4412 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV),