// SPDX-License-Identifier: GPL-2.0

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

/* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. */

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <net/addrconf.h>
#include <rdma/erdma-abi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "erdma.h"
#include "erdma_cm.h"
#include "erdma_verbs.h"

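/*
 * Build and post a CREATE_QP command to the cmdq. The request carries the
 * SQ/RQ depths, QPN, PD and the queue buffer layout: kernel QPs use a single
 * contiguous DMA buffer described by an inline MTT, user QPs pass the MTT
 * built from the user-space queue buffer. The cookie returned in resp0 is
 * saved in qp->attrs.cookie.
 */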
static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
{
	struct erdma_cmdq_create_qp_req req;
	struct erdma_pd *pd = to_epd(qp->ibqp.pd);
	struct erdma_uqp *user_qp;
	u64 resp0, resp1;
	int err;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_CREATE_QP);

	req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK,
			      ilog2(qp->attrs.sq_size)) |
		   FIELD_PREP(ERDMA_CMD_CREATE_QP_QPN_MASK, QP_ID(qp));
	req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK,
			      ilog2(qp->attrs.rq_size)) |
		   FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);

	if (rdma_is_kernel_res(&qp->ibqp.res)) {
		u32 pgsz_range = ilog2(SZ_1M) - PAGE_SHIFT;

		req.sq_cqn_mtt_cfg =
			FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
				   pgsz_range) |
			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
		req.rq_cqn_mtt_cfg =
			FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
				   pgsz_range) |
			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);

		req.sq_mtt_cfg =
			FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK, 0) |
			FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK, 1) |
			FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
				   ERDMA_MR_INLINE_MTT);
		req.rq_mtt_cfg = req.sq_mtt_cfg;

		req.rq_buf_addr = qp->kern_qp.rq_buf_dma_addr;
		req.sq_buf_addr = qp->kern_qp.sq_buf_dma_addr;
		req.sq_db_info_dma_addr = qp->kern_qp.sq_buf_dma_addr +
					  (qp->attrs.sq_size << SQEBB_SHIFT);
		req.rq_db_info_dma_addr = qp->kern_qp.rq_buf_dma_addr +
					  (qp->attrs.rq_size << RQE_SHIFT);
	} else {
		user_qp = &qp->user_qp;
		req.sq_cqn_mtt_cfg = FIELD_PREP(
			ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
			ilog2(user_qp->sq_mtt.page_size) - PAGE_SHIFT);
		req.sq_cqn_mtt_cfg |=
			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);

		req.rq_cqn_mtt_cfg = FIELD_PREP(
			ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
			ilog2(user_qp->rq_mtt.page_size) - PAGE_SHIFT);
		req.rq_cqn_mtt_cfg |=
			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);

		req.sq_mtt_cfg = user_qp->sq_mtt.page_offset;
		req.sq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
					     user_qp->sq_mtt.mtt_nents) |
				  FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
					     user_qp->sq_mtt.mtt_type);

		req.rq_mtt_cfg = user_qp->rq_mtt.page_offset;
		req.rq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
					     user_qp->rq_mtt.mtt_nents) |
				  FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
					     user_qp->rq_mtt.mtt_type);

		req.sq_buf_addr = user_qp->sq_mtt.mtt_entry[0];
		req.rq_buf_addr = user_qp->rq_mtt.mtt_entry[0];

		req.sq_db_info_dma_addr = user_qp->sq_db_info_dma_addr;
		req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr;
	}

	err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), &resp0,
				  &resp1);
	if (err)
		return err;

	qp->attrs.cookie =
		FIELD_GET(ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK, resp0);

	return 0;
}

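/*
 * Post a REG_MR command describing the STag (key byte plus MPT index), PD,
 * access rights, page size and MTT layout. DMA MRs need no address info;
 * FRMRs and indirect MTTs pass only the MTT buffer base, otherwise the
 * inline MTT entries are copied into the request.
 */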
static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
{
	struct erdma_cmdq_reg_mr_req req;
	struct erdma_pd *pd = to_epd(mr->ibmr.pd);
	u64 *phy_addr;
	int i;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_REG_MR);

	req.cfg0 = FIELD_PREP(ERDMA_CMD_MR_VALID_MASK, mr->valid) |
		   FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, mr->ibmr.lkey & 0xFF) |
		   FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, mr->ibmr.lkey >> 8);
	req.cfg1 = FIELD_PREP(ERDMA_CMD_REGMR_PD_MASK, pd->pdn) |
		   FIELD_PREP(ERDMA_CMD_REGMR_TYPE_MASK, mr->type) |
		   FIELD_PREP(ERDMA_CMD_REGMR_RIGHT_MASK, mr->access) |
		   FIELD_PREP(ERDMA_CMD_REGMR_ACC_MODE_MASK, 0);
	req.cfg2 = FIELD_PREP(ERDMA_CMD_REGMR_PAGESIZE_MASK,
			      ilog2(mr->mem.page_size)) |
		   FIELD_PREP(ERDMA_CMD_REGMR_MTT_TYPE_MASK, mr->mem.mtt_type) |
		   FIELD_PREP(ERDMA_CMD_REGMR_MTT_CNT_MASK, mr->mem.page_cnt);

	if (mr->type == ERDMA_MR_TYPE_DMA)
		goto post_cmd;

	if (mr->type == ERDMA_MR_TYPE_NORMAL) {
		req.start_va = mr->mem.va;
		req.size = mr->mem.len;
	}

	if (mr->type == ERDMA_MR_TYPE_FRMR ||
	    mr->mem.mtt_type == ERDMA_MR_INDIRECT_MTT) {
		phy_addr = req.phy_addr;
		*phy_addr = mr->mem.mtt_entry[0];
	} else {
		phy_addr = req.phy_addr;
		for (i = 0; i < mr->mem.mtt_nents; i++)
			*phy_addr++ = mr->mem.mtt_entry[i];
	}

post_cmd:
	return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
				   NULL);
}

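/*
 * Post a CREATE_CQ command carrying the CQN, depth, associated EQN and the
 * CQ buffer description (coherent DMA buffer with an inline MTT for kernel
 * CQs, user-provided MTT otherwise) plus the doorbell record address.
 */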
static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
{
	struct erdma_cmdq_create_cq_req req;
	u32 page_size;
	struct erdma_mem *mtt;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_CREATE_CQ);

	req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_CQN_MASK, cq->cqn) |
		   FIELD_PREP(ERDMA_CMD_CREATE_CQ_DEPTH_MASK, ilog2(cq->depth));
	req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_EQN_MASK, cq->assoc_eqn);

	if (rdma_is_kernel_res(&cq->ibcq.res)) {
		page_size = SZ_32M;
		req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
				       ilog2(page_size) - PAGE_SHIFT);
		req.qbuf_addr_l = lower_32_bits(cq->kern_cq.qbuf_dma_addr);
		req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);

		req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK, 1) |
			    FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK,
				       ERDMA_MR_INLINE_MTT);

		req.first_page_offset = 0;
		req.cq_db_info_addr =
			cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
	} else {
		mtt = &cq->user_cq.qbuf_mtt;
		req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
				       ilog2(mtt->page_size) - PAGE_SHIFT);
		if (mtt->mtt_nents == 1) {
			req.qbuf_addr_l = lower_32_bits(*(u64 *)mtt->mtt_buf);
			req.qbuf_addr_h = upper_32_bits(*(u64 *)mtt->mtt_buf);
		} else {
			req.qbuf_addr_l = lower_32_bits(mtt->mtt_entry[0]);
			req.qbuf_addr_h = upper_32_bits(mtt->mtt_entry[0]);
		}
		req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK,
				       mtt->mtt_nents);
		req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK,
				       mtt->mtt_type);

		req.first_page_offset = mtt->page_offset;
		req.cq_db_info_addr = cq->user_cq.db_info_dma_addr;
	}

	return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
				   NULL);
}

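/*
 * Simple bitmap-based index allocator used for PD and STag resources.
 * Allocation scans from next_alloc_idx and wraps around once; both paths
 * are protected by the resource spinlock.
 */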
static int erdma_alloc_idx(struct erdma_resource_cb *res_cb)
{
	unsigned long flags;
	u32 idx;

	spin_lock_irqsave(&res_cb->lock, flags);
	idx = find_next_zero_bit(res_cb->bitmap, res_cb->max_cap,
				 res_cb->next_alloc_idx);
	if (idx == res_cb->max_cap) {
		idx = find_first_zero_bit(res_cb->bitmap, res_cb->max_cap);
		if (idx == res_cb->max_cap) {
			res_cb->next_alloc_idx = 1;
			spin_unlock_irqrestore(&res_cb->lock, flags);
			return -ENOSPC;
		}
	}

	set_bit(idx, res_cb->bitmap);
	res_cb->next_alloc_idx = idx + 1;
	spin_unlock_irqrestore(&res_cb->lock, flags);

	return idx;
}

static inline void erdma_free_idx(struct erdma_resource_cb *res_cb, u32 idx)
{
	unsigned long flags;
	u32 used;

	spin_lock_irqsave(&res_cb->lock, flags);
	used = __test_and_clear_bit(idx, res_cb->bitmap);
	spin_unlock_irqrestore(&res_cb->lock, flags);
	WARN_ON(!used);
}

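/*
 * Wrap rdma_user_mmap_entry_insert(): record the target address and mmap
 * flag, and return the mmap offset that user space will pass to mmap().
 */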
static struct rdma_user_mmap_entry *
erdma_user_mmap_entry_insert(struct erdma_ucontext *uctx, void *address,
			     u32 size, u8 mmap_flag, u64 *mmap_offset)
{
	struct erdma_user_mmap_entry *entry =
		kzalloc(sizeof(*entry), GFP_KERNEL);
	int ret;

	if (!entry)
		return NULL;

	entry->address = (u64)address;
	entry->mmap_flag = mmap_flag;

	size = PAGE_ALIGN(size);

	ret = rdma_user_mmap_entry_insert(&uctx->ibucontext, &entry->rdma_entry,
					  size);
	if (ret) {
		kfree(entry);
		return NULL;
	}

	*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
		       struct ib_udata *unused)
{
	struct erdma_dev *dev = to_edev(ibdev);

	memset(attr, 0, sizeof(*attr));

	attr->max_mr_size = dev->attrs.max_mr_size;
	attr->vendor_id = PCI_VENDOR_ID_ALIBABA;
	attr->vendor_part_id = dev->pdev->device;
	attr->hw_ver = dev->pdev->revision;
	attr->max_qp = dev->attrs.max_qp;
	attr->max_qp_wr = min(dev->attrs.max_send_wr, dev->attrs.max_recv_wr);
	attr->max_qp_rd_atom = dev->attrs.max_ord;
	attr->max_qp_init_rd_atom = dev->attrs.max_ird;
	attr->max_res_rd_atom = dev->attrs.max_qp * dev->attrs.max_ird;
	attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
	ibdev->local_dma_lkey = dev->attrs.local_dma_key;
	attr->max_send_sge = dev->attrs.max_send_sge;
	attr->max_recv_sge = dev->attrs.max_recv_sge;
	attr->max_sge_rd = dev->attrs.max_sge_rd;
	attr->max_cq = dev->attrs.max_cq;
	attr->max_cqe = dev->attrs.max_cqe;
	attr->max_mr = dev->attrs.max_mr;
	attr->max_pd = dev->attrs.max_pd;
	attr->max_mw = dev->attrs.max_mw;
	attr->max_fast_reg_page_list_len = ERDMA_MAX_FRMR_PA;
	attr->page_size_cap = ERDMA_PAGE_SIZE_SUPPORT;
	attr->fw_ver = dev->attrs.fw_version;

	if (dev->netdev)
		addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
				    dev->netdev->dev_addr);

	return 0;
}

int erdma_query_gid(struct ib_device *ibdev, u32 port, int idx,
		    union ib_gid *gid)
{
	struct erdma_dev *dev = to_edev(ibdev);

	memset(gid, 0, sizeof(*gid));
	ether_addr_copy(gid->raw, dev->attrs.peer_addr);

	return 0;
}

int erdma_query_port(struct ib_device *ibdev, u32 port,
		     struct ib_port_attr *attr)
{
	struct erdma_dev *dev = to_edev(ibdev);
	struct net_device *ndev = dev->netdev;

	memset(attr, 0, sizeof(*attr));

	attr->gid_tbl_len = 1;
	attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
	attr->max_msg_sz = -1;

	if (!ndev)
		goto out;

	ib_get_eth_speed(ibdev, port, &attr->active_speed, &attr->active_width);
	attr->max_mtu = ib_mtu_int_to_enum(ndev->mtu);
	attr->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
	if (netif_running(ndev) && netif_carrier_ok(ndev))
		dev->state = IB_PORT_ACTIVE;
	else
		dev->state = IB_PORT_DOWN;
	attr->state = dev->state;

out:
	if (dev->state == IB_PORT_ACTIVE)
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	else
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

	return 0;
}

int erdma_get_port_immutable(struct ib_device *ibdev, u32 port,
			     struct ib_port_immutable *port_immutable)
{
	port_immutable->gid_tbl_len = 1;
	port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	return 0;
}

int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct erdma_pd *pd = to_epd(ibpd);
	struct erdma_dev *dev = to_edev(ibpd->device);
	int pdn;

	pdn = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_PD]);
	if (pdn < 0)
		return pdn;

	pd->pdn = pdn;

	return 0;
}

int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct erdma_pd *pd = to_epd(ibpd);
	struct erdma_dev *dev = to_edev(ibpd->device);

	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_PD], pd->pdn);

	return 0;
}

static int erdma_qp_validate_cap(struct erdma_dev *dev,
				 struct ib_qp_init_attr *attrs)
{
	if ((attrs->cap.max_send_wr > dev->attrs.max_send_wr) ||
	    (attrs->cap.max_recv_wr > dev->attrs.max_recv_wr) ||
	    (attrs->cap.max_send_sge > dev->attrs.max_send_sge) ||
	    (attrs->cap.max_recv_sge > dev->attrs.max_recv_sge) ||
	    (attrs->cap.max_inline_data > ERDMA_MAX_INLINE) ||
	    !attrs->cap.max_send_wr || !attrs->cap.max_recv_wr) {
		return -EINVAL;
	}

	return 0;
}

static int erdma_qp_validate_attr(struct erdma_dev *dev,
				  struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type != IB_QPT_RC)
		return -EOPNOTSUPP;

	if (attrs->srq)
		return -EOPNOTSUPP;

	if (!attrs->send_cq || !attrs->recv_cq)
		return -EOPNOTSUPP;

	return 0;
}

static void free_kernel_qp(struct erdma_qp *qp)
{
	struct erdma_dev *dev = qp->dev;

	vfree(qp->kern_qp.swr_tbl);
	vfree(qp->kern_qp.rwr_tbl);

	if (qp->kern_qp.sq_buf)
		dma_free_coherent(
			&dev->pdev->dev,
			WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
			qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);

	if (qp->kern_qp.rq_buf)
		dma_free_coherent(
			&dev->pdev->dev,
			WARPPED_BUFSIZE(qp->attrs.rq_size << RQE_SHIFT),
			qp->kern_qp.rq_buf, qp->kern_qp.rq_buf_dma_addr);
}

static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
			  struct ib_qp_init_attr *attrs)
{
	struct erdma_kqp *kqp = &qp->kern_qp;
	int size;

	if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
		kqp->sig_all = 1;

	kqp->sq_pi = 0;
	kqp->sq_ci = 0;
	kqp->rq_pi = 0;
	kqp->rq_ci = 0;
	kqp->hw_sq_db =
		dev->func_bar + (ERDMA_SDB_SHARED_PAGE_INDEX << PAGE_SHIFT);
	kqp->hw_rq_db = dev->func_bar + ERDMA_BAR_RQDB_SPACE_OFFSET;

	kqp->swr_tbl = vmalloc(qp->attrs.sq_size * sizeof(u64));
	kqp->rwr_tbl = vmalloc(qp->attrs.rq_size * sizeof(u64));
	if (!kqp->swr_tbl || !kqp->rwr_tbl)
		goto err_out;

	size = (qp->attrs.sq_size << SQEBB_SHIFT) + ERDMA_EXTRA_BUFFER_SIZE;
	kqp->sq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
					 &kqp->sq_buf_dma_addr, GFP_KERNEL);
	if (!kqp->sq_buf)
		goto err_out;

	size = (qp->attrs.rq_size << RQE_SHIFT) + ERDMA_EXTRA_BUFFER_SIZE;
	kqp->rq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
					 &kqp->rq_buf_dma_addr, GFP_KERNEL);
	if (!kqp->rq_buf)
		goto err_out;

	kqp->sq_db_info = kqp->sq_buf + (qp->attrs.sq_size << SQEBB_SHIFT);
	kqp->rq_db_info = kqp->rq_buf + (qp->attrs.rq_size << RQE_SHIFT);

	return 0;

err_out:
	free_kernel_qp(qp);
	return -ENOMEM;
}

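/*
 * Pin a user buffer with ib_umem_get(), pick the best supported page size,
 * and collect the per-block DMA addresses either into the inline MTT array
 * or into a separately allocated, DMA-mapped indirect MTT buffer.
 */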
static int get_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem,
			   u64 start, u64 len, int access, u64 virt,
			   unsigned long req_page_size, u8 force_indirect_mtt)
{
	struct ib_block_iter biter;
	uint64_t *phy_addr = NULL;
	int ret = 0;

	mem->umem = ib_umem_get(&dev->ibdev, start, len, access);
	if (IS_ERR(mem->umem)) {
		ret = PTR_ERR(mem->umem);
		mem->umem = NULL;
		return ret;
	}

	mem->va = virt;
	mem->len = len;
	mem->page_size = ib_umem_find_best_pgsz(mem->umem, req_page_size, virt);
	mem->page_offset = start & (mem->page_size - 1);
	mem->mtt_nents = ib_umem_num_dma_blocks(mem->umem, mem->page_size);
	mem->page_cnt = mem->mtt_nents;

	if (mem->page_cnt > ERDMA_MAX_INLINE_MTT_ENTRIES ||
	    force_indirect_mtt) {
		mem->mtt_type = ERDMA_MR_INDIRECT_MTT;
		mem->mtt_buf =
			alloc_pages_exact(MTT_SIZE(mem->page_cnt), GFP_KERNEL);
		if (!mem->mtt_buf) {
			ret = -ENOMEM;
			goto error_ret;
		}
		phy_addr = mem->mtt_buf;
	} else {
		mem->mtt_type = ERDMA_MR_INLINE_MTT;
		phy_addr = mem->mtt_entry;
	}

	rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size) {
		*phy_addr = rdma_block_iter_dma_address(&biter);
		phy_addr++;
	}

	if (mem->mtt_type == ERDMA_MR_INDIRECT_MTT) {
		mem->mtt_entry[0] =
			dma_map_single(&dev->pdev->dev, mem->mtt_buf,
				       MTT_SIZE(mem->page_cnt), DMA_TO_DEVICE);
		if (dma_mapping_error(&dev->pdev->dev, mem->mtt_entry[0])) {
			free_pages_exact(mem->mtt_buf, MTT_SIZE(mem->page_cnt));
			mem->mtt_buf = NULL;
			ret = -ENOMEM;
			goto error_ret;
		}
	}

	return 0;

error_ret:
	if (mem->umem) {
		ib_umem_release(mem->umem);
		mem->umem = NULL;
	}

	return ret;
}

static void put_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem)
{
	if (mem->mtt_buf) {
		dma_unmap_single(&dev->pdev->dev, mem->mtt_entry[0],
				 MTT_SIZE(mem->page_cnt), DMA_TO_DEVICE);
		free_pages_exact(mem->mtt_buf, MTT_SIZE(mem->page_cnt));
	}

	if (mem->umem) {
		ib_umem_release(mem->umem);
		mem->umem = NULL;
	}
}

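/*
 * Doorbell record pages are shared within a ucontext: reuse an already
 * pinned page covering dbrecords_va if one exists, otherwise pin a new one.
 * Pages are refcounted under dbrecords_page_mutex.
 */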
static int erdma_map_user_dbrecords(struct erdma_ucontext *ctx,
				    u64 dbrecords_va,
				    struct erdma_user_dbrecords_page **dbr_page,
				    dma_addr_t *dma_addr)
{
	struct erdma_user_dbrecords_page *page = NULL;
	int rv = 0;

	mutex_lock(&ctx->dbrecords_page_mutex);

	list_for_each_entry(page, &ctx->dbrecords_page_list, list)
		if (page->va == (dbrecords_va & PAGE_MASK))
			goto found;

	page = kmalloc(sizeof(*page), GFP_KERNEL);
	if (!page) {
		rv = -ENOMEM;
		goto out;
	}

	page->va = (dbrecords_va & PAGE_MASK);
	page->refcnt = 0;

	page->umem = ib_umem_get(ctx->ibucontext.device,
				 dbrecords_va & PAGE_MASK, PAGE_SIZE, 0);
	if (IS_ERR(page->umem)) {
		rv = PTR_ERR(page->umem);
		kfree(page);
		goto out;
	}

	list_add(&page->list, &ctx->dbrecords_page_list);

found:
	*dma_addr = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
		    (dbrecords_va & ~PAGE_MASK);
	*dbr_page = page;
	page->refcnt++;

out:
	mutex_unlock(&ctx->dbrecords_page_mutex);

	return rv;
}

static void
erdma_unmap_user_dbrecords(struct erdma_ucontext *ctx,
			   struct erdma_user_dbrecords_page **dbr_page)
{
	if (!ctx || !(*dbr_page))
		return;

	mutex_lock(&ctx->dbrecords_page_mutex);
	if (--(*dbr_page)->refcnt == 0) {
		list_del(&(*dbr_page)->list);
		ib_umem_release((*dbr_page)->umem);
		kfree(*dbr_page);
	}

	*dbr_page = NULL;
	mutex_unlock(&ctx->dbrecords_page_mutex);
}

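/*
 * Set up a user QP: validate the queue buffer length, build MTTs for the SQ
 * and RQ halves of the buffer, and map the doorbell record page. The SQ and
 * RQ doorbell records are adjacent, ERDMA_DB_SIZE apart.
 */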
static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
			u64 va, u32 len, u64 db_info_va)
{
	dma_addr_t db_info_dma_addr;
	u32 rq_offset;
	int ret;

	if (len < (PAGE_ALIGN(qp->attrs.sq_size * SQEBB_SIZE) +
		   qp->attrs.rq_size * RQE_SIZE))
		return -EINVAL;

	ret = get_mtt_entries(qp->dev, &qp->user_qp.sq_mtt, va,
			      qp->attrs.sq_size << SQEBB_SHIFT, 0, va,
			      (SZ_1M - SZ_4K), 1);
	if (ret)
		return ret;

	rq_offset = PAGE_ALIGN(qp->attrs.sq_size << SQEBB_SHIFT);
	qp->user_qp.rq_offset = rq_offset;

	ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mtt, va + rq_offset,
			      qp->attrs.rq_size << RQE_SHIFT, 0, va + rq_offset,
			      (SZ_1M - SZ_4K), 1);
	if (ret)
		goto put_sq_mtt;

	ret = erdma_map_user_dbrecords(uctx, db_info_va,
				       &qp->user_qp.user_dbr_page,
				       &db_info_dma_addr);
	if (ret)
		goto put_rq_mtt;

	qp->user_qp.sq_db_info_dma_addr = db_info_dma_addr;
	qp->user_qp.rq_db_info_dma_addr = db_info_dma_addr + ERDMA_DB_SIZE;

	return 0;

put_rq_mtt:
	put_mtt_entries(qp->dev, &qp->user_qp.rq_mtt);

put_sq_mtt:
	put_mtt_entries(qp->dev, &qp->user_qp.sq_mtt);

	return ret;
}

static void free_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx)
{
	put_mtt_entries(qp->dev, &qp->user_qp.sq_mtt);
	put_mtt_entries(qp->dev, &qp->user_qp.rq_mtt);
	erdma_unmap_user_dbrecords(uctx, &qp->user_qp.user_dbr_page);
}

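/*
 * Common QP creation path: validate capabilities and attributes, allocate a
 * QPN from qp_xa, round the SQ/RQ sizes up to powers of two, initialize
 * either the user or the kernel queue resources, and finally issue the
 * CREATE_QP command to the device.
 */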
int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
		    struct ib_udata *udata)
{
	struct erdma_qp *qp = to_eqp(ibqp);
	struct erdma_dev *dev = to_edev(ibqp->device);
	struct erdma_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct erdma_ucontext, ibucontext);
	struct erdma_ureq_create_qp ureq;
	struct erdma_uresp_create_qp uresp;
	int ret;

	ret = erdma_qp_validate_cap(dev, attrs);
	if (ret)
		goto err_out;

	ret = erdma_qp_validate_attr(dev, attrs);
	if (ret)
		goto err_out;

	qp->scq = to_ecq(attrs->send_cq);
	qp->rcq = to_ecq(attrs->recv_cq);
	qp->dev = dev;
	qp->attrs.cc = dev->attrs.cc;

	init_rwsem(&qp->state_lock);
	kref_init(&qp->ref);
	init_completion(&qp->safe_free);

	ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
			      XA_LIMIT(1, dev->attrs.max_qp - 1),
			      &dev->next_alloc_qpn, GFP_KERNEL);
	if (ret < 0) {
		ret = -ENOMEM;
		goto err_out;
	}

	qp->attrs.sq_size = roundup_pow_of_two(attrs->cap.max_send_wr *
					       ERDMA_MAX_WQEBB_PER_SQE);
	qp->attrs.rq_size = roundup_pow_of_two(attrs->cap.max_recv_wr);

	if (uctx) {
		ret = ib_copy_from_udata(&ureq, udata,
					 min(sizeof(ureq), udata->inlen));
		if (ret)
			goto err_out_xa;

		ret = init_user_qp(qp, uctx, ureq.qbuf_va, ureq.qbuf_len,
				   ureq.db_record_va);
		if (ret)
			goto err_out_xa;

		memset(&uresp, 0, sizeof(uresp));

		uresp.num_sqe = qp->attrs.sq_size;
		uresp.num_rqe = qp->attrs.rq_size;
		uresp.qp_id = QP_ID(qp);
		uresp.rq_offset = qp->user_qp.rq_offset;

		ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (ret)
			goto err_out_cmd;
	} else {
		init_kernel_qp(dev, qp, attrs);
	}

	qp->attrs.max_send_sge = attrs->cap.max_send_sge;
	qp->attrs.max_recv_sge = attrs->cap.max_recv_sge;
	qp->attrs.state = ERDMA_QP_STATE_IDLE;

	ret = create_qp_cmd(dev, qp);
	if (ret)
		goto err_out_cmd;

	spin_lock_init(&qp->lock);

	return 0;

err_out_cmd:
	if (uctx)
		free_user_qp(qp, uctx);
	else
		free_kernel_qp(qp);
err_out_xa:
	xa_erase(&dev->qp_xa, QP_ID(qp));
err_out:
	return ret;
}

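/* Allocate an MPT index and form an STag with a zero key byte. */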
static int erdma_create_stag(struct erdma_dev *dev, u32 *stag)
{
	int stag_idx;

	stag_idx = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX]);
	if (stag_idx < 0)
		return stag_idx;

	/* For now, we always let key field be zero. */
	*stag = (stag_idx << 8);

	return 0;
}

struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	struct erdma_dev *dev = to_edev(ibpd->device);
	struct erdma_mr *mr;
	u32 stag;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	ret = erdma_create_stag(dev, &stag);
	if (ret)
		goto out_free;

	mr->type = ERDMA_MR_TYPE_DMA;

	mr->ibmr.lkey = stag;
	mr->ibmr.rkey = stag;
	mr->ibmr.pd = ibpd;
	mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(acc);
	ret = regmr_cmd(dev, mr);
	if (ret)
		goto out_remove_stag;

	return &mr->ibmr;

out_remove_stag:
	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
		       mr->ibmr.lkey >> 8);

out_free:
	kfree(mr);

	return ERR_PTR(ret);
}

struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				u32 max_num_sg)
{
	struct erdma_mr *mr;
	struct erdma_dev *dev = to_edev(ibpd->device);
	int ret;
	u32 stag;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EOPNOTSUPP);

	if (max_num_sg > ERDMA_MR_MAX_MTT_CNT)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	ret = erdma_create_stag(dev, &stag);
	if (ret)
		goto out_free;

	mr->type = ERDMA_MR_TYPE_FRMR;

	mr->ibmr.lkey = stag;
	mr->ibmr.rkey = stag;
	mr->ibmr.pd = ibpd;
	/* update it in FRMR. */
	mr->access = ERDMA_MR_ACC_LR | ERDMA_MR_ACC_LW | ERDMA_MR_ACC_RR |
		     ERDMA_MR_ACC_RW;

	mr->mem.page_size = PAGE_SIZE; /* update it later. */
	mr->mem.page_cnt = max_num_sg;
	mr->mem.mtt_type = ERDMA_MR_INDIRECT_MTT;
	mr->mem.mtt_buf =
		alloc_pages_exact(MTT_SIZE(mr->mem.page_cnt), GFP_KERNEL);
	if (!mr->mem.mtt_buf) {
		ret = -ENOMEM;
		goto out_remove_stag;
	}

	mr->mem.mtt_entry[0] =
		dma_map_single(&dev->pdev->dev, mr->mem.mtt_buf,
			       MTT_SIZE(mr->mem.page_cnt), DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->pdev->dev, mr->mem.mtt_entry[0])) {
		ret = -ENOMEM;
		goto out_free_mtt;
	}

	ret = regmr_cmd(dev, mr);
	if (ret)
		goto out_dma_unmap;

	return &mr->ibmr;

out_dma_unmap:
	dma_unmap_single(&dev->pdev->dev, mr->mem.mtt_entry[0],
			 MTT_SIZE(mr->mem.page_cnt), DMA_TO_DEVICE);
out_free_mtt:
	free_pages_exact(mr->mem.mtt_buf, MTT_SIZE(mr->mem.page_cnt));

out_remove_stag:
	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
		       mr->ibmr.lkey >> 8);

out_free:
	kfree(mr);

	return ERR_PTR(ret);
}

static int erdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct erdma_mr *mr = to_emr(ibmr);

	if (mr->mem.mtt_nents >= mr->mem.page_cnt)
		return -1;

	*((u64 *)mr->mem.mtt_buf + mr->mem.mtt_nents) = addr;
	mr->mem.mtt_nents++;

	return 0;
}

int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		    unsigned int *sg_offset)
{
	struct erdma_mr *mr = to_emr(ibmr);
	int num;

	mr->mem.mtt_nents = 0;

	num = ib_sg_to_pages(&mr->ibmr, sg, sg_nents, sg_offset,
			     erdma_set_page);

	return num;
}

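/*
 * Register a user MR: pin and translate the buffer, allocate an STag, and
 * issue the REG_MR command.
 */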
struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				u64 virt, int access, struct ib_udata *udata)
{
	struct erdma_mr *mr = NULL;
	struct erdma_dev *dev = to_edev(ibpd->device);
	u32 stag;
	int ret;

	if (!len || len > dev->attrs.max_mr_size)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	ret = get_mtt_entries(dev, &mr->mem, start, len, access, virt,
			      SZ_2G - SZ_4K, 0);
	if (ret)
		goto err_out_free;

	ret = erdma_create_stag(dev, &stag);
	if (ret)
		goto err_out_put_mtt;

	mr->ibmr.lkey = mr->ibmr.rkey = stag;
	mr->ibmr.pd = ibpd;
	mr->mem.va = virt;
	mr->mem.len = len;
	mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(access);
	mr->valid = 1;
	mr->type = ERDMA_MR_TYPE_NORMAL;

	ret = regmr_cmd(dev, mr);
	if (ret)
		goto err_out_mr;

	return &mr->ibmr;

err_out_mr:
	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
		       mr->ibmr.lkey >> 8);

err_out_put_mtt:
	put_mtt_entries(dev, &mr->mem);

err_out_free:
	kfree(mr);

	return ERR_PTR(ret);
}

int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct erdma_mr *mr = to_emr(ibmr);
	struct erdma_dev *dev = to_edev(ibmr->device);
	struct erdma_cmdq_dereg_mr_req req;
	int ret;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_DEREG_MR);

	req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) |
		  FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF);

	ret = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
				  NULL);
	if (ret)
		return ret;

	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX], ibmr->lkey >> 8);

	put_mtt_entries(dev, &mr->mem);

	kfree(mr);
	return 0;
}

int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct erdma_cq *cq = to_ecq(ibcq);
	struct erdma_dev *dev = to_edev(ibcq->device);
	struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
		udata, struct erdma_ucontext, ibucontext);
	int err;
	struct erdma_cmdq_destroy_cq_req req;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_DESTROY_CQ);
	req.cqn = cq->cqn;

	err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
				  NULL);
	if (err)
		return err;

	if (rdma_is_kernel_res(&cq->ibcq.res)) {
		dma_free_coherent(&dev->pdev->dev,
				  WARPPED_BUFSIZE(cq->depth << CQE_SHIFT),
				  cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
	} else {
		erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
		put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
	}

	xa_erase(&dev->cq_xa, cq->cqn);

	return 0;
}

int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct erdma_qp *qp = to_eqp(ibqp);
	struct erdma_dev *dev = to_edev(ibqp->device);
	struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
		udata, struct erdma_ucontext, ibucontext);
	struct erdma_qp_attrs qp_attrs;
	int err;
	struct erdma_cmdq_destroy_qp_req req;

	down_write(&qp->state_lock);
	qp_attrs.state = ERDMA_QP_STATE_ERROR;
	erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
	up_write(&qp->state_lock);

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_DESTROY_QP);
	req.qpn = QP_ID(qp);

	err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
				  NULL);
	if (err)
		return err;

	erdma_qp_put(qp);
	wait_for_completion(&qp->safe_free);

	if (rdma_is_kernel_res(&qp->ibqp.res)) {
		vfree(qp->kern_qp.swr_tbl);
		vfree(qp->kern_qp.rwr_tbl);
		dma_free_coherent(
			&dev->pdev->dev,
			WARPPED_BUFSIZE(qp->attrs.rq_size << RQE_SHIFT),
			qp->kern_qp.rq_buf, qp->kern_qp.rq_buf_dma_addr);
		dma_free_coherent(
			&dev->pdev->dev,
			WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
			qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);
	} else {
		put_mtt_entries(dev, &qp->user_qp.sq_mtt);
		put_mtt_entries(dev, &qp->user_qp.rq_mtt);
		erdma_unmap_user_dbrecords(ctx, &qp->user_qp.user_dbr_page);
	}

	if (qp->cep)
		erdma_cep_put(qp->cep);
	xa_erase(&dev->qp_xa, QP_ID(qp));

	return 0;
}

void erdma_qp_get_ref(struct ib_qp *ibqp)
{
	erdma_qp_get(to_eqp(ibqp));
}

void erdma_qp_put_ref(struct ib_qp *ibqp)
{
	erdma_qp_put(to_eqp(ibqp));
}

int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct erdma_user_mmap_entry *entry;
	pgprot_t prot;
	int err;

	rdma_entry = rdma_user_mmap_entry_get(ctx, vma);
	if (!rdma_entry)
		return -EINVAL;

	entry = to_emmap(rdma_entry);

	switch (entry->mmap_flag) {
	case ERDMA_MMAP_IO_NC:
		/* map doorbell. */
		prot = pgprot_device(vma->vm_page_prot);
		break;
	default:
		err = -EINVAL;
		goto put_entry;
	}

	err = rdma_user_mmap_io(ctx, vma, PFN_DOWN(entry->address), PAGE_SIZE,
				prot, rdma_entry);

put_entry:
	rdma_user_mmap_entry_put(rdma_entry);
	return err;
}

void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct erdma_user_mmap_entry *entry = to_emmap(rdma_entry);

	kfree(entry);
}

#define ERDMA_SDB_PAGE 0
#define ERDMA_SDB_ENTRY 1
#define ERDMA_SDB_SHARED 2

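/*
 * Pick the send doorbell resource for a new ucontext: a dedicated doorbell
 * page, a dedicated entry inside a type-1 page, or the shared doorbell page
 * as a fallback. The dedicated resources back direct WQE (dwqe) usage and
 * are tracked in bitmaps under db_bitmap_lock.
 */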
static void alloc_db_resources(struct erdma_dev *dev,
			       struct erdma_ucontext *ctx)
{
	u32 bitmap_idx;
	struct erdma_devattr *attrs = &dev->attrs;

	if (attrs->disable_dwqe)
		goto alloc_normal_db;

	/* Try to alloc independent SDB page. */
	spin_lock(&dev->db_bitmap_lock);
	bitmap_idx = find_first_zero_bit(dev->sdb_page, attrs->dwqe_pages);
	if (bitmap_idx != attrs->dwqe_pages) {
		set_bit(bitmap_idx, dev->sdb_page);
		spin_unlock(&dev->db_bitmap_lock);

		ctx->sdb_type = ERDMA_SDB_PAGE;
		ctx->sdb_idx = bitmap_idx;
		ctx->sdb_page_idx = bitmap_idx;
		ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET +
			   (bitmap_idx << PAGE_SHIFT);
		ctx->sdb_page_off = 0;

		return;
	}

	bitmap_idx = find_first_zero_bit(dev->sdb_entry, attrs->dwqe_entries);
	if (bitmap_idx != attrs->dwqe_entries) {
		set_bit(bitmap_idx, dev->sdb_entry);
		spin_unlock(&dev->db_bitmap_lock);

		ctx->sdb_type = ERDMA_SDB_ENTRY;
		ctx->sdb_idx = bitmap_idx;
		ctx->sdb_page_idx = attrs->dwqe_pages +
				    bitmap_idx / ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
		ctx->sdb_page_off = bitmap_idx % ERDMA_DWQE_TYPE1_CNT_PER_PAGE;

		ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET +
			   (ctx->sdb_page_idx << PAGE_SHIFT);

		return;
	}

	spin_unlock(&dev->db_bitmap_lock);

alloc_normal_db:
	ctx->sdb_type = ERDMA_SDB_SHARED;
	ctx->sdb_idx = 0;
	ctx->sdb_page_idx = ERDMA_SDB_SHARED_PAGE_INDEX;
	ctx->sdb_page_off = 0;

	ctx->sdb = dev->func_bar_addr + (ctx->sdb_page_idx << PAGE_SHIFT);
}

static void erdma_uctx_user_mmap_entries_remove(struct erdma_ucontext *uctx)
{
	rdma_user_mmap_entry_remove(uctx->sq_db_mmap_entry);
	rdma_user_mmap_entry_remove(uctx->rq_db_mmap_entry);
	rdma_user_mmap_entry_remove(uctx->cq_db_mmap_entry);
}

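/*
 * Set up a new user context: choose its doorbell resources, initialize the
 * doorbell record page list, and export the SQ/RQ/CQ doorbell regions to
 * user space as mmap entries described in the alloc_ctx response.
 */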
int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
{
	struct erdma_ucontext *ctx = to_ectx(ibctx);
	struct erdma_dev *dev = to_edev(ibctx->device);
	int ret;
	struct erdma_uresp_alloc_ctx uresp = {};

	if (atomic_inc_return(&dev->num_ctx) > ERDMA_MAX_CONTEXT) {
		ret = -ENOMEM;
		goto err_out;
	}

	INIT_LIST_HEAD(&ctx->dbrecords_page_list);
	mutex_init(&ctx->dbrecords_page_mutex);

	alloc_db_resources(dev, ctx);

	ctx->rdb = dev->func_bar_addr + ERDMA_BAR_RQDB_SPACE_OFFSET;
	ctx->cdb = dev->func_bar_addr + ERDMA_BAR_CQDB_SPACE_OFFSET;

	if (udata->outlen < sizeof(uresp)) {
		ret = -EINVAL;
		goto err_out;
	}

	ctx->sq_db_mmap_entry = erdma_user_mmap_entry_insert(
		ctx, (void *)ctx->sdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.sdb);
	if (!ctx->sq_db_mmap_entry) {
		ret = -ENOMEM;
		goto err_out;
	}

	ctx->rq_db_mmap_entry = erdma_user_mmap_entry_insert(
		ctx, (void *)ctx->rdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.rdb);
	if (!ctx->rq_db_mmap_entry) {
		ret = -ENOMEM;
		goto err_out;
	}

	ctx->cq_db_mmap_entry = erdma_user_mmap_entry_insert(
		ctx, (void *)ctx->cdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.cdb);
	if (!ctx->cq_db_mmap_entry) {
		ret = -ENOMEM;
		goto err_out;
	}

	uresp.dev_id = dev->pdev->device;
	uresp.sdb_type = ctx->sdb_type;
	uresp.sdb_offset = ctx->sdb_page_off;

	ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (ret)
		goto err_out;

	return 0;

err_out:
	erdma_uctx_user_mmap_entries_remove(ctx);
	atomic_dec(&dev->num_ctx);

	return ret;
}

void erdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct erdma_ucontext *ctx = to_ectx(ibctx);
	struct erdma_dev *dev = to_edev(ibctx->device);

	spin_lock(&dev->db_bitmap_lock);
	if (ctx->sdb_type == ERDMA_SDB_PAGE)
		clear_bit(ctx->sdb_idx, dev->sdb_page);
	else if (ctx->sdb_type == ERDMA_SDB_ENTRY)
		clear_bit(ctx->sdb_idx, dev->sdb_entry);

	erdma_uctx_user_mmap_entries_remove(ctx);

	spin_unlock(&dev->db_bitmap_lock);

	atomic_dec(&dev->num_ctx);
}

static int ib_qp_state_to_erdma_qp_state[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = ERDMA_QP_STATE_IDLE,
	[IB_QPS_INIT] = ERDMA_QP_STATE_IDLE,
	[IB_QPS_RTR] = ERDMA_QP_STATE_RTR,
	[IB_QPS_RTS] = ERDMA_QP_STATE_RTS,
	[IB_QPS_SQD] = ERDMA_QP_STATE_CLOSING,
	[IB_QPS_SQE] = ERDMA_QP_STATE_TERMINATE,
	[IB_QPS_ERR] = ERDMA_QP_STATE_ERROR
};

int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		    struct ib_udata *udata)
{
	struct erdma_qp_attrs new_attrs;
	enum erdma_qp_attr_mask erdma_attr_mask = 0;
	struct erdma_qp *qp = to_eqp(ibqp);
	int ret = 0;

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	memset(&new_attrs, 0, sizeof(new_attrs));

	if (attr_mask & IB_QP_STATE) {
		new_attrs.state = ib_qp_state_to_erdma_qp_state[attr->qp_state];

		erdma_attr_mask |= ERDMA_QP_ATTR_STATE;
	}

	down_write(&qp->state_lock);

	ret = erdma_modify_qp_internal(qp, &new_attrs, erdma_attr_mask);

	up_write(&qp->state_lock);

	return ret;
}

int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		   int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct erdma_qp *qp;
	struct erdma_dev *dev;

	if (ibqp && qp_attr && qp_init_attr) {
		qp = to_eqp(ibqp);
		dev = to_edev(ibqp->device);
	} else {
		return -EINVAL;
	}

	qp_attr->cap.max_inline_data = ERDMA_MAX_INLINE;
	qp_init_attr->cap.max_inline_data = ERDMA_MAX_INLINE;

	qp_attr->cap.max_send_wr = qp->attrs.sq_size;
	qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
	qp_attr->cap.max_send_sge = qp->attrs.max_send_sge;
	qp_attr->cap.max_recv_sge = qp->attrs.max_recv_sge;

	qp_attr->path_mtu = ib_mtu_int_to_enum(dev->netdev->mtu);
	qp_attr->max_rd_atomic = qp->attrs.irq_size;
	qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;

	qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ;

	qp_init_attr->cap = qp_attr->cap;

	return 0;
}

static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
			      struct erdma_ureq_create_cq *ureq)
{
	int ret;
	struct erdma_dev *dev = to_edev(cq->ibcq.device);

	ret = get_mtt_entries(dev, &cq->user_cq.qbuf_mtt, ureq->qbuf_va,
			      ureq->qbuf_len, 0, ureq->qbuf_va, SZ_64M - SZ_4K,
			      1);
	if (ret)
		return ret;

	ret = erdma_map_user_dbrecords(ctx, ureq->db_record_va,
				       &cq->user_cq.user_dbr_page,
				       &cq->user_cq.db_info_dma_addr);
	if (ret)
		put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);

	return ret;
}

static int erdma_init_kernel_cq(struct erdma_cq *cq)
{
	struct erdma_dev *dev = to_edev(cq->ibcq.device);

	cq->kern_cq.qbuf =
		dma_alloc_coherent(&dev->pdev->dev,
				   WARPPED_BUFSIZE(cq->depth << CQE_SHIFT),
				   &cq->kern_cq.qbuf_dma_addr, GFP_KERNEL);
	if (!cq->kern_cq.qbuf)
		return -ENOMEM;

	cq->kern_cq.db_record =
		(u64 *)(cq->kern_cq.qbuf + (cq->depth << CQE_SHIFT));
	spin_lock_init(&cq->kern_cq.lock);
	/* use default cqdb addr */
	cq->kern_cq.db = dev->func_bar + ERDMA_BAR_CQDB_SPACE_OFFSET;

	return 0;
}

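/*
 * Create a CQ: allocate a CQN from cq_xa, set up either the user queue
 * buffer (MTT plus doorbell record) or a kernel coherent buffer, and issue
 * the CREATE_CQ command.
 */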
int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		    struct ib_udata *udata)
{
	struct erdma_cq *cq = to_ecq(ibcq);
	struct erdma_dev *dev = to_edev(ibcq->device);
	unsigned int depth = attr->cqe;
	int ret;
	struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
		udata, struct erdma_ucontext, ibucontext);

	if (depth > dev->attrs.max_cqe)
		return -EINVAL;

	depth = roundup_pow_of_two(depth);
	cq->ibcq.cqe = depth;
	cq->depth = depth;
	cq->assoc_eqn = attr->comp_vector + 1;

	ret = xa_alloc_cyclic(&dev->cq_xa, &cq->cqn, cq,
			      XA_LIMIT(1, dev->attrs.max_cq - 1),
			      &dev->next_alloc_cqn, GFP_KERNEL);
	if (ret < 0)
		return ret;

	if (!rdma_is_kernel_res(&ibcq->res)) {
		struct erdma_ureq_create_cq ureq;
		struct erdma_uresp_create_cq uresp;

		ret = ib_copy_from_udata(&ureq, udata,
					 min(udata->inlen, sizeof(ureq)));
		if (ret)
			goto err_out_xa;

		ret = erdma_init_user_cq(ctx, cq, &ureq);
		if (ret)
			goto err_out_xa;

		uresp.cq_id = cq->cqn;
		uresp.num_cqe = depth;

		ret = ib_copy_to_udata(udata, &uresp,
				       min(sizeof(uresp), udata->outlen));
		if (ret)
			goto err_free_res;
	} else {
		ret = erdma_init_kernel_cq(cq);
		if (ret)
			goto err_out_xa;
	}

	ret = create_cq_cmd(dev, cq);
	if (ret)
		goto err_free_res;

	return 0;

err_free_res:
	if (!rdma_is_kernel_res(&ibcq->res)) {
		erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
		put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
	} else {
		dma_free_coherent(&dev->pdev->dev,
				  WARPPED_BUFSIZE(depth << CQE_SHIFT),
				  cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
	}

err_out_xa:
	xa_erase(&dev->cq_xa, cq->cqn);

	return ret;
}

void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
{
	struct ib_event event;

	event.device = &dev->ibdev;
	event.element.port_num = 1;
	event.event = reason;

	ib_dispatch_event(&event);
}