// SPDX-License-Identifier: GPL-2.0

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

/* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. */

#include <linux/vmalloc.h>
#include <net/addrconf.h>
#include <rdma/erdma-abi.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

#include "erdma.h"
#include "erdma_cm.h"
#include "erdma_verbs.h"
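/*
 * Queue buffer MTT entries are carried inline in the CREATE_QP command when
 * they fit into the inline slots; otherwise only the DMA address of the MTT
 * buffer is passed and the MTT level field tells the device to walk an
 * indirect table. See assemble_qbuf_mtt_for_cmd() below.
 */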
static void assemble_qbuf_mtt_for_cmd(struct erdma_mem *mem, u32 *cfg,
				      u64 *addr0, u64 *addr1)
{
	struct erdma_mtt *mtt = mem->mtt;

	if (mem->mtt_nents > ERDMA_MAX_INLINE_MTT_ENTRIES) {
		*addr0 = mtt->buf_dma;
		*cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
				   ERDMA_MR_MTT_1LEVEL);
	} else {
		*addr0 = mtt->buf[0];
		memcpy(addr1, mtt->buf + 1, MTT_SIZE(mem->mtt_nents - 1));
		*cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
				   ERDMA_MR_MTT_0LEVEL);
	}
}
static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
{
	struct erdma_dev *dev = to_edev(qp->ibqp.device);
	struct erdma_pd *pd = to_epd(qp->ibqp.pd);
	struct erdma_cmdq_create_qp_req req;
	struct erdma_uqp *user_qp;
	u64 resp0, resp1;
	int err;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_CREATE_QP);

	req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK,
			      ilog2(qp->attrs.sq_size)) |
		   FIELD_PREP(ERDMA_CMD_CREATE_QP_QPN_MASK, QP_ID(qp));
	req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK,
			      ilog2(qp->attrs.rq_size)) |
		   FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);

	if (rdma_is_kernel_res(&qp->ibqp.res)) {
		u32 pgsz_range = ilog2(SZ_1M) - ERDMA_HW_PAGE_SHIFT;

		req.sq_cqn_mtt_cfg =
			FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
				   pgsz_range) |
			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
		req.rq_cqn_mtt_cfg =
			FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
				   pgsz_range) |
			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);

		req.sq_mtt_cfg =
			FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK, 0) |
			FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK, 1) |
			FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
				   ERDMA_MR_MTT_0LEVEL);
		req.rq_mtt_cfg = req.sq_mtt_cfg;

		req.rq_buf_addr = qp->kern_qp.rq_buf_dma_addr;
		req.sq_buf_addr = qp->kern_qp.sq_buf_dma_addr;
		req.sq_db_info_dma_addr = qp->kern_qp.sq_buf_dma_addr +
					  (qp->attrs.sq_size << SQEBB_SHIFT);
		req.rq_db_info_dma_addr = qp->kern_qp.rq_buf_dma_addr +
					  (qp->attrs.rq_size << RQE_SHIFT);
	} else {
		user_qp = &qp->user_qp;
		req.sq_cqn_mtt_cfg = FIELD_PREP(
			ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
			ilog2(user_qp->sq_mem.page_size) - ERDMA_HW_PAGE_SHIFT);
		req.sq_cqn_mtt_cfg |=
			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);

		req.rq_cqn_mtt_cfg = FIELD_PREP(
			ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
			ilog2(user_qp->rq_mem.page_size) - ERDMA_HW_PAGE_SHIFT);
		req.rq_cqn_mtt_cfg |=
			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);

		req.sq_mtt_cfg = user_qp->sq_mem.page_offset;
		req.sq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
					     user_qp->sq_mem.mtt_nents);

		req.rq_mtt_cfg = user_qp->rq_mem.page_offset;
		req.rq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
					     user_qp->rq_mem.mtt_nents);

		assemble_qbuf_mtt_for_cmd(&user_qp->sq_mem, &req.sq_mtt_cfg,
					  &req.sq_buf_addr, req.sq_mtt_entry);
		assemble_qbuf_mtt_for_cmd(&user_qp->rq_mem, &req.rq_mtt_cfg,
					  &req.rq_buf_addr, req.rq_mtt_entry);

		req.sq_db_info_dma_addr = user_qp->sq_db_info_dma_addr;
		req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr;

		if (uctx->ext_db.enable) {
			req.sq_cqn_mtt_cfg |=
				FIELD_PREP(ERDMA_CMD_CREATE_QP_DB_CFG_MASK, 1);
			req.db_cfg =
				FIELD_PREP(ERDMA_CMD_CREATE_QP_SQDB_CFG_MASK,
					   uctx->ext_db.sdb_off) |
				FIELD_PREP(ERDMA_CMD_CREATE_QP_RQDB_CFG_MASK,
					   uctx->ext_db.rdb_off);
		}
	}

	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
				  &resp1);
	if (!err)
		qp->attrs.cookie =
			FIELD_GET(ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK, resp0);

	return err;
}
static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
{
	struct erdma_pd *pd = to_epd(mr->ibmr.pd);
	u32 mtt_level = ERDMA_MR_MTT_0LEVEL;
	struct erdma_cmdq_reg_mr_req req;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_REG_MR);

	if (mr->type == ERDMA_MR_TYPE_FRMR ||
	    mr->mem.page_cnt > ERDMA_MAX_INLINE_MTT_ENTRIES) {
		if (mr->mem.mtt->continuous) {
			req.phy_addr[0] = mr->mem.mtt->buf_dma;
			mtt_level = ERDMA_MR_MTT_1LEVEL;
		} else {
			req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist);
			mtt_level = mr->mem.mtt->level;
		}
	} else if (mr->type != ERDMA_MR_TYPE_DMA) {
		memcpy(req.phy_addr, mr->mem.mtt->buf,
		       MTT_SIZE(mr->mem.page_cnt));
	}

	req.cfg0 = FIELD_PREP(ERDMA_CMD_MR_VALID_MASK, mr->valid) |
		   FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, mr->ibmr.lkey & 0xFF) |
		   FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, mr->ibmr.lkey >> 8);
	req.cfg1 = FIELD_PREP(ERDMA_CMD_REGMR_PD_MASK, pd->pdn) |
		   FIELD_PREP(ERDMA_CMD_REGMR_TYPE_MASK, mr->type) |
		   FIELD_PREP(ERDMA_CMD_REGMR_RIGHT_MASK, mr->access);
	req.cfg2 = FIELD_PREP(ERDMA_CMD_REGMR_PAGESIZE_MASK,
			      ilog2(mr->mem.page_size)) |
		   FIELD_PREP(ERDMA_CMD_REGMR_MTT_LEVEL_MASK, mtt_level) |
		   FIELD_PREP(ERDMA_CMD_REGMR_MTT_CNT_MASK, mr->mem.page_cnt);

	if (mr->type == ERDMA_MR_TYPE_DMA)
		goto post_cmd;

	if (mr->type == ERDMA_MR_TYPE_NORMAL) {
		req.start_va = mr->mem.va;
		req.size = mr->mem.len;
	}

	if (!mr->mem.mtt->continuous && mr->mem.mtt->level > 1) {
		req.cfg0 |= FIELD_PREP(ERDMA_CMD_MR_VERSION_MASK, 1);
		req.cfg2 |= FIELD_PREP(ERDMA_CMD_REGMR_MTT_PAGESIZE_MASK,
				       PAGE_SHIFT - ERDMA_HW_PAGE_SHIFT);
		req.size_h = upper_32_bits(mr->mem.len);
		req.mtt_cnt_h = mr->mem.page_cnt >> 20;
	}

post_cmd:
	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
{
	struct erdma_dev *dev = to_edev(cq->ibcq.device);
	struct erdma_cmdq_create_cq_req req;
	struct erdma_mem *mem;
	u32 page_size;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_CREATE_CQ);

	req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_CQN_MASK, cq->cqn) |
		   FIELD_PREP(ERDMA_CMD_CREATE_CQ_DEPTH_MASK, ilog2(cq->depth));
	req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_EQN_MASK, cq->assoc_eqn);

	if (rdma_is_kernel_res(&cq->ibcq.res)) {
		page_size = SZ_32M;
		req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
				       ilog2(page_size) - ERDMA_HW_PAGE_SHIFT);
		req.qbuf_addr_l = lower_32_bits(cq->kern_cq.qbuf_dma_addr);
		req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);

		req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK, 1) |
			    FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
				       ERDMA_MR_MTT_0LEVEL);

		req.first_page_offset = 0;
		req.cq_db_info_addr =
			cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
	} else {
		mem = &cq->user_cq.qbuf_mem;
		req.cfg0 |=
			FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
				   ilog2(mem->page_size) - ERDMA_HW_PAGE_SHIFT);
		if (mem->mtt_nents == 1) {
			req.qbuf_addr_l = lower_32_bits(mem->mtt->buf[0]);
			req.qbuf_addr_h = upper_32_bits(mem->mtt->buf[0]);
			req.cfg1 |=
				FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
					   ERDMA_MR_MTT_0LEVEL);
		} else {
			req.qbuf_addr_l = lower_32_bits(mem->mtt->buf_dma);
			req.qbuf_addr_h = upper_32_bits(mem->mtt->buf_dma);
			req.cfg1 |=
				FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
					   ERDMA_MR_MTT_1LEVEL);
		}
		req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK,
				       mem->mtt_nents);

		req.first_page_offset = mem->page_offset;
		req.cq_db_info_addr = cq->user_cq.db_info_dma_addr;

		if (uctx->ext_db.enable) {
			req.cfg1 |= FIELD_PREP(
				ERDMA_CMD_CREATE_CQ_MTT_DB_CFG_MASK, 1);
			req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_DB_CFG_MASK,
					      uctx->ext_db.cdb_off);
		}
	}

	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
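/*
 * Simple bitmap-based index allocator shared by the PD and STag resource
 * pools. Allocation scans from next_alloc_idx so indexes are handed out
 * round-robin, falling back to a full rescan before giving up.
 */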
static int erdma_alloc_idx(struct erdma_resource_cb *res_cb)
{
	unsigned long flags;
	u32 idx;

	spin_lock_irqsave(&res_cb->lock, flags);
	idx = find_next_zero_bit(res_cb->bitmap, res_cb->max_cap,
				 res_cb->next_alloc_idx);
	if (idx == res_cb->max_cap) {
		idx = find_first_zero_bit(res_cb->bitmap, res_cb->max_cap);
		if (idx == res_cb->max_cap) {
			res_cb->next_alloc_idx = 1;
			spin_unlock_irqrestore(&res_cb->lock, flags);
			return -ENOSPC;
		}
	}

	set_bit(idx, res_cb->bitmap);
	res_cb->next_alloc_idx = idx + 1;
	spin_unlock_irqrestore(&res_cb->lock, flags);

	return idx;
}
static inline void erdma_free_idx(struct erdma_resource_cb *res_cb, u32 idx)
{
	unsigned long flags;
	u32 used;

	spin_lock_irqsave(&res_cb->lock, flags);
	used = __test_and_clear_bit(idx, res_cb->bitmap);
	spin_unlock_irqrestore(&res_cb->lock, flags);
	WARN_ON(!used);
}
static struct rdma_user_mmap_entry *
erdma_user_mmap_entry_insert(struct erdma_ucontext *uctx, void *address,
			     u32 size, u8 mmap_flag, u64 *mmap_offset)
{
	struct erdma_user_mmap_entry *entry =
		kzalloc(sizeof(*entry), GFP_KERNEL);
	int ret;

	if (!entry)
		return NULL;

	entry->address = (u64)address;
	entry->mmap_flag = mmap_flag;

	size = PAGE_ALIGN(size);

	ret = rdma_user_mmap_entry_insert(&uctx->ibucontext, &entry->rdma_entry,
					  size);
	if (ret) {
		kfree(entry);
		return NULL;
	}

	*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}
int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
		       struct ib_udata *unused)
{
	struct erdma_dev *dev = to_edev(ibdev);

	memset(attr, 0, sizeof(*attr));

	attr->max_mr_size = dev->attrs.max_mr_size;
	attr->vendor_id = PCI_VENDOR_ID_ALIBABA;
	attr->vendor_part_id = dev->pdev->device;
	attr->hw_ver = dev->pdev->revision;
	attr->max_qp = dev->attrs.max_qp - 1;
	attr->max_qp_wr = min(dev->attrs.max_send_wr, dev->attrs.max_recv_wr);
	attr->max_qp_rd_atom = dev->attrs.max_ord;
	attr->max_qp_init_rd_atom = dev->attrs.max_ird;
	attr->max_res_rd_atom = dev->attrs.max_qp * dev->attrs.max_ird;
	attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
	ibdev->local_dma_lkey = dev->attrs.local_dma_key;
	attr->max_send_sge = dev->attrs.max_send_sge;
	attr->max_recv_sge = dev->attrs.max_recv_sge;
	attr->max_sge_rd = dev->attrs.max_sge_rd;
	attr->max_cq = dev->attrs.max_cq - 1;
	attr->max_cqe = dev->attrs.max_cqe;
	attr->max_mr = dev->attrs.max_mr;
	attr->max_pd = dev->attrs.max_pd;
	attr->max_mw = dev->attrs.max_mw;
	attr->max_fast_reg_page_list_len = ERDMA_MAX_FRMR_PA;
	attr->page_size_cap = ERDMA_PAGE_SIZE_SUPPORT;

	if (dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_ATOMIC)
		attr->atomic_cap = IB_ATOMIC_GLOB;

	attr->fw_ver = dev->attrs.fw_version;

	if (dev->netdev)
		addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
				    dev->netdev->dev_addr);

	return 0;
}
int erdma_query_gid(struct ib_device *ibdev, u32 port, int idx,
		    union ib_gid *gid)
{
	struct erdma_dev *dev = to_edev(ibdev);

	memset(gid, 0, sizeof(*gid));
	ether_addr_copy(gid->raw, dev->attrs.peer_addr);

	return 0;
}
int erdma_query_port(struct ib_device *ibdev, u32 port,
		     struct ib_port_attr *attr)
{
	struct erdma_dev *dev = to_edev(ibdev);
	struct net_device *ndev = dev->netdev;

	memset(attr, 0, sizeof(*attr));

	attr->gid_tbl_len = 1;
	attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
	attr->max_msg_sz = -1;

	if (!ndev)
		goto out;

	ib_get_eth_speed(ibdev, port, &attr->active_speed, &attr->active_width);
	attr->max_mtu = ib_mtu_int_to_enum(ndev->mtu);
	attr->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
	if (netif_running(ndev) && netif_carrier_ok(ndev))
		dev->state = IB_PORT_ACTIVE;
	else
		dev->state = IB_PORT_DOWN;
	attr->state = dev->state;

out:
	if (dev->state == IB_PORT_ACTIVE)
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	else
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

	return 0;
}
int erdma_get_port_immutable(struct ib_device *ibdev, u32 port,
			     struct ib_port_immutable *port_immutable)
{
	port_immutable->gid_tbl_len = 1;
	port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	return 0;
}
int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct erdma_pd *pd = to_epd(ibpd);
	struct erdma_dev *dev = to_edev(ibpd->device);
	int pdn;

	pdn = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_PD]);
	if (pdn < 0)
		return pdn;

	pd->pdn = pdn;

	return 0;
}
int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct erdma_pd *pd = to_epd(ibpd);
	struct erdma_dev *dev = to_edev(ibpd->device);

	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_PD], pd->pdn);

	return 0;
}
static void erdma_flush_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct erdma_qp *qp =
		container_of(dwork, struct erdma_qp, reflush_dwork);
	struct erdma_cmdq_reflush_req req;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_REFLUSH);
	req.qpn = QP_ID(qp);
	req.sq_pi = qp->kern_qp.sq_pi;
	req.rq_pi = qp->kern_qp.rq_pi;
	erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL, NULL);
}
static int erdma_qp_validate_cap(struct erdma_dev *dev,
				 struct ib_qp_init_attr *attrs)
{
	if ((attrs->cap.max_send_wr > dev->attrs.max_send_wr) ||
	    (attrs->cap.max_recv_wr > dev->attrs.max_recv_wr) ||
	    (attrs->cap.max_send_sge > dev->attrs.max_send_sge) ||
	    (attrs->cap.max_recv_sge > dev->attrs.max_recv_sge) ||
	    (attrs->cap.max_inline_data > ERDMA_MAX_INLINE) ||
	    !attrs->cap.max_send_wr || !attrs->cap.max_recv_wr) {
		return -EINVAL;
	}

	return 0;
}
static int erdma_qp_validate_attr(struct erdma_dev *dev,
				  struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type != IB_QPT_RC)
		return -EOPNOTSUPP;

	if (attrs->srq)
		return -EOPNOTSUPP;

	if (!attrs->send_cq || !attrs->recv_cq)
		return -EOPNOTSUPP;

	return 0;
}
static void free_kernel_qp(struct erdma_qp *qp)
{
	struct erdma_dev *dev = qp->dev;

	vfree(qp->kern_qp.swr_tbl);
	vfree(qp->kern_qp.rwr_tbl);

	if (qp->kern_qp.sq_buf)
		dma_free_coherent(&dev->pdev->dev,
				  WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
				  qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);

	if (qp->kern_qp.rq_buf)
		dma_free_coherent(&dev->pdev->dev,
				  WARPPED_BUFSIZE(qp->attrs.rq_size << RQE_SHIFT),
				  qp->kern_qp.rq_buf, qp->kern_qp.rq_buf_dma_addr);
}
static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
			  struct ib_qp_init_attr *attrs)
{
	struct erdma_kqp *kqp = &qp->kern_qp;
	int size;

	if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
		kqp->sig_all = 1;

	kqp->hw_sq_db =
		dev->func_bar + (ERDMA_SDB_SHARED_PAGE_INDEX << PAGE_SHIFT);
	kqp->hw_rq_db = dev->func_bar + ERDMA_BAR_RQDB_SPACE_OFFSET;

	kqp->swr_tbl = vmalloc_array(qp->attrs.sq_size, sizeof(u64));
	kqp->rwr_tbl = vmalloc_array(qp->attrs.rq_size, sizeof(u64));
	if (!kqp->swr_tbl || !kqp->rwr_tbl)
		goto err_out;

	size = (qp->attrs.sq_size << SQEBB_SHIFT) + ERDMA_EXTRA_BUFFER_SIZE;
	kqp->sq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
					 &kqp->sq_buf_dma_addr, GFP_KERNEL);
	if (!kqp->sq_buf)
		goto err_out;

	size = (qp->attrs.rq_size << RQE_SHIFT) + ERDMA_EXTRA_BUFFER_SIZE;
	kqp->rq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
					 &kqp->rq_buf_dma_addr, GFP_KERNEL);
	if (!kqp->rq_buf)
		goto err_out;

	kqp->sq_db_info = kqp->sq_buf + (qp->attrs.sq_size << SQEBB_SHIFT);
	kqp->rq_db_info = kqp->rq_buf + (qp->attrs.rq_size << RQE_SHIFT);

	return 0;

err_out:
	free_kernel_qp(qp);
	return -ENOMEM;
}
static void erdma_fill_bottom_mtt(struct erdma_dev *dev, struct erdma_mem *mem)
{
	struct erdma_mtt *mtt = mem->mtt;
	struct ib_block_iter biter;
	u32 idx = 0;

	while (mtt->low_level)
		mtt = mtt->low_level;

	rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size)
		mtt->buf[idx++] = rdma_block_iter_dma_address(&biter);
}
static struct erdma_mtt *erdma_create_cont_mtt(struct erdma_dev *dev,
					       size_t size)
{
	struct erdma_mtt *mtt;

	mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
	if (!mtt)
		return ERR_PTR(-ENOMEM);

	mtt->size = size;
	mtt->buf = kzalloc(mtt->size, GFP_KERNEL);
	if (!mtt->buf)
		goto err_free_mtt;

	mtt->continuous = true;
	mtt->buf_dma = dma_map_single(&dev->pdev->dev, mtt->buf, mtt->size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->pdev->dev, mtt->buf_dma))
		goto err_free_mtt_buf;

	return mtt;

err_free_mtt_buf:
	kfree(mtt->buf);

err_free_mtt:
	kfree(mtt);

	return ERR_PTR(-ENOMEM);
}
static void erdma_destroy_mtt_buf_sg(struct erdma_dev *dev,
				     struct erdma_mtt *mtt)
{
	dma_unmap_sg(&dev->pdev->dev, mtt->sglist, mtt->nsg, DMA_TO_DEVICE);
	vfree(mtt->sglist);
}
static void erdma_destroy_scatter_mtt(struct erdma_dev *dev,
				      struct erdma_mtt *mtt)
{
	erdma_destroy_mtt_buf_sg(dev, mtt);
	vfree(mtt->buf);
	kfree(mtt);
}
static void erdma_init_middle_mtt(struct erdma_mtt *mtt,
				  struct erdma_mtt *low_mtt)
{
	struct scatterlist *sg;
	u32 idx = 0;
	int i;

	for_each_sg(low_mtt->sglist, sg, low_mtt->nsg, i)
		mtt->buf[idx++] = sg_dma_address(sg);
}
static int erdma_create_mtt_buf_sg(struct erdma_dev *dev, struct erdma_mtt *mtt)
{
	struct scatterlist *sglist;
	void *buf = mtt->buf;
	u32 npages, i, nsg;
	struct page *pg;

	/* Fail if buf is not page aligned. */
	if ((uintptr_t)buf & ~PAGE_MASK)
		return -EINVAL;

	npages = DIV_ROUND_UP(mtt->size, PAGE_SIZE);
	sglist = vzalloc(npages * sizeof(*sglist));
	if (!sglist)
		return -ENOMEM;

	sg_init_table(sglist, npages);
	for (i = 0; i < npages; i++) {
		pg = vmalloc_to_page(buf);
		if (!pg)
			goto err;
		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
		buf += PAGE_SIZE;
	}

	nsg = dma_map_sg(&dev->pdev->dev, sglist, npages, DMA_TO_DEVICE);
	if (!nsg)
		goto err;

	mtt->sglist = sglist;
	mtt->nsg = nsg;

	return 0;

err:
	vfree(sglist);

	return -ENOMEM;
}
static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
						  size_t size)
{
	struct erdma_mtt *mtt;
	int ret = -ENOMEM;

	mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
	if (!mtt)
		return ERR_PTR(-ENOMEM);

	mtt->size = ALIGN(size, PAGE_SIZE);
	mtt->buf = vzalloc(mtt->size);
	mtt->continuous = false;
	if (!mtt->buf)
		goto err_free_mtt;

	ret = erdma_create_mtt_buf_sg(dev, mtt);
	if (ret)
		goto err_free_mtt_buf;

	ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, nsg:%u\n",
		  mtt->size, mtt->nsg);

	return mtt;

err_free_mtt_buf:
	vfree(mtt->buf);

err_free_mtt:
	kfree(mtt);

	return ERR_PTR(ret);
}
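/*
 * For non-continuous MTTs the table is built bottom-up: the lowest level
 * holds the DMA addresses of the registered pages, and each higher level
 * holds the DMA addresses of the pages backing the level below. Levels are
 * added until the top level fits into a single scatterlist entry (at most
 * three levels), so the register command only needs one starting address.
 */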
static struct erdma_mtt *erdma_create_mtt(struct erdma_dev *dev, size_t size,
					  bool force_continuous)
{
	struct erdma_mtt *mtt, *tmp_mtt;
	int ret, level = 0;

	ibdev_dbg(&dev->ibdev, "create_mtt, size:%lu, force cont:%d\n", size,
		  force_continuous);

	if (!(dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_MTT_VA))
		force_continuous = true;

	if (force_continuous)
		return erdma_create_cont_mtt(dev, size);

	mtt = erdma_create_scatter_mtt(dev, size);
	if (IS_ERR(mtt))
		return mtt;
	level = 1;

	/* Converge the MTT table level by level. */
	while (mtt->nsg != 1 && level <= 3) {
		tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->nsg));
		if (IS_ERR(tmp_mtt)) {
			ret = PTR_ERR(tmp_mtt);
			goto err_free_mtt;
		}
		erdma_init_middle_mtt(tmp_mtt, mtt);
		tmp_mtt->low_level = mtt;
		mtt = tmp_mtt;
		level++;
	}

	if (level > 3) {
		ret = -ENOMEM;
		goto err_free_mtt;
	}

	mtt->level = level;
	ibdev_dbg(&dev->ibdev, "top mtt: level:%d, dma_addr 0x%llx\n",
		  mtt->level, mtt->sglist[0].dma_address);

	return mtt;

err_free_mtt:
	while (mtt) {
		tmp_mtt = mtt->low_level;
		erdma_destroy_scatter_mtt(dev, mtt);
		mtt = tmp_mtt;
	}

	return ERR_PTR(ret);
}
static void erdma_destroy_mtt(struct erdma_dev *dev, struct erdma_mtt *mtt)
{
	struct erdma_mtt *tmp_mtt;

	if (mtt->continuous) {
		dma_unmap_single(&dev->pdev->dev, mtt->buf_dma, mtt->size,
				 DMA_TO_DEVICE);
		kfree(mtt->buf);
		kfree(mtt);
	} else {
		while (mtt) {
			tmp_mtt = mtt->low_level;
			erdma_destroy_scatter_mtt(dev, mtt);
			mtt = tmp_mtt;
		}
	}
}
static int get_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem,
			   u64 start, u64 len, int access, u64 virt,
			   unsigned long req_page_size, bool force_continuous)
{
	int ret = 0;

	mem->umem = ib_umem_get(&dev->ibdev, start, len, access);
	if (IS_ERR(mem->umem)) {
		ret = PTR_ERR(mem->umem);
		mem->umem = NULL;
		return ret;
	}

	mem->va = virt;
	mem->len = len;
	mem->page_size = ib_umem_find_best_pgsz(mem->umem, req_page_size, virt);
	mem->page_offset = start & (mem->page_size - 1);
	mem->mtt_nents = ib_umem_num_dma_blocks(mem->umem, mem->page_size);
	mem->page_cnt = mem->mtt_nents;
	mem->mtt = erdma_create_mtt(dev, MTT_SIZE(mem->page_cnt),
				    force_continuous);
	if (IS_ERR(mem->mtt)) {
		ret = PTR_ERR(mem->mtt);
		goto error_ret;
	}

	erdma_fill_bottom_mtt(dev, mem);

	return 0;

error_ret:
	if (mem->umem) {
		ib_umem_release(mem->umem);
		mem->umem = NULL;
	}

	return ret;
}
static void put_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem)
{
	if (mem->mtt)
		erdma_destroy_mtt(dev, mem->mtt);

	if (mem->umem) {
		ib_umem_release(mem->umem);
		mem->umem = NULL;
	}
}
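/*
 * Doorbell records of a user context are grouped by page: the page that
 * contains a given doorbell-record VA is pinned once, kept on
 * dbrecords_page_list with a reference count, and reused by later mappings
 * that fall into the same page.
 */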
static int erdma_map_user_dbrecords(struct erdma_ucontext *ctx,
				    u64 dbrecords_va,
				    struct erdma_user_dbrecords_page **dbr_page,
				    dma_addr_t *dma_addr)
{
	struct erdma_user_dbrecords_page *page = NULL;
	int rv = 0;

	mutex_lock(&ctx->dbrecords_page_mutex);

	list_for_each_entry(page, &ctx->dbrecords_page_list, list)
		if (page->va == (dbrecords_va & PAGE_MASK))
			goto found;

	page = kmalloc(sizeof(*page), GFP_KERNEL);
	if (!page) {
		rv = -ENOMEM;
		goto out;
	}

	page->va = (dbrecords_va & PAGE_MASK);
	page->refcnt = 0;

	page->umem = ib_umem_get(ctx->ibucontext.device,
				 dbrecords_va & PAGE_MASK, PAGE_SIZE, 0);
	if (IS_ERR(page->umem)) {
		rv = PTR_ERR(page->umem);
		kfree(page);
		goto out;
	}

	list_add(&page->list, &ctx->dbrecords_page_list);

found:
	*dma_addr = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
		    (dbrecords_va & ~PAGE_MASK);
	*dbr_page = page;
	page->refcnt++;

out:
	mutex_unlock(&ctx->dbrecords_page_mutex);

	return rv;
}
static void
erdma_unmap_user_dbrecords(struct erdma_ucontext *ctx,
			   struct erdma_user_dbrecords_page **dbr_page)
{
	if (!ctx || !(*dbr_page))
		return;

	mutex_lock(&ctx->dbrecords_page_mutex);
	if (--(*dbr_page)->refcnt == 0) {
		list_del(&(*dbr_page)->list);
		ib_umem_release((*dbr_page)->umem);
		kfree(*dbr_page);
	}

	*dbr_page = NULL;
	mutex_unlock(&ctx->dbrecords_page_mutex);
}
static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
			u64 va, u32 len, u64 db_info_va)
{
	dma_addr_t db_info_dma_addr;
	u32 rq_offset;
	int ret;

	if (len < (ALIGN(qp->attrs.sq_size * SQEBB_SIZE, ERDMA_HW_PAGE_SIZE) +
		   qp->attrs.rq_size * RQE_SIZE))
		return -EINVAL;

	ret = get_mtt_entries(qp->dev, &qp->user_qp.sq_mem, va,
			      qp->attrs.sq_size << SQEBB_SHIFT, 0, va,
			      (SZ_1M - SZ_4K), true);
	if (ret)
		return ret;

	rq_offset = ALIGN(qp->attrs.sq_size << SQEBB_SHIFT, ERDMA_HW_PAGE_SIZE);
	qp->user_qp.rq_offset = rq_offset;

	ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mem, va + rq_offset,
			      qp->attrs.rq_size << RQE_SHIFT, 0, va + rq_offset,
			      (SZ_1M - SZ_4K), true);
	if (ret)
		goto put_sq_mtt;

	ret = erdma_map_user_dbrecords(uctx, db_info_va,
				       &qp->user_qp.user_dbr_page,
				       &db_info_dma_addr);
	if (ret)
		goto put_rq_mtt;

	qp->user_qp.sq_db_info_dma_addr = db_info_dma_addr;
	qp->user_qp.rq_db_info_dma_addr = db_info_dma_addr + ERDMA_DB_SIZE;

	return 0;

put_rq_mtt:
	put_mtt_entries(qp->dev, &qp->user_qp.rq_mem);

put_sq_mtt:
	put_mtt_entries(qp->dev, &qp->user_qp.sq_mem);

	return ret;
}
static void free_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx)
{
	put_mtt_entries(qp->dev, &qp->user_qp.sq_mem);
	put_mtt_entries(qp->dev, &qp->user_qp.rq_mem);
	erdma_unmap_user_dbrecords(uctx, &qp->user_qp.user_dbr_page);
}
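/*
 * The SQ depth is over-provisioned by ERDMA_MAX_WQEBB_PER_SQE because a
 * single work request may consume multiple WQE building blocks; both queue
 * depths are rounded up to powers of two to match the ilog2()-encoded depth
 * fields of the CREATE_QP command.
 */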
int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
		    struct ib_udata *udata)
{
	struct erdma_qp *qp = to_eqp(ibqp);
	struct erdma_dev *dev = to_edev(ibqp->device);
	struct erdma_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct erdma_ucontext, ibucontext);
	struct erdma_ureq_create_qp ureq;
	struct erdma_uresp_create_qp uresp;
	int ret;

	ret = erdma_qp_validate_cap(dev, attrs);
	if (ret)
		goto err_out;

	ret = erdma_qp_validate_attr(dev, attrs);
	if (ret)
		goto err_out;

	qp->scq = to_ecq(attrs->send_cq);
	qp->rcq = to_ecq(attrs->recv_cq);
	qp->dev = dev;
	qp->attrs.cc = dev->attrs.cc;

	init_rwsem(&qp->state_lock);
	kref_init(&qp->ref);
	init_completion(&qp->safe_free);

	ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
			      XA_LIMIT(1, dev->attrs.max_qp - 1),
			      &dev->next_alloc_qpn, GFP_KERNEL);
	if (ret < 0) {
		ret = -ENOMEM;
		goto err_out;
	}

	qp->attrs.sq_size = roundup_pow_of_two(attrs->cap.max_send_wr *
					       ERDMA_MAX_WQEBB_PER_SQE);
	qp->attrs.rq_size = roundup_pow_of_two(attrs->cap.max_recv_wr);

	if (uctx) {
		ret = ib_copy_from_udata(&ureq, udata,
					 min(sizeof(ureq), udata->inlen));
		if (ret)
			goto err_out_xa;

		ret = init_user_qp(qp, uctx, ureq.qbuf_va, ureq.qbuf_len,
				   ureq.db_record_va);
		if (ret)
			goto err_out_xa;

		memset(&uresp, 0, sizeof(uresp));

		uresp.num_sqe = qp->attrs.sq_size;
		uresp.num_rqe = qp->attrs.rq_size;
		uresp.qp_id = QP_ID(qp);
		uresp.rq_offset = qp->user_qp.rq_offset;

		ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (ret)
			goto err_out_cmd;
	} else {
		init_kernel_qp(dev, qp, attrs);
	}

	qp->attrs.max_send_sge = attrs->cap.max_send_sge;
	qp->attrs.max_recv_sge = attrs->cap.max_recv_sge;
	qp->attrs.state = ERDMA_QP_STATE_IDLE;
	INIT_DELAYED_WORK(&qp->reflush_dwork, erdma_flush_worker);

	ret = create_qp_cmd(uctx, qp);
	if (ret)
		goto err_out_cmd;

	spin_lock_init(&qp->lock);

	return 0;

err_out_cmd:
	if (uctx)
		free_user_qp(qp, uctx);
	else
		free_kernel_qp(qp);
err_out_xa:
	xa_erase(&dev->qp_xa, QP_ID(qp));
err_out:
	return ret;
}
static int erdma_create_stag(struct erdma_dev *dev, u32 *stag)
{
	int stag_idx;

	stag_idx = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX]);
	if (stag_idx < 0)
		return stag_idx;

	/* For now, we always let key field be zero. */
	*stag = (stag_idx << 8);

	return 0;
}
struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	struct erdma_dev *dev = to_edev(ibpd->device);
	struct erdma_mr *mr;
	u32 stag;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	ret = erdma_create_stag(dev, &stag);
	if (ret)
		goto out_free;

	mr->type = ERDMA_MR_TYPE_DMA;

	mr->ibmr.lkey = stag;
	mr->ibmr.rkey = stag;
	mr->ibmr.pd = ibpd;
	mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(acc);
	ret = regmr_cmd(dev, mr);
	if (ret)
		goto out_remove_stag;

	return &mr->ibmr;

out_remove_stag:
	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
		       mr->ibmr.lkey >> 8);

out_free:
	kfree(mr);

	return ERR_PTR(ret);
}
struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				u32 max_num_sg)
{
	struct erdma_mr *mr;
	struct erdma_dev *dev = to_edev(ibpd->device);
	u32 stag;
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EOPNOTSUPP);

	if (max_num_sg > ERDMA_MR_MAX_MTT_CNT)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	ret = erdma_create_stag(dev, &stag);
	if (ret)
		goto out_free;

	mr->type = ERDMA_MR_TYPE_FRMR;

	mr->ibmr.lkey = stag;
	mr->ibmr.rkey = stag;
	mr->ibmr.pd = ibpd;
	/* update it in FRMR. */
	mr->access = ERDMA_MR_ACC_LR | ERDMA_MR_ACC_LW | ERDMA_MR_ACC_RR |
		     ERDMA_MR_ACC_RW;

	mr->mem.page_size = PAGE_SIZE; /* update it later. */
	mr->mem.page_cnt = max_num_sg;
	mr->mem.mtt = erdma_create_mtt(dev, MTT_SIZE(max_num_sg), true);
	if (IS_ERR(mr->mem.mtt)) {
		ret = PTR_ERR(mr->mem.mtt);
		goto out_remove_stag;
	}

	ret = regmr_cmd(dev, mr);
	if (ret)
		goto out_destroy_mtt;

	return &mr->ibmr;

out_destroy_mtt:
	erdma_destroy_mtt(dev, mr->mem.mtt);

out_remove_stag:
	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
		       mr->ibmr.lkey >> 8);

out_free:
	kfree(mr);

	return ERR_PTR(ret);
}
static int erdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct erdma_mr *mr = to_emr(ibmr);

	if (mr->mem.mtt_nents >= mr->mem.page_cnt)
		return -1;

	mr->mem.mtt->buf[mr->mem.mtt_nents] = addr;
	mr->mem.mtt_nents++;

	return 0;
}
int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		    unsigned int *sg_offset)
{
	struct erdma_mr *mr = to_emr(ibmr);
	int num;

	mr->mem.mtt_nents = 0;

	num = ib_sg_to_pages(&mr->ibmr, sg, sg_nents, sg_offset,
			     erdma_set_page);

	return num;
}
struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				u64 virt, int access, struct ib_udata *udata)
{
	struct erdma_mr *mr = NULL;
	struct erdma_dev *dev = to_edev(ibpd->device);
	u32 stag;
	int ret;

	if (!len || len > dev->attrs.max_mr_size)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	ret = get_mtt_entries(dev, &mr->mem, start, len, access, virt,
			      SZ_2G - SZ_4K, false);
	if (ret)
		goto err_out_free;

	ret = erdma_create_stag(dev, &stag);
	if (ret)
		goto err_out_put_mtt;

	mr->ibmr.lkey = mr->ibmr.rkey = stag;
	mr->ibmr.pd = ibpd;
	mr->mem.va = virt;
	mr->mem.len = len;
	mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(access);
	mr->valid = 1;
	mr->type = ERDMA_MR_TYPE_NORMAL;

	ret = regmr_cmd(dev, mr);
	if (ret)
		goto err_out_mr;

	return &mr->ibmr;

err_out_mr:
	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
		       mr->ibmr.lkey >> 8);

err_out_put_mtt:
	put_mtt_entries(dev, &mr->mem);

err_out_free:
	kfree(mr);

	return ERR_PTR(ret);
}
int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct erdma_mr *mr = to_emr(ibmr);
	struct erdma_dev *dev = to_edev(ibmr->device);
	struct erdma_cmdq_dereg_mr_req req;
	int ret;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_DEREG_MR);

	req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) |
		  FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF);

	ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
	if (ret)
		return ret;

	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX], ibmr->lkey >> 8);

	put_mtt_entries(dev, &mr->mem);

	kfree(mr);

	return 0;
}
int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct erdma_cq *cq = to_ecq(ibcq);
	struct erdma_dev *dev = to_edev(ibcq->device);
	struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
		udata, struct erdma_ucontext, ibucontext);
	int err;
	struct erdma_cmdq_destroy_cq_req req;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_DESTROY_CQ);
	req.cqn = cq->cqn;

	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
	if (err)
		return err;

	if (rdma_is_kernel_res(&cq->ibcq.res)) {
		dma_free_coherent(&dev->pdev->dev,
				  WARPPED_BUFSIZE(cq->depth << CQE_SHIFT),
				  cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
	} else {
		erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
		put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
	}

	xa_erase(&dev->cq_xa, cq->cqn);

	return 0;
}
int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct erdma_qp *qp = to_eqp(ibqp);
	struct erdma_dev *dev = to_edev(ibqp->device);
	struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
		udata, struct erdma_ucontext, ibucontext);
	struct erdma_qp_attrs qp_attrs;
	int err;
	struct erdma_cmdq_destroy_qp_req req;

	down_write(&qp->state_lock);
	qp_attrs.state = ERDMA_QP_STATE_ERROR;
	erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
	up_write(&qp->state_lock);

	cancel_delayed_work_sync(&qp->reflush_dwork);

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_DESTROY_QP);
	req.qpn = QP_ID(qp);

	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
	if (err)
		return err;

	erdma_qp_put(qp);
	wait_for_completion(&qp->safe_free);

	if (rdma_is_kernel_res(&qp->ibqp.res)) {
		vfree(qp->kern_qp.swr_tbl);
		vfree(qp->kern_qp.rwr_tbl);
		dma_free_coherent(&dev->pdev->dev,
				  WARPPED_BUFSIZE(qp->attrs.rq_size << RQE_SHIFT),
				  qp->kern_qp.rq_buf, qp->kern_qp.rq_buf_dma_addr);
		dma_free_coherent(&dev->pdev->dev,
				  WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
				  qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);
	} else {
		put_mtt_entries(dev, &qp->user_qp.sq_mem);
		put_mtt_entries(dev, &qp->user_qp.rq_mem);
		erdma_unmap_user_dbrecords(ctx, &qp->user_qp.user_dbr_page);
	}

	if (qp->cep)
		erdma_cep_put(qp->cep);
	xa_erase(&dev->qp_xa, QP_ID(qp));

	return 0;
}
void erdma_qp_get_ref(struct ib_qp *ibqp)
{
	erdma_qp_get(to_eqp(ibqp));
}

void erdma_qp_put_ref(struct ib_qp *ibqp)
{
	erdma_qp_put(to_eqp(ibqp));
}
int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct erdma_user_mmap_entry *entry;
	pgprot_t prot;
	int err;

	rdma_entry = rdma_user_mmap_entry_get(ctx, vma);
	if (!rdma_entry)
		return -EINVAL;

	entry = to_emmap(rdma_entry);

	switch (entry->mmap_flag) {
	case ERDMA_MMAP_IO_NC:
		/* map doorbell. */
		prot = pgprot_device(vma->vm_page_prot);
		break;
	default:
		err = -EINVAL;
		goto put_entry;
	}

	err = rdma_user_mmap_io(ctx, vma, PFN_DOWN(entry->address), PAGE_SIZE,
				prot, rdma_entry);

put_entry:
	rdma_user_mmap_entry_put(rdma_entry);
	return err;
}
void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct erdma_user_mmap_entry *entry = to_emmap(rdma_entry);

	kfree(entry);
}
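/*
 * Doorbell space for a user context comes either from the fixed doorbell
 * regions in the function BAR (legacy mode, which requires CAP_SYS_RAWIO)
 * or from the per-context offsets returned by the ALLOC_DB command when the
 * device advertises the extended doorbell capability.
 */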
static int alloc_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx,
			      bool ext_db_en)
{
	struct erdma_cmdq_ext_db_req req = {};
	u64 val0, val1;
	int ret;

	/*
	 * CAP_SYS_RAWIO is required if hardware does not support extend
	 * doorbell mechanism.
	 */
	if (!ext_db_en && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (!ext_db_en) {
		ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET;
		ctx->rdb = dev->func_bar_addr + ERDMA_BAR_RQDB_SPACE_OFFSET;
		ctx->cdb = dev->func_bar_addr + ERDMA_BAR_CQDB_SPACE_OFFSET;
		return 0;
	}

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_ALLOC_DB);

	req.cfg = FIELD_PREP(ERDMA_CMD_EXT_DB_CQ_EN_MASK, 1) |
		  FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
		  FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);

	ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &val0, &val1);
	if (ret)
		return ret;

	ctx->ext_db.enable = true;
	ctx->ext_db.sdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_SDB);
	ctx->ext_db.rdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_RDB);
	ctx->ext_db.cdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_CDB);

	ctx->sdb = dev->func_bar_addr + (ctx->ext_db.sdb_off << PAGE_SHIFT);
	ctx->rdb = dev->func_bar_addr + (ctx->ext_db.rdb_off << PAGE_SHIFT);
	ctx->cdb = dev->func_bar_addr + (ctx->ext_db.cdb_off << PAGE_SHIFT);

	return 0;
}
static void free_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx)
{
	struct erdma_cmdq_ext_db_req req = {};
	int ret;

	if (!ctx->ext_db.enable)
		return;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_FREE_DB);

	req.cfg = FIELD_PREP(ERDMA_CMD_EXT_DB_CQ_EN_MASK, 1) |
		  FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
		  FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);

	req.sdb_off = ctx->ext_db.sdb_off;
	req.rdb_off = ctx->ext_db.rdb_off;
	req.cdb_off = ctx->ext_db.cdb_off;

	ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
	if (ret)
		ibdev_err_ratelimited(&dev->ibdev,
				      "free db resources failed %d", ret);
}
static void erdma_uctx_user_mmap_entries_remove(struct erdma_ucontext *uctx)
{
	rdma_user_mmap_entry_remove(uctx->sq_db_mmap_entry);
	rdma_user_mmap_entry_remove(uctx->rq_db_mmap_entry);
	rdma_user_mmap_entry_remove(uctx->cq_db_mmap_entry);
}
int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
{
	struct erdma_ucontext *ctx = to_ectx(ibctx);
	struct erdma_dev *dev = to_edev(ibctx->device);
	int ret;
	struct erdma_uresp_alloc_ctx uresp = {};

	if (atomic_inc_return(&dev->num_ctx) > ERDMA_MAX_CONTEXT) {
		ret = -ENOMEM;
		goto err_out;
	}

	if (udata->outlen < sizeof(uresp)) {
		ret = -EINVAL;
		goto err_out;
	}

	INIT_LIST_HEAD(&ctx->dbrecords_page_list);
	mutex_init(&ctx->dbrecords_page_mutex);

	ret = alloc_db_resources(dev, ctx,
				 !!(dev->attrs.cap_flags &
				    ERDMA_DEV_CAP_FLAGS_EXTEND_DB));
	if (ret)
		goto err_out;

	ctx->sq_db_mmap_entry = erdma_user_mmap_entry_insert(
		ctx, (void *)ctx->sdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.sdb);
	if (!ctx->sq_db_mmap_entry) {
		ret = -ENOMEM;
		goto err_free_ext_db;
	}

	ctx->rq_db_mmap_entry = erdma_user_mmap_entry_insert(
		ctx, (void *)ctx->rdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.rdb);
	if (!ctx->rq_db_mmap_entry) {
		ret = -ENOMEM;
		goto err_put_mmap_entries;
	}

	ctx->cq_db_mmap_entry = erdma_user_mmap_entry_insert(
		ctx, (void *)ctx->cdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.cdb);
	if (!ctx->cq_db_mmap_entry) {
		ret = -ENOMEM;
		goto err_put_mmap_entries;
	}

	uresp.dev_id = dev->pdev->device;

	ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (ret)
		goto err_put_mmap_entries;

	return 0;

err_put_mmap_entries:
	erdma_uctx_user_mmap_entries_remove(ctx);

err_free_ext_db:
	free_db_resources(dev, ctx);

err_out:
	atomic_dec(&dev->num_ctx);
	return ret;
}
void erdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct erdma_dev *dev = to_edev(ibctx->device);
	struct erdma_ucontext *ctx = to_ectx(ibctx);

	erdma_uctx_user_mmap_entries_remove(ctx);
	free_db_resources(dev, ctx);
	atomic_dec(&dev->num_ctx);
}
static int ib_qp_state_to_erdma_qp_state[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = ERDMA_QP_STATE_IDLE,
	[IB_QPS_INIT] = ERDMA_QP_STATE_IDLE,
	[IB_QPS_RTR] = ERDMA_QP_STATE_RTR,
	[IB_QPS_RTS] = ERDMA_QP_STATE_RTS,
	[IB_QPS_SQD] = ERDMA_QP_STATE_CLOSING,
	[IB_QPS_SQE] = ERDMA_QP_STATE_TERMINATE,
	[IB_QPS_ERR] = ERDMA_QP_STATE_ERROR
};
int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		    struct ib_udata *udata)
{
	struct erdma_qp_attrs new_attrs;
	enum erdma_qp_attr_mask erdma_attr_mask = 0;
	struct erdma_qp *qp = to_eqp(ibqp);
	int ret = 0;

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	memset(&new_attrs, 0, sizeof(new_attrs));

	if (attr_mask & IB_QP_STATE) {
		new_attrs.state = ib_qp_state_to_erdma_qp_state[attr->qp_state];

		erdma_attr_mask |= ERDMA_QP_ATTR_STATE;
	}

	down_write(&qp->state_lock);

	ret = erdma_modify_qp_internal(qp, &new_attrs, erdma_attr_mask);

	up_write(&qp->state_lock);

	return ret;
}
int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		   int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct erdma_qp *qp;
	struct erdma_dev *dev;

	if (ibqp && qp_attr && qp_init_attr) {
		qp = to_eqp(ibqp);
		dev = to_edev(ibqp->device);
	} else {
		return -EINVAL;
	}

	qp_attr->cap.max_inline_data = ERDMA_MAX_INLINE;
	qp_init_attr->cap.max_inline_data = ERDMA_MAX_INLINE;

	qp_attr->cap.max_send_wr = qp->attrs.sq_size;
	qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
	qp_attr->cap.max_send_sge = qp->attrs.max_send_sge;
	qp_attr->cap.max_recv_sge = qp->attrs.max_recv_sge;

	qp_attr->path_mtu = ib_mtu_int_to_enum(dev->netdev->mtu);
	qp_attr->max_rd_atomic = qp->attrs.irq_size;
	qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;

	qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ;

	qp_init_attr->cap = qp_attr->cap;

	return 0;
}
static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
			      struct erdma_ureq_create_cq *ureq)
{
	int ret;
	struct erdma_dev *dev = to_edev(cq->ibcq.device);

	ret = get_mtt_entries(dev, &cq->user_cq.qbuf_mem, ureq->qbuf_va,
			      ureq->qbuf_len, 0, ureq->qbuf_va, SZ_64M - SZ_4K,
			      true);
	if (ret)
		return ret;

	ret = erdma_map_user_dbrecords(ctx, ureq->db_record_va,
				       &cq->user_cq.user_dbr_page,
				       &cq->user_cq.db_info_dma_addr);
	if (ret)
		put_mtt_entries(dev, &cq->user_cq.qbuf_mem);

	return ret;
}
static int erdma_init_kernel_cq(struct erdma_cq *cq)
{
	struct erdma_dev *dev = to_edev(cq->ibcq.device);

	cq->kern_cq.qbuf =
		dma_alloc_coherent(&dev->pdev->dev,
				   WARPPED_BUFSIZE(cq->depth << CQE_SHIFT),
				   &cq->kern_cq.qbuf_dma_addr, GFP_KERNEL);
	if (!cq->kern_cq.qbuf)
		return -ENOMEM;

	cq->kern_cq.db_record =
		(u64 *)(cq->kern_cq.qbuf + (cq->depth << CQE_SHIFT));
	spin_lock_init(&cq->kern_cq.lock);
	/* use default cqdb addr */
	cq->kern_cq.db = dev->func_bar + ERDMA_BAR_CQDB_SPACE_OFFSET;

	return 0;
}
int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		    struct ib_udata *udata)
{
	struct erdma_cq *cq = to_ecq(ibcq);
	struct erdma_dev *dev = to_edev(ibcq->device);
	unsigned int depth = attr->cqe;
	int ret;
	struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
		udata, struct erdma_ucontext, ibucontext);

	if (depth > dev->attrs.max_cqe)
		return -EINVAL;

	depth = roundup_pow_of_two(depth);
	cq->ibcq.cqe = depth;
	cq->depth = depth;
	cq->assoc_eqn = attr->comp_vector + 1;

	ret = xa_alloc_cyclic(&dev->cq_xa, &cq->cqn, cq,
			      XA_LIMIT(1, dev->attrs.max_cq - 1),
			      &dev->next_alloc_cqn, GFP_KERNEL);
	if (ret < 0)
		return ret;

	if (!rdma_is_kernel_res(&ibcq->res)) {
		struct erdma_ureq_create_cq ureq;
		struct erdma_uresp_create_cq uresp;

		ret = ib_copy_from_udata(&ureq, udata,
					 min(udata->inlen, sizeof(ureq)));
		if (ret)
			goto err_out_xa;

		ret = erdma_init_user_cq(ctx, cq, &ureq);
		if (ret)
			goto err_out_xa;

		uresp.cq_id = cq->cqn;
		uresp.num_cqe = depth;

		ret = ib_copy_to_udata(udata, &uresp,
				       min(sizeof(uresp), udata->outlen));
		if (ret)
			goto err_free_res;
	} else {
		ret = erdma_init_kernel_cq(cq);
		if (ret)
			goto err_out_xa;
	}

	ret = create_cq_cmd(ctx, cq);
	if (ret)
		goto err_free_res;

	return 0;

err_free_res:
	if (!rdma_is_kernel_res(&ibcq->res)) {
		erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
		put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
	} else {
		dma_free_coherent(&dev->pdev->dev,
				  WARPPED_BUFSIZE(depth << CQE_SHIFT),
				  cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
	}

err_out_xa:
	xa_erase(&dev->cq_xa, cq->cqn);

	return ret;
}
void erdma_set_mtu(struct erdma_dev *dev, u32 mtu)
{
	struct erdma_cmdq_config_mtu_req req;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_CONF_MTU);
	req.mtu = mtu;

	erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
{
	struct ib_event event;

	event.device = &dev->ibdev;
	event.element.port_num = 1;
	event.event = reason;

	ib_dispatch_event(&event);
}