1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
6 * irdma_query_device - get device attributes
7 * @ibdev: device pointer from stack
8 * @props: returning device attributes
11 static int irdma_query_device(struct ib_device *ibdev,
12 struct ib_device_attr *props,
13 struct ib_udata *udata)
15 struct irdma_device *iwdev = to_iwdev(ibdev);
16 struct irdma_pci_f *rf = iwdev->rf;
17 struct pci_dev *pcidev = iwdev->rf->pcidev;
18 struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
20 if (udata->inlen || udata->outlen)
23 memset(props, 0, sizeof(*props));
24 ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
25 props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
26 irdma_fw_minor_ver(&rf->sc_dev);
27 props->device_cap_flags = iwdev->device_cap_flags;
28 props->vendor_id = pcidev->vendor;
29 props->vendor_part_id = pcidev->device;
31 props->hw_ver = rf->pcidev->revision;
32 props->page_size_cap = hw_attrs->page_size_cap;
33 props->max_mr_size = hw_attrs->max_mr_size;
34 props->max_qp = rf->max_qp - rf->used_qps;
35 props->max_qp_wr = hw_attrs->max_qp_wr;
36 props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
37 props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
38 props->max_cq = rf->max_cq - rf->used_cqs;
39 props->max_cqe = rf->max_cqe;
40 props->max_mr = rf->max_mr - rf->used_mrs;
41 props->max_mw = props->max_mr;
42 props->max_pd = rf->max_pd - rf->used_pds;
43 props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
44 props->max_qp_rd_atom = hw_attrs->max_hw_ird;
45 props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
46 if (rdma_protocol_roce(ibdev, 1))
47 props->max_pkeys = IRDMA_PKEY_TBL_SZ;
48 props->max_ah = rf->max_ah;
49 props->max_mcast_grp = rf->max_mcg;
50 props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
51 props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
52 props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
53 #define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff
54 if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
55 props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK;
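/*
 * Editor's sketch (not part of the driver): how a consumer could unpack
 * the 64-bit fw_ver reported above, assuming only the packing used in
 * this function (major version in the high 32 bits, minor in the low).
 */
#if 0 /* illustration only */
static inline void example_unpack_fw_ver(u64 fw_ver, u32 *major, u32 *minor)
{
	*major = (u32)(fw_ver >> 32);        /* irdma_fw_major_ver() half */
	*minor = (u32)(fw_ver & 0xffffffff); /* irdma_fw_minor_ver() half */
}
#endif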
61 * irdma_get_eth_speed_and_width - Get IB port speed and width from netdev speed
62 * @link_speed: netdev phy link speed
63 * @active_speed: IB port speed
64 * @active_width: IB port width
66 static void irdma_get_eth_speed_and_width(u32 link_speed, u16 *active_speed,
69 if (link_speed <= SPEED_1000) {
70 *active_width = IB_WIDTH_1X;
71 *active_speed = IB_SPEED_SDR;
72 } else if (link_speed <= SPEED_10000) {
73 *active_width = IB_WIDTH_1X;
74 *active_speed = IB_SPEED_FDR10;
75 } else if (link_speed <= SPEED_20000) {
76 *active_width = IB_WIDTH_4X;
77 *active_speed = IB_SPEED_DDR;
78 } else if (link_speed <= SPEED_25000) {
79 *active_width = IB_WIDTH_1X;
80 *active_speed = IB_SPEED_EDR;
81 } else if (link_speed <= SPEED_40000) {
82 *active_width = IB_WIDTH_4X;
83 *active_speed = IB_SPEED_FDR10;
85 *active_width = IB_WIDTH_4X;
86 *active_speed = IB_SPEED_EDR;
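/*
 * The branches above pick the IB (width, per-lane speed) pair whose
 * aggregate rate approximates the Ethernet link rate, e.g. 40 GbE maps
 * to 4X FDR10 because 4 lanes x ~10 Gbps = ~40 Gbps; anything faster
 * falls through to 4X EDR.
 */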
91 * irdma_query_port - get port attributes
92 * @ibdev: device pointer from stack
93 * @port: port number for query
94 * @props: returning port attributes
96 static int irdma_query_port(struct ib_device *ibdev, u32 port,
97 struct ib_port_attr *props)
99 struct irdma_device *iwdev = to_iwdev(ibdev);
100 struct net_device *netdev = iwdev->netdev;
102 /* no need to zero out props here. done by caller */
104 props->max_mtu = IB_MTU_4096;
105 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
110 if (netif_carrier_ok(netdev) && netif_running(netdev)) {
111 props->state = IB_PORT_ACTIVE;
112 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
114 props->state = IB_PORT_DOWN;
115 props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
117 irdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed,
118 &props->active_width);
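/* Note: the speed/width reported here are derived from a fixed
 * SPEED_100000 rather than the netdev's actual negotiated speed.
 */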
120 if (rdma_protocol_roce(ibdev, 1)) {
121 props->gid_tbl_len = 32;
122 props->ip_gids = true;
123 props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
125 props->gid_tbl_len = 1;
127 props->qkey_viol_cntr = 0;
128 props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
129 props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
135 * irdma_disassociate_ucontext - Disassociate user context
136 * @context: ib user context
138 static void irdma_disassociate_ucontext(struct ib_ucontext *context)
142 static int irdma_mmap_legacy(struct irdma_ucontext *ucontext,
143 struct vm_area_struct *vma)
147 if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
150 vma->vm_private_data = ucontext;
151 pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
152 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
154 return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
155 pgprot_noncached(vma->vm_page_prot), NULL);
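/*
 * Sketch of the pfn arithmetic above (editor's illustration, assuming a
 * page-aligned doorbell offset): the doorbell's physical address is the
 * BAR0 base plus the register byte offset, and the pfn is that address
 * expressed in page units.
 */
#if 0 /* illustration only */
static inline unsigned long example_db_pfn(phys_addr_t bar0_base,
					   uintptr_t db_byte_off)
{
	return (unsigned long)((bar0_base + db_byte_off) >> PAGE_SHIFT);
}
#endif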
158 static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
160 struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
165 static struct rdma_user_mmap_entry*
166 irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
167 enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
169 struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
175 entry->bar_offset = bar_offset;
176 entry->mmap_flag = mmap_flag;
178 ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
179 &entry->rdma_entry, PAGE_SIZE);
184 *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
186 return &entry->rdma_entry;
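/*
 * Typical use of the helper above (editor's sketch): insert a BAR
 * offset and return the resulting mmap offset to user space, as done
 * for the doorbell page in irdma_alloc_ucontext() below.
 */
#if 0 /* illustration only */
	u64 mmap_off;
	struct rdma_user_mmap_entry *entry =
		irdma_user_mmap_entry_insert(ucontext, bar_off,
					     IRDMA_MMAP_IO_NC, &mmap_off);
	if (!entry)
		return -ENOMEM;
	/* mmap_off is then copied back to user space via udata */
#endif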
190 * irdma_mmap - user memory map
191 * @context: context created during alloc
192 * @vma: kernel info for user memory map
194 static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
196 struct rdma_user_mmap_entry *rdma_entry;
197 struct irdma_user_mmap_entry *entry;
198 struct irdma_ucontext *ucontext;
202 ucontext = to_ucontext(context);
204 /* Legacy support for libi40iw with hard-coded mmap key */
205 if (ucontext->legacy_mode)
206 return irdma_mmap_legacy(ucontext, vma);
208 rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
210 ibdev_dbg(&ucontext->iwdev->ibdev,
211 "VERBS: pgoff[0x%lx] does not have valid entry\n",
216 entry = to_irdma_mmap_entry(rdma_entry);
217 ibdev_dbg(&ucontext->iwdev->ibdev,
218 "VERBS: bar_offset [0x%llx] mmap_flag [%d]\n",
219 entry->bar_offset, entry->mmap_flag);
221 pfn = (entry->bar_offset +
222 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
224 switch (entry->mmap_flag) {
225 case IRDMA_MMAP_IO_NC:
226 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
227 pgprot_noncached(vma->vm_page_prot),
230 case IRDMA_MMAP_IO_WC:
231 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
232 pgprot_writecombine(vma->vm_page_prot),
240 ibdev_dbg(&ucontext->iwdev->ibdev,
241 "VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n",
242 entry->bar_offset, entry->mmap_flag, ret);
243 rdma_user_mmap_entry_put(rdma_entry);
249 * irdma_alloc_push_page - allocate a push page for qp
252 static void irdma_alloc_push_page(struct irdma_qp *iwqp)
254 struct irdma_cqp_request *cqp_request;
255 struct cqp_cmds_info *cqp_info;
256 struct irdma_device *iwdev = iwqp->iwdev;
257 struct irdma_sc_qp *qp = &iwqp->sc_qp;
258 enum irdma_status_code status;
260 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
264 cqp_info = &cqp_request->info;
265 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
266 cqp_info->post_sq = 1;
267 cqp_info->in.u.manage_push_page.info.push_idx = 0;
268 cqp_info->in.u.manage_push_page.info.qs_handle =
269 qp->vsi->qos[qp->user_pri].qs_handle;
270 cqp_info->in.u.manage_push_page.info.free_page = 0;
271 cqp_info->in.u.manage_push_page.info.push_page_type = 0;
272 cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
273 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
275 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
276 if (!status && cqp_request->compl_info.op_ret_val <
277 iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
278 qp->push_idx = cqp_request->compl_info.op_ret_val;
282 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
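/*
 * irdma_alloc_push_page() follows the driver's standard control-QP
 * (CQP) sequence: allocate a request, fill in cqp_info, post it, then
 * drop the reference. Condensed sketch of that life cycle (editor's
 * illustration, error handling elided):
 */
#if 0 /* illustration only */
	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
	cqp_request->info.cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE; /* or any op */
	status = irdma_handle_cqp_op(rf, cqp_request); /* post and wait */
	irdma_put_cqp_request(&rf->cqp, cqp_request);  /* drop reference */
#endif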
286 * irdma_alloc_ucontext - Allocate the user context data structure
287 * @uctx: uverbs context pointer
290 * This keeps track of all objects associated with a particular
293 static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
294 struct ib_udata *udata)
296 struct ib_device *ibdev = uctx->device;
297 struct irdma_device *iwdev = to_iwdev(ibdev);
298 struct irdma_alloc_ucontext_req req;
299 struct irdma_alloc_ucontext_resp uresp = {};
300 struct irdma_ucontext *ucontext = to_ucontext(uctx);
301 struct irdma_uk_attrs *uk_attrs;
303 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
306 if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
309 ucontext->iwdev = iwdev;
310 ucontext->abi_ver = req.userspace_ver;
312 uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
313 /* GEN_1 legacy support with libi40iw */
314 if (udata->outlen < sizeof(uresp)) {
315 if (uk_attrs->hw_rev != IRDMA_GEN_1)
318 ucontext->legacy_mode = true;
319 uresp.max_qps = iwdev->rf->max_qp;
320 uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
321 uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
322 uresp.kernel_ver = req.userspace_ver;
323 if (ib_copy_to_udata(udata, &uresp,
324 min(sizeof(uresp), udata->outlen)))
327 u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
329 ucontext->db_mmap_entry =
330 irdma_user_mmap_entry_insert(ucontext, bar_off,
333 if (!ucontext->db_mmap_entry)
336 uresp.kernel_ver = IRDMA_ABI_VER;
337 uresp.feature_flags = uk_attrs->feature_flags;
338 uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
339 uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
340 uresp.max_hw_inline = uk_attrs->max_hw_inline;
341 uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
342 uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
343 uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
344 uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
345 uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
346 uresp.hw_rev = uk_attrs->hw_rev;
347 if (ib_copy_to_udata(udata, &uresp,
348 min(sizeof(uresp), udata->outlen))) {
349 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
354 INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
355 spin_lock_init(&ucontext->cq_reg_mem_list_lock);
356 INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
357 spin_lock_init(&ucontext->qp_reg_mem_list_lock);
362 ibdev_err(&iwdev->ibdev,
363 "Invalid userspace driver version detected. Detected version %d, should be %d\n",
364 req.userspace_ver, IRDMA_ABI_VER);
369 * irdma_dealloc_ucontext - deallocate the user context data structure
370 * @context: user context created during alloc
372 static void irdma_dealloc_ucontext(struct ib_ucontext *context)
374 struct irdma_ucontext *ucontext = to_ucontext(context);
376 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
380 * irdma_alloc_pd - allocate protection domain
384 static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
386 struct irdma_pd *iwpd = to_iwpd(pd);
387 struct irdma_device *iwdev = to_iwdev(pd->device);
388 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
389 struct irdma_pci_f *rf = iwdev->rf;
390 struct irdma_alloc_pd_resp uresp = {};
391 struct irdma_sc_pd *sc_pd;
395 err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
400 sc_pd = &iwpd->sc_pd;
402 struct irdma_ucontext *ucontext =
403 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
405 irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
407 if (ib_copy_to_udata(udata, &uresp,
408 min(sizeof(uresp), udata->outlen))) {
413 irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
418 irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
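/*
 * Note the recurring ABI idiom above: responses are copied with
 * min(sizeof(uresp), udata->outlen) so an older user library that
 * allocated a smaller response struct still works; newer trailing
 * fields are simply not copied to it. Editor's sketch:
 */
#if 0 /* illustration only */
	if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)))
		err = -EFAULT; /* copy fault, not a short-buffer error */
#endif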
424 * irdma_dealloc_pd - deallocate pd
425 * @ibpd: ptr of pd to be deallocated
428 static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
430 struct irdma_pd *iwpd = to_iwpd(ibpd);
431 struct irdma_device *iwdev = to_iwdev(ibpd->device);
433 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
439 * irdma_get_pbl - Retrieve pbl from a list given a virtual
441 * @va: user virtual address
442 * @pbl_list: pbl list to search in (QP's or CQ's)
444 static struct irdma_pbl *irdma_get_pbl(unsigned long va,
445 struct list_head *pbl_list)
447 struct irdma_pbl *iwpbl;
449 list_for_each_entry(iwpbl, pbl_list, list) {
450 if (iwpbl->user_base == va) {
451 list_del(&iwpbl->list);
452 iwpbl->on_list = false;
461 * irdma_clean_cqes - clean cq entries for qp
462 * @iwqp: qp ptr (user or kernel)
465 static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
467 struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
470 spin_lock_irqsave(&iwcq->lock, flags);
471 irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
472 spin_unlock_irqrestore(&iwcq->lock, flags);
475 static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
477 if (iwqp->push_db_mmap_entry) {
478 rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
479 iwqp->push_db_mmap_entry = NULL;
481 if (iwqp->push_wqe_mmap_entry) {
482 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
483 iwqp->push_wqe_mmap_entry = NULL;
487 static int irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
488 struct irdma_qp *iwqp,
489 u64 *push_wqe_mmap_key,
490 u64 *push_db_mmap_key)
492 struct irdma_device *iwdev = ucontext->iwdev;
495 rsvd = IRDMA_PF_BAR_RSVD;
496 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
497 /* skip over db page */
498 bar_off += IRDMA_HW_PAGE_SIZE;
500 bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE;
501 iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
502 bar_off, IRDMA_MMAP_IO_WC,
504 if (!iwqp->push_wqe_mmap_entry)
507 /* push doorbell page */
508 bar_off += IRDMA_HW_PAGE_SIZE;
509 iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
510 bar_off, IRDMA_MMAP_IO_NC,
512 if (!iwqp->push_db_mmap_entry) {
513 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
521 * irdma_destroy_qp - destroy qp
522 * @ibqp: qp's ib pointer, also used to get to the device's qp address
525 static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
527 struct irdma_qp *iwqp = to_iwqp(ibqp);
528 struct irdma_device *iwdev = iwqp->iwdev;
530 iwqp->sc_qp.qp_uk.destroy_pending = true;
532 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
533 irdma_modify_qp_to_err(&iwqp->sc_qp);
535 irdma_qp_rem_ref(&iwqp->ibqp);
536 wait_for_completion(&iwqp->free_qp);
537 irdma_free_lsmm_rsrc(iwqp);
538 irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
540 if (!iwqp->user_mode) {
542 irdma_clean_cqes(iwqp, iwqp->iwscq);
543 if (iwqp->iwrcq != iwqp->iwscq)
544 irdma_clean_cqes(iwqp, iwqp->iwrcq);
547 irdma_remove_push_mmap_entries(iwqp);
548 irdma_free_qp_rsrc(iwqp);
554 * irdma_setup_virt_qp - setup for allocation of virtual qp
555 * @iwdev: irdma device
557 * @init_info: initialize info to return
559 static void irdma_setup_virt_qp(struct irdma_device *iwdev,
560 struct irdma_qp *iwqp,
561 struct irdma_qp_init_info *init_info)
563 struct irdma_pbl *iwpbl = iwqp->iwpbl;
564 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
566 iwqp->page = qpmr->sq_page;
567 init_info->shadow_area_pa = qpmr->shadow;
568 if (iwpbl->pbl_allocated) {
569 init_info->virtual_map = true;
570 init_info->sq_pa = qpmr->sq_pbl.idx;
571 init_info->rq_pa = qpmr->rq_pbl.idx;
573 init_info->sq_pa = qpmr->sq_pbl.addr;
574 init_info->rq_pa = qpmr->rq_pbl.addr;
579 * irdma_setup_kmode_qp - setup initialization for kernel mode qp
580 * @iwdev: iwarp device
581 * @iwqp: qp ptr (user or kernel)
582 * @info: initialize info to return
583 * @init_attr: Initial QP create attributes
585 static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
586 struct irdma_qp *iwqp,
587 struct irdma_qp_init_info *info,
588 struct ib_qp_init_attr *init_attr)
590 struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
591 u32 sqdepth, rqdepth;
594 enum irdma_status_code status;
595 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
596 struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
598 irdma_get_wqe_shift(uk_attrs,
599 uk_attrs->hw_rev >= IRDMA_GEN_2 ? ukinfo->max_sq_frag_cnt + 1 :
600 ukinfo->max_sq_frag_cnt,
601 ukinfo->max_inline_data, &sqshift);
602 status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift,
607 if (uk_attrs->hw_rev == IRDMA_GEN_1)
608 rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
610 irdma_get_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, 0,
613 status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift,
618 iwqp->kqp.sq_wrid_mem =
619 kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
620 if (!iwqp->kqp.sq_wrid_mem)
623 iwqp->kqp.rq_wrid_mem =
624 kcalloc(rqdepth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
625 if (!iwqp->kqp.rq_wrid_mem) {
626 kfree(iwqp->kqp.sq_wrid_mem);
627 iwqp->kqp.sq_wrid_mem = NULL;
631 ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
632 ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
634 size = (sqdepth + rqdepth) * IRDMA_QP_WQE_MIN_SIZE;
635 size += (IRDMA_SHADOW_AREA_SIZE << 3);
637 mem->size = ALIGN(size, 256);
638 mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
639 &mem->pa, GFP_KERNEL);
641 kfree(iwqp->kqp.sq_wrid_mem);
642 iwqp->kqp.sq_wrid_mem = NULL;
643 kfree(iwqp->kqp.rq_wrid_mem);
644 iwqp->kqp.rq_wrid_mem = NULL;
648 ukinfo->sq = mem->va;
649 info->sq_pa = mem->pa;
650 ukinfo->rq = &ukinfo->sq[sqdepth];
651 info->rq_pa = info->sq_pa + (sqdepth * IRDMA_QP_WQE_MIN_SIZE);
652 ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
653 info->shadow_area_pa = info->rq_pa + (rqdepth * IRDMA_QP_WQE_MIN_SIZE);
654 ukinfo->sq_size = sqdepth >> sqshift;
655 ukinfo->rq_size = rqdepth >> rqshift;
656 ukinfo->qp_id = iwqp->ibqp.qp_num;
658 init_attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
659 init_attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
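/*
 * The ring math above works in quanta of IRDMA_QP_WQE_MIN_SIZE: the
 * depths returned by irdma_get_sqdepth()/irdma_get_rqdepth() count
 * quanta and one WQE spans (1 << shift) quanta, hence the
 * "depth >> shift" conversions. Worked example (assumed numbers):
 */
#if 0 /* illustration only */
	u32 depth = 256, shift = 2;             /* 256 quanta, 4 per WQE */
	u32 wqes = depth >> shift;              /* 64 user-visible WQEs */
	u32 ring_bytes = depth * IRDMA_QP_WQE_MIN_SIZE;
#endif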
664 static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
666 struct irdma_pci_f *rf = iwqp->iwdev->rf;
667 struct irdma_cqp_request *cqp_request;
668 struct cqp_cmds_info *cqp_info;
669 struct irdma_create_qp_info *qp_info;
670 enum irdma_status_code status;
672 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
676 cqp_info = &cqp_request->info;
677 qp_info = &cqp_request->info.in.u.qp_create.info;
678 memset(qp_info, 0, sizeof(*qp_info));
679 qp_info->mac_valid = true;
680 qp_info->cq_num_valid = true;
681 qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
683 cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
684 cqp_info->post_sq = 1;
685 cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
686 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
687 status = irdma_handle_cqp_op(rf, cqp_request);
688 irdma_put_cqp_request(&rf->cqp, cqp_request);
690 return status ? -ENOMEM : 0;
693 static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
694 struct irdma_qp_host_ctx_info *ctx_info)
696 struct irdma_device *iwdev = iwqp->iwdev;
697 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
698 struct irdma_roce_offload_info *roce_info;
699 struct irdma_udp_offload_info *udp_info;
701 udp_info = &iwqp->udp_info;
702 udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
703 udp_info->cwnd = iwdev->roce_cwnd;
704 udp_info->rexmit_thresh = 2;
705 udp_info->rnr_nak_thresh = 2;
706 udp_info->src_port = 0xc000;
707 udp_info->dst_port = ROCE_V2_UDP_DPORT;
708 roce_info = &iwqp->roce_info;
709 ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);
711 roce_info->rd_en = true;
712 roce_info->wr_rdresp_en = true;
713 roce_info->bind_en = true;
714 roce_info->dcqcn_en = false;
715 roce_info->rtomin = 5;
717 roce_info->ack_credits = iwdev->roce_ackcreds;
718 roce_info->ird_size = dev->hw_attrs.max_hw_ird;
719 roce_info->ord_size = dev->hw_attrs.max_hw_ord;
721 if (!iwqp->user_mode) {
722 roce_info->priv_mode_en = true;
723 roce_info->fast_reg_en = true;
724 roce_info->udprivcq_en = true;
726 roce_info->roce_tver = 0;
728 ctx_info->roce_info = &iwqp->roce_info;
729 ctx_info->udp_info = &iwqp->udp_info;
730 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
733 static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
734 struct irdma_qp_host_ctx_info *ctx_info)
736 struct irdma_device *iwdev = iwqp->iwdev;
737 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
738 struct irdma_iwarp_offload_info *iwarp_info;
740 iwarp_info = &iwqp->iwarp_info;
741 ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
742 iwarp_info->rd_en = true;
743 iwarp_info->wr_rdresp_en = true;
744 iwarp_info->bind_en = true;
745 iwarp_info->ecn_en = true;
746 iwarp_info->rtomin = 5;
748 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
749 iwarp_info->ib_rd_en = true;
750 if (!iwqp->user_mode) {
751 iwarp_info->priv_mode_en = true;
752 iwarp_info->fast_reg_en = true;
754 iwarp_info->ddp_ver = 1;
755 iwarp_info->rdmap_ver = 1;
757 ctx_info->iwarp_info = &iwqp->iwarp_info;
758 ctx_info->iwarp_info_valid = true;
759 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
760 ctx_info->iwarp_info_valid = false;
763 static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
764 struct irdma_device *iwdev)
766 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
767 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
769 if (init_attr->create_flags)
772 if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
773 init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
774 init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
777 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
778 if (init_attr->qp_type != IB_QPT_RC &&
779 init_attr->qp_type != IB_QPT_UD &&
780 init_attr->qp_type != IB_QPT_GSI)
783 if (init_attr->qp_type != IB_QPT_RC)
791 * irdma_create_qp - create qp
793 * @init_attr: attributes for qp
794 * @udata: user data for create qp
796 static int irdma_create_qp(struct ib_qp *ibqp,
797 struct ib_qp_init_attr *init_attr,
798 struct ib_udata *udata)
800 struct ib_pd *ibpd = ibqp->pd;
801 struct irdma_pd *iwpd = to_iwpd(ibpd);
802 struct irdma_device *iwdev = to_iwdev(ibpd->device);
803 struct irdma_pci_f *rf = iwdev->rf;
804 struct irdma_qp *iwqp = to_iwqp(ibqp);
805 struct irdma_create_qp_req req;
806 struct irdma_create_qp_resp uresp = {};
808 enum irdma_status_code ret;
812 struct irdma_sc_qp *qp;
813 struct irdma_sc_dev *dev = &rf->sc_dev;
814 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
815 struct irdma_qp_init_info init_info = {};
816 struct irdma_qp_host_ctx_info *ctx_info;
819 err_code = irdma_validate_qp_attrs(init_attr, iwdev);
823 sq_size = init_attr->cap.max_send_wr;
824 rq_size = init_attr->cap.max_recv_wr;
826 init_info.vsi = &iwdev->vsi;
827 init_info.qp_uk_init_info.uk_attrs = uk_attrs;
828 init_info.qp_uk_init_info.sq_size = sq_size;
829 init_info.qp_uk_init_info.rq_size = rq_size;
830 init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
831 init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
832 init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
835 qp->qp_uk.back_qp = iwqp;
836 qp->qp_uk.lock = &iwqp->lock;
837 qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
840 iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE,
842 iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device,
843 iwqp->q2_ctx_mem.size,
844 &iwqp->q2_ctx_mem.pa,
846 if (!iwqp->q2_ctx_mem.va)
849 init_info.q2 = iwqp->q2_ctx_mem.va;
850 init_info.q2_pa = iwqp->q2_ctx_mem.pa;
851 init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
852 init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
854 if (init_attr->qp_type == IB_QPT_GSI)
857 err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
858 &qp_num, &rf->next_qp);
863 iwqp->ibqp.qp_num = qp_num;
865 iwqp->iwscq = to_iwcq(init_attr->send_cq);
866 iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
867 iwqp->host_ctx.va = init_info.host_ctx;
868 iwqp->host_ctx.pa = init_info.host_ctx_pa;
869 iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
871 init_info.pd = &iwpd->sc_pd;
872 init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
873 if (!rdma_protocol_roce(&iwdev->ibdev, 1))
874 init_info.qp_uk_init_info.first_sq_wq = 1;
875 iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
876 init_waitqueue_head(&iwqp->waitq);
877 init_waitqueue_head(&iwqp->mod_qp_waitq);
880 err_code = ib_copy_from_udata(&req, udata,
881 min(sizeof(req), udata->inlen));
883 ibdev_dbg(&iwdev->ibdev,
884 "VERBS: ib_copy_from_data fail\n");
888 iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
890 if (req.user_wqe_bufs) {
891 struct irdma_ucontext *ucontext =
892 rdma_udata_to_drv_context(udata,
893 struct irdma_ucontext,
896 init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
897 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
898 iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
899 &ucontext->qp_reg_mem_list);
900 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
904 ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
908 init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
909 irdma_setup_virt_qp(iwdev, iwqp, &init_info);
911 init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
912 err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
916 ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n");
920 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
921 if (init_attr->qp_type == IB_QPT_RC) {
922 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
923 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
924 IRDMA_WRITE_WITH_IMM |
927 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
928 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
932 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
933 init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
936 if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)
937 init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE;
939 ret = irdma_sc_qp_init(qp, &init_info);
942 ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n");
946 ctx_info = &iwqp->ctx_info;
947 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
948 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
950 if (rdma_protocol_roce(&iwdev->ibdev, 1))
951 irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
953 irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
955 err_code = irdma_cqp_create_qp_cmd(iwqp);
959 refcount_set(&iwqp->refcnt, 1);
960 spin_lock_init(&iwqp->lock);
961 spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
962 iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
963 rf->qp_table[qp_num] = iwqp;
964 iwqp->max_send_wr = sq_size;
965 iwqp->max_recv_wr = rq_size;
967 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
968 if (dev->ws_add(&iwdev->vsi, 0)) {
969 irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
974 irdma_qp_add_qos(&iwqp->sc_qp);
978 /* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
979 if (udata->outlen < sizeof(uresp)) {
981 uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
983 if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
986 uresp.actual_sq_size = sq_size;
987 uresp.actual_rq_size = rq_size;
988 uresp.qp_id = qp_num;
989 uresp.qp_caps = qp->qp_uk.qp_caps;
991 err_code = ib_copy_to_udata(udata, &uresp,
992 min(sizeof(uresp), udata->outlen));
994 ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
995 irdma_destroy_qp(&iwqp->ibqp, udata);
1000 init_completion(&iwqp->free_qp);
1004 irdma_free_qp_rsrc(iwqp);
1008 static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
1012 if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
1013 if (iwqp->roce_info.wr_rdresp_en) {
1014 acc_flags |= IB_ACCESS_LOCAL_WRITE;
1015 acc_flags |= IB_ACCESS_REMOTE_WRITE;
1017 if (iwqp->roce_info.rd_en)
1018 acc_flags |= IB_ACCESS_REMOTE_READ;
1019 if (iwqp->roce_info.bind_en)
1020 acc_flags |= IB_ACCESS_MW_BIND;
1022 if (iwqp->iwarp_info.wr_rdresp_en) {
1023 acc_flags |= IB_ACCESS_LOCAL_WRITE;
1024 acc_flags |= IB_ACCESS_REMOTE_WRITE;
1026 if (iwqp->iwarp_info.rd_en)
1027 acc_flags |= IB_ACCESS_REMOTE_READ;
1028 if (iwqp->iwarp_info.bind_en)
1029 acc_flags |= IB_ACCESS_MW_BIND;
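/*
 * Note: write access and read-response generation share one hardware
 * enable (wr_rdresp_en), so either LOCAL_WRITE or REMOTE_WRITE set in
 * irdma_modify_qp*() is reported back here as both flags.
 */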
1035 * irdma_query_qp - query qp attributes
1037 * @attr: attributes pointer
1038 * @attr_mask: Not used
1039 * @init_attr: qp attributes to return
1041 static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1042 int attr_mask, struct ib_qp_init_attr *init_attr)
1044 struct irdma_qp *iwqp = to_iwqp(ibqp);
1045 struct irdma_sc_qp *qp = &iwqp->sc_qp;
1047 memset(attr, 0, sizeof(*attr));
1048 memset(init_attr, 0, sizeof(*init_attr));
1050 attr->qp_state = iwqp->ibqp_state;
1051 attr->cur_qp_state = iwqp->ibqp_state;
1052 attr->cap.max_send_wr = iwqp->max_send_wr;
1053 attr->cap.max_recv_wr = iwqp->max_recv_wr;
1054 attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
1055 attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
1056 attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
1057 attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
1059 if (rdma_protocol_roce(ibqp->device, 1)) {
1060 attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
1061 attr->qkey = iwqp->roce_info.qkey;
1062 attr->rq_psn = iwqp->udp_info.epsn;
1063 attr->sq_psn = iwqp->udp_info.psn_nxt;
1064 attr->dest_qp_num = iwqp->roce_info.dest_qp;
1065 attr->pkey_index = iwqp->roce_info.p_key;
1066 attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
1067 attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
1068 attr->max_rd_atomic = iwqp->roce_info.ord_size;
1069 attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
1072 init_attr->event_handler = iwqp->ibqp.event_handler;
1073 init_attr->qp_context = iwqp->ibqp.qp_context;
1074 init_attr->send_cq = iwqp->ibqp.send_cq;
1075 init_attr->recv_cq = iwqp->ibqp.recv_cq;
1076 init_attr->cap = attr->cap;
1082 * irdma_query_pkey - Query partition key
1083 * @ibdev: device pointer from stack
1084 * @port: port number
1085 * @index: index of pkey
1086 * @pkey: pointer to store the pkey
1088 static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1091 if (index >= IRDMA_PKEY_TBL_SZ)
1094 *pkey = IRDMA_DEFAULT_PKEY;
1099 * irdma_modify_qp_roce - modify qp request
1100 * @ibqp: qp's pointer for modify
1101 * @attr: access attributes
1102 * @attr_mask: state mask
1105 int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1106 int attr_mask, struct ib_udata *udata)
1108 struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
1109 struct irdma_qp *iwqp = to_iwqp(ibqp);
1110 struct irdma_device *iwdev = iwqp->iwdev;
1111 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1112 struct irdma_qp_host_ctx_info *ctx_info;
1113 struct irdma_roce_offload_info *roce_info;
1114 struct irdma_udp_offload_info *udp_info;
1115 struct irdma_modify_qp_info info = {};
1116 struct irdma_modify_qp_resp uresp = {};
1117 struct irdma_modify_qp_req ureq = {};
1118 unsigned long flags;
1119 u8 issue_modify_qp = 0;
1122 ctx_info = &iwqp->ctx_info;
1123 roce_info = &iwqp->roce_info;
1124 udp_info = &iwqp->udp_info;
1126 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1129 if (attr_mask & IB_QP_DEST_QPN)
1130 roce_info->dest_qp = attr->dest_qp_num;
1132 if (attr_mask & IB_QP_PKEY_INDEX) {
1133 ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
1139 if (attr_mask & IB_QP_QKEY)
1140 roce_info->qkey = attr->qkey;
1142 if (attr_mask & IB_QP_PATH_MTU)
1143 udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
1145 if (attr_mask & IB_QP_SQ_PSN) {
1146 udp_info->psn_nxt = attr->sq_psn;
1147 udp_info->lsn = 0xffff;
1148 udp_info->psn_una = attr->sq_psn;
1149 udp_info->psn_max = attr->sq_psn;
1152 if (attr_mask & IB_QP_RQ_PSN)
1153 udp_info->epsn = attr->rq_psn;
1155 if (attr_mask & IB_QP_RNR_RETRY)
1156 udp_info->rnr_nak_thresh = attr->rnr_retry;
1158 if (attr_mask & IB_QP_RETRY_CNT)
1159 udp_info->rexmit_thresh = attr->retry_cnt;
1161 ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
1163 if (attr_mask & IB_QP_AV) {
1164 struct irdma_av *av = &iwqp->roce_ah.av;
1165 const struct ib_gid_attr *sgid_attr;
1166 u16 vlan_id = VLAN_N_VID;
1169 memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
1170 if (attr->ah_attr.ah_flags & IB_AH_GRH) {
1171 udp_info->ttl = attr->ah_attr.grh.hop_limit;
1172 udp_info->flow_label = attr->ah_attr.grh.flow_label;
1173 udp_info->tos = attr->ah_attr.grh.traffic_class;
1174 irdma_qp_rem_qos(&iwqp->sc_qp);
1175 dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
1176 ctx_info->user_pri = rt_tos2priority(udp_info->tos);
1177 iwqp->sc_qp.user_pri = ctx_info->user_pri;
1178 if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
1180 irdma_qp_add_qos(&iwqp->sc_qp);
1182 sgid_attr = attr->ah_attr.grh.sgid_attr;
1183 ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
1184 ctx_info->roce_info->mac_addr);
1188 if (vlan_id >= VLAN_N_VID && iwdev->dcb)
1190 if (vlan_id < VLAN_N_VID) {
1191 udp_info->insert_vlan_tag = true;
1192 udp_info->vlan_tag = vlan_id |
1193 ctx_info->user_pri << VLAN_PRIO_SHIFT;
1195 udp_info->insert_vlan_tag = false;
1198 av->attrs = attr->ah_attr;
1199 rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
1200 rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
1201 roce_info->local_qp = ibqp->qp_num;
1202 if (av->sgid_addr.saddr.sa_family == AF_INET6) {
1204 av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
1206 av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
1208 irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
1209 irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
1211 udp_info->ipv4 = false;
1212 irdma_copy_ip_ntohl(local_ip, daddr);
1214 udp_info->arp_idx = irdma_arp_table(iwdev->rf,
1219 __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
1220 __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
1222 local_ip[0] = ntohl(daddr);
1224 udp_info->ipv4 = true;
1225 udp_info->dest_ip_addr[0] = 0;
1226 udp_info->dest_ip_addr[1] = 0;
1227 udp_info->dest_ip_addr[2] = 0;
1228 udp_info->dest_ip_addr[3] = local_ip[0];
1230 udp_info->local_ipaddr[0] = 0;
1231 udp_info->local_ipaddr[1] = 0;
1232 udp_info->local_ipaddr[2] = 0;
1233 udp_info->local_ipaddr[3] = ntohl(saddr);
1236 irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
1237 attr->ah_attr.roce.dmac);
1240 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1241 if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
1242 ibdev_err(&iwdev->ibdev,
1243 "rd_atomic = %d, above max_hw_ord=%d\n",
1244 attr->max_rd_atomic,
1245 dev->hw_attrs.max_hw_ord);
1248 if (attr->max_rd_atomic)
1249 roce_info->ord_size = attr->max_rd_atomic;
1250 info.ord_valid = true;
1253 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1254 if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
1255 ibdev_err(&iwdev->ibdev,
1256 "dest_rd_atomic = %d, above max_hw_ird=%d\n",
1257 attr->max_dest_rd_atomic,
1258 dev->hw_attrs.max_hw_ird);
1261 if (attr->max_dest_rd_atomic)
1262 roce_info->ird_size = attr->max_dest_rd_atomic;
1265 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1266 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1267 roce_info->wr_rdresp_en = true;
1268 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1269 roce_info->wr_rdresp_en = true;
1270 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1271 roce_info->rd_en = true;
1274 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1276 ibdev_dbg(&iwdev->ibdev,
1277 "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
1278 __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1279 iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);
1281 spin_lock_irqsave(&iwqp->lock, flags);
1282 if (attr_mask & IB_QP_STATE) {
1283 if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
1284 iwqp->ibqp.qp_type, attr_mask)) {
1285 ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
1286 iwqp->ibqp.qp_num, iwqp->ibqp_state,
1291 info.curr_iwarp_state = iwqp->iwarp_state;
1293 switch (attr->qp_state) {
1295 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1300 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1301 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1302 issue_modify_qp = 1;
1306 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1310 info.arp_cache_idx_valid = true;
1311 info.cq_num_valid = true;
1312 info.next_iwarp_state = IRDMA_QP_STATE_RTR;
1313 issue_modify_qp = 1;
1316 if (iwqp->ibqp_state < IB_QPS_RTR ||
1317 iwqp->ibqp_state == IB_QPS_ERR) {
1322 info.arp_cache_idx_valid = true;
1323 info.cq_num_valid = true;
1324 info.ord_valid = true;
1325 info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1326 issue_modify_qp = 1;
1327 if (iwdev->push_mode && udata &&
1328 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1329 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1330 spin_unlock_irqrestore(&iwqp->lock, flags);
1331 irdma_alloc_push_page(iwqp);
1332 spin_lock_irqsave(&iwqp->lock, flags);
1336 if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
1339 if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
1344 info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1345 issue_modify_qp = 1;
1350 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
1351 spin_unlock_irqrestore(&iwqp->lock, flags);
1352 info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1353 irdma_hw_modify_qp(iwdev, iwqp, &info, true);
1354 spin_lock_irqsave(&iwqp->lock, flags);
1357 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1358 spin_unlock_irqrestore(&iwqp->lock, flags);
1360 if (ib_copy_from_udata(&ureq, udata,
1361 min(sizeof(ureq), udata->inlen)))
1364 irdma_flush_wqes(iwqp,
1365 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1366 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1372 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1373 issue_modify_qp = 1;
1380 iwqp->ibqp_state = attr->qp_state;
1383 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1384 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1385 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1386 spin_unlock_irqrestore(&iwqp->lock, flags);
1388 if (attr_mask & IB_QP_STATE) {
1389 if (issue_modify_qp) {
1390 ctx_info->rem_endpoint_idx = udp_info->arp_idx;
1391 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1393 spin_lock_irqsave(&iwqp->lock, flags);
1394 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1395 iwqp->iwarp_state = info.next_iwarp_state;
1396 iwqp->ibqp_state = attr->qp_state;
1398 if (iwqp->ibqp_state > IB_QPS_RTS &&
1399 !iwqp->flush_issued) {
1400 iwqp->flush_issued = 1;
1401 spin_unlock_irqrestore(&iwqp->lock, flags);
1402 irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
1406 spin_unlock_irqrestore(&iwqp->lock, flags);
1409 iwqp->ibqp_state = attr->qp_state;
1411 if (udata && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1412 struct irdma_ucontext *ucontext;
1414 ucontext = rdma_udata_to_drv_context(udata,
1415 struct irdma_ucontext, ibucontext);
1416 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1417 !iwqp->push_wqe_mmap_entry &&
1418 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1419 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1420 uresp.push_valid = 1;
1421 uresp.push_offset = iwqp->sc_qp.push_offset;
1423 ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1426 irdma_remove_push_mmap_entries(iwqp);
1427 ibdev_dbg(&iwdev->ibdev,
1428 "VERBS: copy_to_udata failed\n");
1436 spin_unlock_irqrestore(&iwqp->lock, flags);
1442 * irdma_modify_qp - modify qp request
1443 * @ibqp: qp's pointer for modify
1444 * @attr: access attributes
1445 * @attr_mask: state mask
1448 int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1449 struct ib_udata *udata)
1451 struct irdma_qp *iwqp = to_iwqp(ibqp);
1452 struct irdma_device *iwdev = iwqp->iwdev;
1453 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1454 struct irdma_qp_host_ctx_info *ctx_info;
1455 struct irdma_tcp_offload_info *tcp_info;
1456 struct irdma_iwarp_offload_info *offload_info;
1457 struct irdma_modify_qp_info info = {};
1458 struct irdma_modify_qp_resp uresp = {};
1459 struct irdma_modify_qp_req ureq = {};
1460 u8 issue_modify_qp = 0;
1463 unsigned long flags;
1465 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1468 ctx_info = &iwqp->ctx_info;
1469 offload_info = &iwqp->iwarp_info;
1470 tcp_info = &iwqp->tcp_info;
1471 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1472 ibdev_dbg(&iwdev->ibdev,
1473 "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
1474 __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1475 iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
1476 iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
1478 spin_lock_irqsave(&iwqp->lock, flags);
1479 if (attr_mask & IB_QP_STATE) {
1480 info.curr_iwarp_state = iwqp->iwarp_state;
1481 switch (attr->qp_state) {
1484 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1489 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1490 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1491 issue_modify_qp = 1;
1493 if (iwdev->push_mode && udata &&
1494 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1495 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1496 spin_unlock_irqrestore(&iwqp->lock, flags);
1497 irdma_alloc_push_page(iwqp);
1498 spin_lock_irqsave(&iwqp->lock, flags);
1502 if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
1508 issue_modify_qp = 1;
1509 iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
1510 iwqp->hte_added = 1;
1511 info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1512 info.tcp_ctx_valid = true;
1513 info.ord_valid = true;
1514 info.arp_cache_idx_valid = true;
1515 info.cq_num_valid = true;
1518 if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
1523 if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
1524 iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
1529 if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
1534 info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
1535 issue_modify_qp = 1;
1538 if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
1543 info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
1544 issue_modify_qp = 1;
1548 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1549 spin_unlock_irqrestore(&iwqp->lock, flags);
1551 if (ib_copy_from_udata(&ureq, udata,
1552 min(sizeof(ureq), udata->inlen)))
1555 irdma_flush_wqes(iwqp,
1556 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1557 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1563 if (iwqp->sc_qp.term_flags) {
1564 spin_unlock_irqrestore(&iwqp->lock, flags);
1565 irdma_terminate_del_timer(&iwqp->sc_qp);
1566 spin_lock_irqsave(&iwqp->lock, flags);
1568 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1569 if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
1571 iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
1572 info.reset_tcp_conn = true;
1576 issue_modify_qp = 1;
1577 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1584 iwqp->ibqp_state = attr->qp_state;
1586 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1587 ctx_info->iwarp_info_valid = true;
1588 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1589 offload_info->wr_rdresp_en = true;
1590 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1591 offload_info->wr_rdresp_en = true;
1592 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1593 offload_info->rd_en = true;
1596 if (ctx_info->iwarp_info_valid) {
1597 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1598 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1599 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1601 spin_unlock_irqrestore(&iwqp->lock, flags);
1603 if (attr_mask & IB_QP_STATE) {
1604 if (issue_modify_qp) {
1605 ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
1606 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1610 spin_lock_irqsave(&iwqp->lock, flags);
1611 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1612 iwqp->iwarp_state = info.next_iwarp_state;
1613 iwqp->ibqp_state = attr->qp_state;
1615 spin_unlock_irqrestore(&iwqp->lock, flags);
1618 if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
1620 if (iwqp->hw_tcp_state) {
1621 spin_lock_irqsave(&iwqp->lock, flags);
1622 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
1623 iwqp->last_aeq = IRDMA_AE_RESET_SENT;
1624 spin_unlock_irqrestore(&iwqp->lock, flags);
1626 irdma_cm_disconn(iwqp);
1628 int close_timer_started;
1630 spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
1632 if (iwqp->cm_node) {
1633 refcount_inc(&iwqp->cm_node->refcnt);
1634 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1635 close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
1636 if (iwqp->cm_id && close_timer_started == 1)
1637 irdma_schedule_cm_timer(iwqp->cm_node,
1638 (struct irdma_puda_buf *)iwqp,
1639 IRDMA_TIMER_TYPE_CLOSE, 1, 0);
1641 irdma_rem_ref_cm_node(iwqp->cm_node);
1643 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1647 if (attr_mask & IB_QP_STATE && udata &&
1648 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1649 struct irdma_ucontext *ucontext;
1651 ucontext = rdma_udata_to_drv_context(udata,
1652 struct irdma_ucontext, ibucontext);
1653 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1654 !iwqp->push_wqe_mmap_entry &&
1655 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1656 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1657 uresp.push_valid = 1;
1658 uresp.push_offset = iwqp->sc_qp.push_offset;
1661 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1664 irdma_remove_push_mmap_entries(iwqp);
1665 ibdev_dbg(&iwdev->ibdev,
1666 "VERBS: copy_to_udata failed\n");
1673 spin_unlock_irqrestore(&iwqp->lock, flags);
1679 * irdma_cq_free_rsrc - free up resources for cq
1680 * @rf: RDMA PCI function
1683 static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
1685 struct irdma_sc_cq *cq = &iwcq->sc_cq;
1687 if (!iwcq->user_mode) {
1688 dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size,
1689 iwcq->kmem.va, iwcq->kmem.pa);
1690 iwcq->kmem.va = NULL;
1691 dma_free_coherent(rf->sc_dev.hw->device,
1692 iwcq->kmem_shadow.size,
1693 iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa);
1694 iwcq->kmem_shadow.va = NULL;
1697 irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
1701 * irdma_free_cqbuf - worker to free a cq buffer
1702 * @work: provides access to the cq buffer to free
1704 static void irdma_free_cqbuf(struct work_struct *work)
1706 struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
1708 dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size,
1709 cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa);
1710 cq_buf->kmem_buf.va = NULL;
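/*
 * Freeing is deferred to this worker because retired CQ buffers are
 * unlinked under the CQ spinlock (see irdma_process_resize_list()
 * below), where dma_free_coherent() must not be called.
 */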
1715 * irdma_process_resize_list - remove resized cq buffers from the resize_list
1716 * @iwcq: cq which owns the resize_list
1717 * @iwdev: irdma device
1718 * @lcqe_buf: the buffer where the last cqe is received
1720 static int irdma_process_resize_list(struct irdma_cq *iwcq,
1721 struct irdma_device *iwdev,
1722 struct irdma_cq_buf *lcqe_buf)
1724 struct list_head *tmp_node, *list_node;
1725 struct irdma_cq_buf *cq_buf;
1728 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
1729 cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
1730 if (cq_buf == lcqe_buf)
1733 list_del(&cq_buf->list);
1734 queue_work(iwdev->cleanup_wq, &cq_buf->work);
1742 * irdma_destroy_cq - destroy cq
1743 * @ib_cq: cq pointer
1746 static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
1748 struct irdma_device *iwdev = to_iwdev(ib_cq->device);
1749 struct irdma_cq *iwcq = to_iwcq(ib_cq);
1750 struct irdma_sc_cq *cq = &iwcq->sc_cq;
1751 struct irdma_sc_dev *dev = cq->dev;
1752 struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
1753 struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
1754 unsigned long flags;
1756 spin_lock_irqsave(&iwcq->lock, flags);
1757 if (!list_empty(&iwcq->resize_list))
1758 irdma_process_resize_list(iwcq, iwdev, NULL);
1759 spin_unlock_irqrestore(&iwcq->lock, flags);
1761 irdma_cq_wq_destroy(iwdev->rf, cq);
1763 spin_lock_irqsave(&iwceq->ce_lock, flags);
1764 irdma_sc_cleanup_ceqes(cq, ceq);
1765 spin_unlock_irqrestore(&iwceq->ce_lock, flags);
1766 irdma_cq_free_rsrc(iwdev->rf, iwcq);
1772 * irdma_resize_cq - resize cq
1773 * @ibcq: cq to be resized
1774 * @entries: desired cq size
1777 static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
1778 struct ib_udata *udata)
1780 struct irdma_cq *iwcq = to_iwcq(ibcq);
1781 struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
1782 struct irdma_cqp_request *cqp_request;
1783 struct cqp_cmds_info *cqp_info;
1784 struct irdma_modify_cq_info *m_info;
1785 struct irdma_modify_cq_info info = {};
1786 struct irdma_dma_mem kmem_buf;
1787 struct irdma_cq_mr *cqmr_buf;
1788 struct irdma_pbl *iwpbl_buf;
1789 struct irdma_device *iwdev;
1790 struct irdma_pci_f *rf;
1791 struct irdma_cq_buf *cq_buf = NULL;
1792 enum irdma_status_code status = 0;
1793 unsigned long flags;
1796 iwdev = to_iwdev(ibcq->device);
1799 if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
1800 IRDMA_FEATURE_CQ_RESIZE))
1803 if (entries > rf->max_cqe)
1806 if (!iwcq->user_mode) {
1808 if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1812 info.cq_size = max(entries, 4);
1814 if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
1818 struct irdma_resize_cq_req req = {};
1819 struct irdma_ucontext *ucontext =
1820 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
1823 /* CQ resize not supported with legacy GEN_1 libi40iw */
1824 if (ucontext->legacy_mode)
1827 if (ib_copy_from_udata(&req, udata,
1828 min(sizeof(req), udata->inlen)))
1831 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1832 iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
1833 &ucontext->cq_reg_mem_list);
1834 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1839 cqmr_buf = &iwpbl_buf->cq_mr;
1840 if (iwpbl_buf->pbl_allocated) {
1841 info.virtual_map = true;
1842 info.pbl_chunk_size = 1;
1843 info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
1845 info.cq_pa = cqmr_buf->cq_pbl.addr;
1848 /* Kmode CQ resize */
1851 rsize = info.cq_size * sizeof(struct irdma_cqe);
1852 kmem_buf.size = ALIGN(round_up(rsize, 256), 256);
1853 kmem_buf.va = dma_alloc_coherent(dev->hw->device,
1854 kmem_buf.size, &kmem_buf.pa,
1859 info.cq_base = kmem_buf.va;
1860 info.cq_pa = kmem_buf.pa;
1861 cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
1868 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1874 info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
1875 info.cq_resize = true;
1877 cqp_info = &cqp_request->info;
1878 m_info = &cqp_info->in.u.cq_modify.info;
1879 memcpy(m_info, &info, sizeof(*m_info));
1881 cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
1882 cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
1883 cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
1884 cqp_info->post_sq = 1;
1885 status = irdma_handle_cqp_op(rf, cqp_request);
1886 irdma_put_cqp_request(&rf->cqp, cqp_request);
1892 spin_lock_irqsave(&iwcq->lock, flags);
1894 cq_buf->kmem_buf = iwcq->kmem;
1895 cq_buf->hw = dev->hw;
1896 memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
1897 INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
1898 list_add_tail(&cq_buf->list, &iwcq->resize_list);
1899 iwcq->kmem = kmem_buf;
1902 irdma_sc_cq_resize(&iwcq->sc_cq, &info);
1903 ibcq->cqe = info.cq_size - 1;
1904 spin_unlock_irqrestore(&iwcq->lock, flags);
1909 dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va,
1918 static inline int cq_validate_flags(u32 flags, u8 hw_rev)
1920 /* GEN1 does not support CQ create flags */
1921 if (hw_rev == IRDMA_GEN_1)
1922 return flags ? -EOPNOTSUPP : 0;
1924 return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
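/*
 * Example use (editor's sketch), mirroring irdma_create_cq() below:
 * reject unsupported create flags before allocating anything.
 */
#if 0 /* illustration only */
	int err = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
	if (err)
		return err; /* -EOPNOTSUPP on unsupported flags */
#endif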
1928 * irdma_create_cq - create cq
1929 * @ibcq: CQ allocated
1930 * @attr: attributes for cq
1933 static int irdma_create_cq(struct ib_cq *ibcq,
1934 const struct ib_cq_init_attr *attr,
1935 struct ib_udata *udata)
1937 struct ib_device *ibdev = ibcq->device;
1938 struct irdma_device *iwdev = to_iwdev(ibdev);
1939 struct irdma_pci_f *rf = iwdev->rf;
1940 struct irdma_cq *iwcq = to_iwcq(ibcq);
1942 struct irdma_sc_cq *cq;
1943 struct irdma_sc_dev *dev = &rf->sc_dev;
1944 struct irdma_cq_init_info info = {};
1945 enum irdma_status_code status;
1946 struct irdma_cqp_request *cqp_request;
1947 struct cqp_cmds_info *cqp_info;
1948 struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
1949 unsigned long flags;
1951 int entries = attr->cqe;
1953 err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
1956 err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
1963 spin_lock_init(&iwcq->lock);
1964 INIT_LIST_HEAD(&iwcq->resize_list);
1966 ukinfo->cq_size = max(entries, 4);
1967 ukinfo->cq_id = cq_num;
1968 iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
1969 if (attr->comp_vector < rf->ceqs_count)
1970 info.ceq_id = attr->comp_vector;
1971 info.ceq_id_valid = true;
1973 info.type = IRDMA_CQ_TYPE_IWARP;
1974 info.vsi = &iwdev->vsi;
1977 struct irdma_ucontext *ucontext;
1978 struct irdma_create_cq_req req = {};
1979 struct irdma_cq_mr *cqmr;
1980 struct irdma_pbl *iwpbl;
1981 struct irdma_pbl *iwpbl_shadow;
1982 struct irdma_cq_mr *cqmr_shadow;
1984 iwcq->user_mode = true;
1986 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
1988 if (ib_copy_from_udata(&req, udata,
1989 min(sizeof(req), udata->inlen))) {
1994 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1995 iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
1996 &ucontext->cq_reg_mem_list);
1997 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2003 iwcq->iwpbl = iwpbl;
2004 iwcq->cq_mem_size = 0;
2005 cqmr = &iwpbl->cq_mr;
2007 if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
2008 IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
2009 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2010 iwpbl_shadow = irdma_get_pbl(
2011 (unsigned long)req.user_shadow_area,
2012 &ucontext->cq_reg_mem_list);
2013 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2015 if (!iwpbl_shadow) {
2019 iwcq->iwpbl_shadow = iwpbl_shadow;
2020 cqmr_shadow = &iwpbl_shadow->cq_mr;
2021 info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
2024 info.shadow_area_pa = cqmr->shadow;
2026 if (iwpbl->pbl_allocated) {
2027 info.virtual_map = true;
2028 info.pbl_chunk_size = 1;
2029 info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
2031 info.cq_base_pa = cqmr->cq_pbl.addr;
2034 /* Kmode allocations */
2037 if (entries < 1 || entries > rf->max_cqe) {
2043 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2045 ukinfo->cq_size = entries;
2047 rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
2048 iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
2049 iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
2051 &iwcq->kmem.pa, GFP_KERNEL);
2052 if (!iwcq->kmem.va) {
2057 iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3,
2059 iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device,
2060 iwcq->kmem_shadow.size,
2061 &iwcq->kmem_shadow.pa,
2063 if (!iwcq->kmem_shadow.va) {
2067 info.shadow_area_pa = iwcq->kmem_shadow.pa;
2068 ukinfo->shadow_area = iwcq->kmem_shadow.va;
2069 ukinfo->cq_base = iwcq->kmem.va;
2070 info.cq_base_pa = iwcq->kmem.pa;
2073 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2074 info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
2075 (u32)IRDMA_MAX_CQ_READ_THRESH);
2077 if (irdma_sc_cq_init(cq, &info)) {
2078 ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
2083 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2089 cqp_info = &cqp_request->info;
2090 cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
2091 cqp_info->post_sq = 1;
2092 cqp_info->in.u.cq_create.cq = cq;
2093 cqp_info->in.u.cq_create.check_overflow = true;
2094 cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
2095 status = irdma_handle_cqp_op(rf, cqp_request);
2096 irdma_put_cqp_request(&rf->cqp, cqp_request);
2103 struct irdma_create_cq_resp resp = {};
2105 resp.cq_id = info.cq_uk_init_info.cq_id;
2106 resp.cq_size = info.cq_uk_init_info.cq_size;
2107 if (ib_copy_to_udata(udata, &resp,
2108 min(sizeof(resp), udata->outlen))) {
2109 ibdev_dbg(&iwdev->ibdev,
2110 "VERBS: copy to user data\n");
2117 irdma_cq_wq_destroy(rf, cq);
2119 irdma_cq_free_rsrc(rf, iwcq);
2125 * irdma_get_mr_access - get hw MR access permissions from IB access flags
2126 * @access: IB access flags
2128 static inline u16 irdma_get_mr_access(int access)
2132 hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
2133 IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
2134 hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
2135 IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
2136 hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
2137 IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
2138 hw_access |= (access & IB_ACCESS_MW_BIND) ?
2139 IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
2140 hw_access |= (access & IB_ZERO_BASED) ?
2141 IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
2142 hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
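/*
 * Worked example for the translation above (editor's sketch): a common
 * ib_reg_mr() mask of LOCAL_WRITE | REMOTE_READ | REMOTE_WRITE yields
 * LOCALWRITE | REMOTEREAD | REMOTEWRITE plus the always-added
 * LOCALREAD.
 */
#if 0 /* illustration only */
	u16 hw_acc = irdma_get_mr_access(IB_ACCESS_LOCAL_WRITE |
					 IB_ACCESS_REMOTE_READ |
					 IB_ACCESS_REMOTE_WRITE);
#endif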
2148 * irdma_free_stag - free stag resource
2149 * @iwdev: irdma device
2150 * @stag: stag to free
2152 static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)
2156 stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
2157 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
2161 * irdma_create_stag - create random stag
2162 * @iwdev: irdma device
2164 static u32 irdma_create_stag(struct irdma_device *iwdev)
2168 u32 next_stag_index;
2174 get_random_bytes(&random, sizeof(random));
2175 consumer_key = (u8)random;
2177 driver_key = random & ~iwdev->rf->mr_stagmask;
2178 next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
2179 next_stag_index %= iwdev->rf->max_mr;
2181 ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
2182 iwdev->rf->max_mr, &stag_index,
2186 stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
2188 stag += (u32)consumer_key;
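/*
 * Illustrative sketch of the stag layout built above, assuming the usual
 * IRDMA_CQPSQ_STAG_IDX_S shift of 8:
 *
 *	stag = (stag_index << 8) | driver_key bits | consumer_key;
 *
 * e.g. stag_index 0x1234 with consumer_key 0xab gives stag 0x1234ab.
 * irdma_free_stag() reverses this by masking with mr_stagmask and
 * shifting the index back out.
 */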
2194 * irdma_next_pbl_addr - Get next pbl address
2195 * @pbl: pointer to a pble
2196 * @pinfo: info pointer
2199 static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
2203 if (!(*pinfo) || *idx != (*pinfo)->cnt)
2208 return (*pinfo)->addr;
2212 * irdma_copy_user_pgaddrs - copy user page addresses to pbles locally
2213 * @iwmr: iwmr for IB's user page addresses
2214 * @pbl: pbl pointer to save 1 level or 0 level pble
2215 * @level: indicated level 0, 1 or 2
2217 static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
2218 enum irdma_pble_level level)
2220 struct ib_umem *region = iwmr->region;
2221 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2222 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2223 struct irdma_pble_info *pinfo;
2224 struct ib_block_iter biter;
2228 pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
2230 if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
2231 iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);
2233 rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
2234 *pbl = rdma_block_iter_dma_address(&biter);
2235 if (++pbl_cnt == palloc->total_cnt)
2237 pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
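/*
 * Illustrative note: rdma_umem_for_each_dma_block() yields one DMA address
 * per iwmr->page_size block, so a umem mapped with page_size == SZ_2M
 * writes a single huge-page PBLE per 2MB block rather than 512 4K entries.
 */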
2242 * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
2243 * @arr: lvl1 pbl array
2244 * @npages: page count
2245 * @pg_size: page size
2248 static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
2252 for (pg_idx = 0; pg_idx < npages; pg_idx++) {
2253 if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
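/*
 * Illustrative sketch: the loop above accepts only a strictly strided
 * array, e.g. with pg_size == 4096:
 *
 *	u64 arr[] = { 0x10000, 0x11000, 0x12000 };
 *	// irdma_check_mem_contiguous(arr, 3, 4096) -> true
 *	// replacing 0x11000 with 0x13000           -> false
 */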
2261 * irdma_check_mr_contiguous - check if MR is physically contiguous
2262 * @palloc: pbl allocation struct
2263 * @pg_size: page size
2265 static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
2268 struct irdma_pble_level2 *lvl2 = &palloc->level2;
2269 struct irdma_pble_info *leaf = lvl2->leaf;
2271 u64 *start_addr = NULL;
2275 if (palloc->level == PBLE_LEVEL_1) {
2276 arr = palloc->level1.addr;
2277 ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
2282 start_addr = leaf->addr;
2284 for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
2286 if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
2288 ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
2297 * irdma_setup_pbles - copy user page addresses to pbles
2298 * @rf: RDMA PCI function
2299 * @iwmr: mr pointer for this memory registration
2300 * @use_pbles: flag to use pbles
2302 static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
2305 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2306 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2307 struct irdma_pble_info *pinfo;
2309 enum irdma_status_code status;
2310 enum irdma_pble_level level = PBLE_LEVEL_1;
2313 status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
2318 iwpbl->pbl_allocated = true;
2319 level = palloc->level;
2320 pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
2321 palloc->level2.leaf;
2324 pbl = iwmr->pgaddrmem;
2327 irdma_copy_user_pgaddrs(iwmr, pbl, level);
2330 iwmr->pgaddrmem[0] = *pbl;
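/*
 * Illustrative sketch of the two paths above: with use_pbles the page
 * addresses land in HW PBLEs (level 1 or 2, whichever irdma_get_pble()
 * returned); without it they are staged in iwmr->pgaddrmem for a
 * single-chunk, physically contiguous registration. A typical caller:
 *
 *	err = irdma_setup_pbles(iwdev->rf, iwmr, iwmr->page_cnt != 1);
 */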
2336 * irdma_handle_q_mem - handle memory for qp and cq
2337 * @iwdev: irdma device
2338 * @req: information for q memory management
2339 * @iwpbl: pble struct
2340 * @use_pbles: flag to use pble
2342 static int irdma_handle_q_mem(struct irdma_device *iwdev,
2343 struct irdma_mem_reg_req *req,
2344 struct irdma_pbl *iwpbl, bool use_pbles)
2346 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2347 struct irdma_mr *iwmr = iwpbl->iwmr;
2348 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
2349 struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
2350 struct irdma_hmc_pble *hmc_p;
2351 u64 *arr = iwmr->pgaddrmem;
2356 pg_size = iwmr->page_size;
2357 err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
2361 if (use_pbles && palloc->level != PBLE_LEVEL_1) {
2362 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2363 iwpbl->pbl_allocated = false;
2368 arr = palloc->level1.addr;
2370 switch (iwmr->type) {
2371 case IRDMA_MEMREG_TYPE_QP:
2372 total = req->sq_pages + req->rq_pages;
2373 hmc_p = &qpmr->sq_pbl;
2374 qpmr->shadow = (dma_addr_t)arr[total];
2377 ret = irdma_check_mem_contiguous(arr, req->sq_pages,
2380 ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
2386 hmc_p->idx = palloc->level1.idx;
2387 hmc_p = &qpmr->rq_pbl;
2388 hmc_p->idx = palloc->level1.idx + req->sq_pages;
2390 hmc_p->addr = arr[0];
2391 hmc_p = &qpmr->rq_pbl;
2392 hmc_p->addr = arr[req->sq_pages];
2395 case IRDMA_MEMREG_TYPE_CQ:
2396 hmc_p = &cqmr->cq_pbl;
2399 cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
2402 ret = irdma_check_mem_contiguous(arr, req->cq_pages,
2406 hmc_p->idx = palloc->level1.idx;
2408 hmc_p->addr = arr[0];
2411 ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n");
2415 if (use_pbles && ret) {
2416 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2417 iwpbl->pbl_allocated = false;
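/*
 * Illustrative layout of the level-1 page array consumed above for an
 * IRDMA_MEMREG_TYPE_QP registration:
 *
 *	arr[0 .. sq_pages-1]                   SQ pages
 *	arr[sq_pages .. sq_pages+rq_pages-1]   RQ pages
 *	arr[sq_pages+rq_pages]                 shadow area
 *
 * The CQ case is analogous: cq_pages followed by its shadow page.
 */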
2424 * irdma_hw_alloc_mw - create the hw memory window
2425 * @iwdev: irdma device
2426 * @iwmr: pointer to memory window info
2428 static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
2430 struct irdma_mw_alloc_info *info;
2431 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2432 struct irdma_cqp_request *cqp_request;
2433 struct cqp_cmds_info *cqp_info;
2434 enum irdma_status_code status;
2436 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2440 cqp_info = &cqp_request->info;
2441 info = &cqp_info->in.u.mw_alloc.info;
2442 memset(info, 0, sizeof(*info));
2443 if (iwmr->ibmw.type == IB_MW_TYPE_1)
2444 info->mw_wide = true;
2446 info->page_size = PAGE_SIZE;
2447 info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2448 info->pd_id = iwpd->sc_pd.pd_id;
2449 info->remote_access = true;
2450 cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
2451 cqp_info->post_sq = 1;
2452 cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
2453 cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
2454 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2455 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2457 return status ? -ENOMEM : 0;
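/*
 * Illustrative sketch of the CQP request pattern this file uses for every
 * control-path command (a sketch, not a new API):
 *
 *	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
 *	if (!cqp_request)
 *		return -ENOMEM;
 *	cqp_info = &cqp_request->info;
 *	... fill cqp_info->cqp_cmd, post_sq and the in.u member ...
 *	status = irdma_handle_cqp_op(rf, cqp_request);
 *	irdma_put_cqp_request(&rf->cqp, cqp_request);
 *
 * The get/put pairing keeps the request alive for the completion handler
 * even after the issuing thread has moved on.
 */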
2461 * irdma_alloc_mw - Allocate memory window
2462 * @ibmw: Memory Window
2463 * @udata: user data pointer
2465 static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
2467 struct irdma_device *iwdev = to_iwdev(ibmw->device);
2468 struct irdma_mr *iwmr = to_iwmw(ibmw);
2472 stag = irdma_create_stag(iwdev);
2479 err_code = irdma_hw_alloc_mw(iwdev, iwmr);
2481 irdma_free_stag(iwdev, stag);
2489 * irdma_dealloc_mw - Dealloc memory window
2490 * @ibmw: memory window structure.
2492 static int irdma_dealloc_mw(struct ib_mw *ibmw)
2494 struct ib_pd *ibpd = ibmw->pd;
2495 struct irdma_pd *iwpd = to_iwpd(ibpd);
2496 struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
2497 struct irdma_device *iwdev = to_iwdev(ibmw->device);
2498 struct irdma_cqp_request *cqp_request;
2499 struct cqp_cmds_info *cqp_info;
2500 struct irdma_dealloc_stag_info *info;
2502 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2506 cqp_info = &cqp_request->info;
2507 info = &cqp_info->in.u.dealloc_stag.info;
2508 memset(info, 0, sizeof(*info));
2509 info->pd_id = iwpd->sc_pd.pd_id;
2510 info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
2512 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
2513 cqp_info->post_sq = 1;
2514 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
2515 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2516 irdma_handle_cqp_op(iwdev->rf, cqp_request);
2517 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2518 irdma_free_stag(iwdev, iwmr->stag);
2524 * irdma_hw_alloc_stag - cqp command to allocate stag
2525 * @iwdev: irdma device
2526 * @iwmr: irdma mr pointer
2528 static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
2529 struct irdma_mr *iwmr)
2531 struct irdma_allocate_stag_info *info;
2532 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2533 enum irdma_status_code status;
2535 struct irdma_cqp_request *cqp_request;
2536 struct cqp_cmds_info *cqp_info;
2538 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2542 cqp_info = &cqp_request->info;
2543 info = &cqp_info->in.u.alloc_stag.info;
2544 memset(info, 0, sizeof(*info));
2545 info->page_size = PAGE_SIZE;
2546 info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2547 info->pd_id = iwpd->sc_pd.pd_id;
2548 info->total_len = iwmr->len;
2549 info->remote_access = true;
2550 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
2551 cqp_info->post_sq = 1;
2552 cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
2553 cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
2554 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2555 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2563 * irdma_alloc_mr - register stag for fast memory registration
2565 * @mr_type: memory type for stag registration
2566 * @max_num_sg: max number of pages
2568 static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
2571 struct irdma_device *iwdev = to_iwdev(pd->device);
2572 struct irdma_pble_alloc *palloc;
2573 struct irdma_pbl *iwpbl;
2574 struct irdma_mr *iwmr;
2575 enum irdma_status_code status;
2577 int err_code = -ENOMEM;
2579 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2581 return ERR_PTR(-ENOMEM);
2583 stag = irdma_create_stag(iwdev);
2590 iwmr->ibmr.rkey = stag;
2591 iwmr->ibmr.lkey = stag;
2593 iwmr->ibmr.device = pd->device;
2594 iwpbl = &iwmr->iwpbl;
2596 iwmr->type = IRDMA_MEMREG_TYPE_MEM;
2597 palloc = &iwpbl->pble_alloc;
2598 iwmr->page_cnt = max_num_sg;
2599 status = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
2604 err_code = irdma_hw_alloc_stag(iwdev, iwmr);
2606 goto err_alloc_stag;
2608 iwpbl->pbl_allocated = true;
2612 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2614 irdma_free_stag(iwdev, stag);
2618 return ERR_PTR(err_code);
2622 * irdma_set_page - populate pbl list for fmr
2623 * @ibmr: ib mem to access iwarp mr pointer
2624 * @addr: page dma address for the pbl list
2626 static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
2628 struct irdma_mr *iwmr = to_iwmr(ibmr);
2629 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2630 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2633 if (unlikely(iwmr->npages == iwmr->page_cnt))
2636 pbl = palloc->level1.addr;
2637 pbl[iwmr->npages++] = addr;
2643 * irdma_map_mr_sg - map of sg list for fmr
2644 * @ibmr: ib mem to access iwarp mr pointer
2645 * @sg: scatter gather list
2646 * @sg_nents: number of sg pages
2647 * @sg_offset: byte offset into the sg list
2649 static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2650 int sg_nents, unsigned int *sg_offset)
2652 struct irdma_mr *iwmr = to_iwmr(ibmr);
2656 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
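/*
 * Illustrative sketch of typical caller usage (core entry point; the
 * local variable names are hypothetical):
 *
 *	int n = ib_map_mr_sg(ibmr, sg, sg_nents, NULL, PAGE_SIZE);
 *
 * ib_sg_to_pages() invokes irdma_set_page() once per page-sized block, so
 * on return iwmr->npages counts the PBL entries written to
 * palloc->level1.addr, capped by the capacity check in irdma_set_page().
 */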
2660 * irdma_hwreg_mr - send cqp command for memory registration
2661 * @iwdev: irdma device
2662 * @iwmr: irdma mr pointer
2663 * @access: access for MR
2665 static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
2668 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2669 struct irdma_reg_ns_stag_info *stag_info;
2670 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2671 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2672 enum irdma_status_code status;
2674 struct irdma_cqp_request *cqp_request;
2675 struct cqp_cmds_info *cqp_info;
2677 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2681 cqp_info = &cqp_request->info;
2682 stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
2683 memset(stag_info, 0, sizeof(*stag_info));
2684 stag_info->va = iwpbl->user_base;
2685 stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2686 stag_info->stag_key = (u8)iwmr->stag;
2687 stag_info->total_len = iwmr->len;
2688 stag_info->access_rights = irdma_get_mr_access(access);
2689 stag_info->pd_id = iwpd->sc_pd.pd_id;
2690 if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
2691 stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
2693 stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
2694 stag_info->page_size = iwmr->page_size;
2696 if (iwpbl->pbl_allocated) {
2697 if (palloc->level == PBLE_LEVEL_1) {
2698 stag_info->first_pm_pbl_index = palloc->level1.idx;
2699 stag_info->chunk_size = 1;
2701 stag_info->first_pm_pbl_index = palloc->level2.root.idx;
2702 stag_info->chunk_size = 3;
2705 stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
2708 cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
2709 cqp_info->post_sq = 1;
2710 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
2711 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
2712 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2713 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
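/*
 * Illustrative note on chunk_size above: 1 tells the HW to walk a single
 * level-1 PBL chunk, 3 points it at a level-2 root whose entries name the
 * leaf chunks, and the default 0 (no PBL allocated) makes reg_addr_pa the
 * MR's direct physical address.
 */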
2721 * irdma_reg_user_mr - Register a user memory region
2723 * @start: virtual start address
2724 * @len: length of mr
2725 * @virt: virtual address
2726 * @access: access of mr
2729 static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
2730 u64 virt, int access,
2731 struct ib_udata *udata)
2733 struct irdma_device *iwdev = to_iwdev(pd->device);
2734 struct irdma_ucontext *ucontext;
2735 struct irdma_pble_alloc *palloc;
2736 struct irdma_pbl *iwpbl;
2737 struct irdma_mr *iwmr;
2738 struct ib_umem *region;
2739 struct irdma_mem_reg_req req;
2740 u32 total, stag = 0;
2741 u8 shadow_pgcnt = 1;
2742 bool use_pbles = false;
2743 unsigned long flags;
2747 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
2748 return ERR_PTR(-EINVAL);
2750 region = ib_umem_get(pd->device, start, len, access);
2752 if (IS_ERR(region)) {
2753 ibdev_dbg(&iwdev->ibdev,
2754 "VERBS: Failed to create ib_umem region\n");
2755 return (struct ib_mr *)region;
2758 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
2759 ib_umem_release(region);
2760 return ERR_PTR(-EFAULT);
2763 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2765 ib_umem_release(region);
2766 return ERR_PTR(-ENOMEM);
2769 iwpbl = &iwmr->iwpbl;
2771 iwmr->region = region;
2773 iwmr->ibmr.device = pd->device;
2774 iwmr->ibmr.iova = virt;
2775 iwmr->page_size = PAGE_SIZE;
2777 if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
2778 iwmr->page_size = ib_umem_find_best_pgsz(region,
2779 iwdev->rf->sc_dev.hw_attrs.page_size_cap,
2781 if (unlikely(!iwmr->page_size)) {
2783 ib_umem_release(region);
2784 return ERR_PTR(-EOPNOTSUPP);
2787 iwmr->len = region->length;
2788 iwpbl->user_base = virt;
2789 palloc = &iwpbl->pble_alloc;
2790 iwmr->type = req.reg_type;
2791 iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
2793 switch (req.reg_type) {
2794 case IRDMA_MEMREG_TYPE_QP:
2795 total = req.sq_pages + req.rq_pages + shadow_pgcnt;
2796 if (total > iwmr->page_cnt) {
2800 total = req.sq_pages + req.rq_pages;
2801 use_pbles = (total > 2);
2802 err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
2806 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2808 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2809 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
2810 iwpbl->on_list = true;
2811 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2813 case IRDMA_MEMREG_TYPE_CQ:
2814 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
2816 total = req.cq_pages + shadow_pgcnt;
2817 if (total > iwmr->page_cnt) {
2822 use_pbles = (req.cq_pages > 1);
2823 err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
2827 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2829 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2830 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
2831 iwpbl->on_list = true;
2832 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2834 case IRDMA_MEMREG_TYPE_MEM:
2835 use_pbles = (iwmr->page_cnt != 1);
2837 err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
2842 ret = irdma_check_mr_contiguous(palloc,
2845 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2846 iwpbl->pbl_allocated = false;
2850 stag = irdma_create_stag(iwdev);
2857 iwmr->ibmr.rkey = stag;
2858 iwmr->ibmr.lkey = stag;
2859 err = irdma_hwreg_mr(iwdev, iwmr, access);
2861 irdma_free_stag(iwdev, stag);
2870 iwmr->type = req.reg_type;
2875 if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
2876 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2877 ib_umem_release(region);
2880 return ERR_PTR(err);
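/*
 * Illustrative summary of the three reg_type paths above:
 *
 *	IRDMA_MEMREG_TYPE_QP:  SQ/RQ pages plus a shadow page, tracked on
 *	                       the ucontext qp_reg_mem_list
 *	IRDMA_MEMREG_TYPE_CQ:  CQ pages (the shadow page is skipped when
 *	                       IRDMA_FEATURE_CQ_RESIZE is supported),
 *	                       tracked on cq_reg_mem_list
 *	IRDMA_MEMREG_TYPE_MEM: ordinary MR, i.e. PBLEs, a fresh stag, then
 *	                       irdma_hwreg_mr()
 */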
2884 * irdma_reg_phys_mr - register kernel physical memory
2886 * @addr: physical address of memory to register
2887 * @size: size of memory to register
2888 * @access: Access rights
2889 * @iova_start: start of virtual address for physical buffers
2891 struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
2894 struct irdma_device *iwdev = to_iwdev(pd->device);
2895 struct irdma_pbl *iwpbl;
2896 struct irdma_mr *iwmr;
2897 enum irdma_status_code status;
2901 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2903 return ERR_PTR(-ENOMEM);
2906 iwmr->ibmr.device = pd->device;
2907 iwpbl = &iwmr->iwpbl;
2909 iwmr->type = IRDMA_MEMREG_TYPE_MEM;
2910 iwpbl->user_base = *iova_start;
2911 stag = irdma_create_stag(iwdev);
2918 iwmr->ibmr.iova = *iova_start;
2919 iwmr->ibmr.rkey = stag;
2920 iwmr->ibmr.lkey = stag;
2922 iwmr->pgaddrmem[0] = addr;
2924 iwmr->page_size = SZ_4K;
2925 status = irdma_hwreg_mr(iwdev, iwmr, access);
2927 irdma_free_stag(iwdev, stag);
2937 return ERR_PTR(ret);
2941 * irdma_get_dma_mr - register physical mem
2943 * @acc: access for memory
2945 static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc)
2949 return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
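/*
 * Illustrative sketch: this is the .get_dma_mr hook, reached by kernel
 * ULPs through the device ops table; it registers a zero-based region
 * (addr 0, size 0, iova 0), in effect granting @acc over the DMA address
 * space the device can reach.
 */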
2953 * irdma_del_memlist - delete pbl list entries for CQ/QP
2954 * @iwmr: iwmr for IB's user page addresses
2955 * @ucontext: ptr to user context
2957 static void irdma_del_memlist(struct irdma_mr *iwmr,
2958 struct irdma_ucontext *ucontext)
2960 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2961 unsigned long flags;
2963 switch (iwmr->type) {
2964 case IRDMA_MEMREG_TYPE_CQ:
2965 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2966 if (iwpbl->on_list) {
2967 iwpbl->on_list = false;
2968 list_del(&iwpbl->list);
2970 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2972 case IRDMA_MEMREG_TYPE_QP:
2973 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2974 if (iwpbl->on_list) {
2975 iwpbl->on_list = false;
2976 list_del(&iwpbl->list);
2978 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2986 * irdma_dereg_mr - deregister mr
2987 * @ib_mr: mr ptr for dereg
2990 static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
2992 struct ib_pd *ibpd = ib_mr->pd;
2993 struct irdma_pd *iwpd = to_iwpd(ibpd);
2994 struct irdma_mr *iwmr = to_iwmr(ib_mr);
2995 struct irdma_device *iwdev = to_iwdev(ib_mr->device);
2996 struct irdma_dealloc_stag_info *info;
2997 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2998 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2999 struct irdma_cqp_request *cqp_request;
3000 struct cqp_cmds_info *cqp_info;
3002 if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
3004 struct irdma_ucontext *ucontext;
3006 ucontext = rdma_udata_to_drv_context(udata,
3007 struct irdma_ucontext,
3009 irdma_del_memlist(iwmr, ucontext);
3014 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3018 cqp_info = &cqp_request->info;
3019 info = &cqp_info->in.u.dealloc_stag.info;
3020 memset(info, 0, sizeof(*info));
3021 info->pd_id = iwpd->sc_pd.pd_id;
3022 info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
3024 if (iwpbl->pbl_allocated)
3025 info->dealloc_pbl = true;
3027 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
3028 cqp_info->post_sq = 1;
3029 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
3030 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
3031 irdma_handle_cqp_op(iwdev->rf, cqp_request);
3032 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3033 irdma_free_stag(iwdev, iwmr->stag);
3035 if (iwpbl->pbl_allocated)
3036 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
3037 ib_umem_release(iwmr->region);
3044 * irdma_copy_sg_list - copy sg list for qp
3045 * @sg_list: destination sg list
3046 * @sgl: source sg list
3047 * @num_sges: count of sg entries
3049 static void irdma_copy_sg_list(struct irdma_sge *sg_list, struct ib_sge *sgl,
3054 for (i = 0; (i < num_sges) && (i < IRDMA_MAX_WQ_FRAGMENT_COUNT); i++) {
3055 sg_list[i].tag_off = sgl[i].addr;
3056 sg_list[i].len = sgl[i].length;
3057 sg_list[i].stag = sgl[i].lkey;
3062 * irdma_post_send - post a send wr for kernel application
3063 * @ibqp: qp ptr for wr
3064 * @ib_wr: work request ptr
3065 * @bad_wr: returned bad wr on error
3067 static int irdma_post_send(struct ib_qp *ibqp,
3068 const struct ib_send_wr *ib_wr,
3069 const struct ib_send_wr **bad_wr)
3071 struct irdma_qp *iwqp;
3072 struct irdma_qp_uk *ukqp;
3073 struct irdma_sc_dev *dev;
3074 struct irdma_post_sq_info info;
3075 enum irdma_status_code ret;
3077 unsigned long flags;
3079 struct irdma_ah *ah;
3080 bool reflush = false;
3082 iwqp = to_iwqp(ibqp);
3083 ukqp = &iwqp->sc_qp.qp_uk;
3084 dev = &iwqp->iwdev->rf->sc_dev;
3086 spin_lock_irqsave(&iwqp->lock, flags);
3087 if (iwqp->flush_issued && ukqp->sq_flush_complete)
3090 memset(&info, 0, sizeof(info));
3092 info.wr_id = ib_wr->wr_id;
3093 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
3094 info.signaled = true;
3095 if (ib_wr->send_flags & IB_SEND_FENCE)
3096 info.read_fence = true;
3097 switch (ib_wr->opcode) {
3098 case IB_WR_SEND_WITH_IMM:
3099 if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
3100 info.imm_data_valid = true;
3101 info.imm_data = ntohl(ib_wr->ex.imm_data);
3108 case IB_WR_SEND_WITH_INV:
3109 if (ib_wr->opcode == IB_WR_SEND ||
3110 ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
3111 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3112 info.op_type = IRDMA_OP_TYPE_SEND_SOL;
3114 info.op_type = IRDMA_OP_TYPE_SEND;
3116 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3117 info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
3119 info.op_type = IRDMA_OP_TYPE_SEND_INV;
3120 info.stag_to_inv = ib_wr->ex.invalidate_rkey;
3123 if (ib_wr->send_flags & IB_SEND_INLINE) {
3124 info.op.inline_send.data = (void *)(unsigned long)
3125 ib_wr->sg_list[0].addr;
3126 info.op.inline_send.len = ib_wr->sg_list[0].length;
3127 if (iwqp->ibqp.qp_type == IB_QPT_UD ||
3128 iwqp->ibqp.qp_type == IB_QPT_GSI) {
3129 ah = to_iwah(ud_wr(ib_wr)->ah);
3130 info.op.inline_send.ah_id = ah->sc_ah.ah_info.ah_idx;
3131 info.op.inline_send.qkey = ud_wr(ib_wr)->remote_qkey;
3132 info.op.inline_send.dest_qp = ud_wr(ib_wr)->remote_qpn;
3134 ret = irdma_uk_inline_send(ukqp, &info, false);
3136 info.op.send.num_sges = ib_wr->num_sge;
3137 info.op.send.sg_list = (struct irdma_sge *)
3139 if (iwqp->ibqp.qp_type == IB_QPT_UD ||
3140 iwqp->ibqp.qp_type == IB_QPT_GSI) {
3141 ah = to_iwah(ud_wr(ib_wr)->ah);
3142 info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
3143 info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
3144 info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
3146 ret = irdma_uk_send(ukqp, &info, false);
3150 if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
3156 case IB_WR_RDMA_WRITE_WITH_IMM:
3157 if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
3158 info.imm_data_valid = true;
3159 info.imm_data = ntohl(ib_wr->ex.imm_data);
3165 case IB_WR_RDMA_WRITE:
3166 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3167 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
3169 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
3171 if (ib_wr->send_flags & IB_SEND_INLINE) {
3172 info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
3173 info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
3174 info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
3175 info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
3176 ret = irdma_uk_inline_rdma_write(ukqp, &info, false);
3178 info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
3179 info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
3180 info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
3181 info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
3182 ret = irdma_uk_rdma_write(ukqp, &info, false);
3186 if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
3192 case IB_WR_RDMA_READ_WITH_INV:
3195 case IB_WR_RDMA_READ:
3196 if (ib_wr->num_sge >
3197 dev->hw_attrs.uk_attrs.max_hw_read_sges) {
3201 info.op_type = IRDMA_OP_TYPE_RDMA_READ;
3202 info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
3203 info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
3204 info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
3205 info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
3207 ret = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
3209 if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
3215 case IB_WR_LOCAL_INV:
3216 info.op_type = IRDMA_OP_TYPE_INV_STAG;
3217 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
3218 ret = irdma_uk_stag_local_invalidate(ukqp, &info, true);
3222 case IB_WR_REG_MR: {
3223 struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
3224 struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
3225 struct irdma_fast_reg_stag_info stag_info = {};
3227 stag_info.signaled = info.signaled;
3228 stag_info.read_fence = info.read_fence;
3229 stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
3230 stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
3231 stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
3232 stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
3233 stag_info.wr_id = ib_wr->wr_id;
3234 stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
3235 stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
3236 stag_info.total_len = iwmr->ibmr.length;
3237 stag_info.reg_addr_pa = *palloc->level1.addr;
3238 stag_info.first_pm_pbl_index = palloc->level1.idx;
3239 stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
3240 if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
3241 stag_info.chunk_size = 1;
3242 ret = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
3250 ibdev_dbg(&iwqp->iwdev->ibdev,
3251 "VERBS: upost_send bad opcode = 0x%x\n",
3258 ib_wr = ib_wr->next;
3261 if (!iwqp->flush_issued && iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS) {
3262 irdma_uk_qp_post_wr(ukqp);
3263 spin_unlock_irqrestore(&iwqp->lock, flags);
3264 } else if (reflush) {
3265 ukqp->sq_flush_complete = false;
3266 spin_unlock_irqrestore(&iwqp->lock, flags);
3267 irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_REFLUSH);
3269 spin_unlock_irqrestore(&iwqp->lock, flags);
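/*
 * Illustrative sketch of a kernel caller posting a signaled send (qp, mr,
 * dma_addr, len and ctx are hypothetical placeholders):
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len,
 *			      .lkey = mr->lkey };
 *	struct ib_send_wr wr = { .opcode = IB_WR_SEND,
 *				 .send_flags = IB_SEND_SIGNALED,
 *				 .sg_list = &sge, .num_sge = 1,
 *				 .wr_id = (uintptr_t)ctx };
 *	const struct ib_send_wr *bad;
 *	int err = ib_post_send(qp, &wr, &bad);
 *
 * ib_post_send() dispatches here through the ops table at the bottom of
 * this file; the completion surfaces later via irdma_poll_cq().
 */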
3278 * irdma_post_recv - post receive wr for kernel application
3279 * @ibqp: ib qp pointer
3280 * @ib_wr: work request for receive
3281 * @bad_wr: bad wr that caused an error
3283 static int irdma_post_recv(struct ib_qp *ibqp,
3284 const struct ib_recv_wr *ib_wr,
3285 const struct ib_recv_wr **bad_wr)
3287 struct irdma_qp *iwqp;
3288 struct irdma_qp_uk *ukqp;
3289 struct irdma_post_rq_info post_recv = {};
3290 struct irdma_sge sg_list[IRDMA_MAX_WQ_FRAGMENT_COUNT];
3291 enum irdma_status_code ret = 0;
3292 unsigned long flags;
3294 bool reflush = false;
3296 iwqp = to_iwqp(ibqp);
3297 ukqp = &iwqp->sc_qp.qp_uk;
3299 spin_lock_irqsave(&iwqp->lock, flags);
3300 if (iwqp->flush_issued && ukqp->rq_flush_complete)
3303 post_recv.num_sges = ib_wr->num_sge;
3304 post_recv.wr_id = ib_wr->wr_id;
3305 irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
3306 post_recv.sg_list = sg_list;
3307 ret = irdma_uk_post_receive(ukqp, &post_recv);
3309 ibdev_dbg(&iwqp->iwdev->ibdev,
3310 "VERBS: post_recv err %d\n", ret);
3311 if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
3318 ib_wr = ib_wr->next;
3323 ukqp->rq_flush_complete = false;
3324 spin_unlock_irqrestore(&iwqp->lock, flags);
3325 irdma_flush_wqes(iwqp, IRDMA_FLUSH_RQ | IRDMA_REFLUSH);
3327 spin_unlock_irqrestore(&iwqp->lock, flags);
3337 * irdma_flush_err_to_ib_wc_status - convert flush error code to IB wc status
3338 * @opcode: iwarp flush code
3340 static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
3343 case FLUSH_PROT_ERR:
3344 return IB_WC_LOC_PROT_ERR;
3345 case FLUSH_REM_ACCESS_ERR:
3346 return IB_WC_REM_ACCESS_ERR;
3347 case FLUSH_LOC_QP_OP_ERR:
3348 return IB_WC_LOC_QP_OP_ERR;
3349 case FLUSH_REM_OP_ERR:
3350 return IB_WC_REM_OP_ERR;
3351 case FLUSH_LOC_LEN_ERR:
3352 return IB_WC_LOC_LEN_ERR;
3353 case FLUSH_GENERAL_ERR:
3354 return IB_WC_WR_FLUSH_ERR;
3355 case FLUSH_RETRY_EXC_ERR:
3356 return IB_WC_RETRY_EXC_ERR;
3357 case FLUSH_MW_BIND_ERR:
3358 return IB_WC_MW_BIND_ERR;
3359 case FLUSH_FATAL_ERR:
3361 return IB_WC_FATAL_ERR;
3366 * irdma_process_cqe - process cqe info
3367 * @entry: processed cqe
3368 * @cq_poll_info: cqe info
3370 static void irdma_process_cqe(struct ib_wc *entry,
3371 struct irdma_cq_poll_info *cq_poll_info)
3373 struct irdma_qp *iwqp;
3374 struct irdma_sc_qp *qp;
3376 entry->wc_flags = 0;
3377 entry->pkey_index = 0;
3378 entry->wr_id = cq_poll_info->wr_id;
3380 qp = cq_poll_info->qp_handle;
3381 iwqp = qp->qp_uk.back_qp;
3382 entry->qp = qp->qp_uk.back_qp;
3384 if (cq_poll_info->error) {
3385 entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
3386 irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
3388 entry->vendor_err = cq_poll_info->major_err << 16 |
3389 cq_poll_info->minor_err;
3391 entry->status = IB_WC_SUCCESS;
3392 if (cq_poll_info->imm_valid) {
3393 entry->ex.imm_data = htonl(cq_poll_info->imm_data);
3394 entry->wc_flags |= IB_WC_WITH_IMM;
3396 if (cq_poll_info->ud_smac_valid) {
3397 ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
3398 entry->wc_flags |= IB_WC_WITH_SMAC;
3401 if (cq_poll_info->ud_vlan_valid) {
3402 u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
3404 entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
3406 entry->vlan_id = vlan;
3407 entry->wc_flags |= IB_WC_WITH_VLAN;
3414 switch (cq_poll_info->op_type) {
3415 case IRDMA_OP_TYPE_RDMA_WRITE:
3416 case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
3417 entry->opcode = IB_WC_RDMA_WRITE;
3419 case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
3420 case IRDMA_OP_TYPE_RDMA_READ:
3421 entry->opcode = IB_WC_RDMA_READ;
3423 case IRDMA_OP_TYPE_SEND_INV:
3424 case IRDMA_OP_TYPE_SEND_SOL:
3425 case IRDMA_OP_TYPE_SEND_SOL_INV:
3426 case IRDMA_OP_TYPE_SEND:
3427 entry->opcode = IB_WC_SEND;
3429 case IRDMA_OP_TYPE_FAST_REG_NSMR:
3430 entry->opcode = IB_WC_REG_MR;
3432 case IRDMA_OP_TYPE_INV_STAG:
3433 entry->opcode = IB_WC_LOCAL_INV;
3435 case IRDMA_OP_TYPE_REC_IMM:
3436 case IRDMA_OP_TYPE_REC:
3437 entry->opcode = cq_poll_info->op_type == IRDMA_OP_TYPE_REC_IMM ?
3438 IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
3439 if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
3440 cq_poll_info->stag_invalid_set) {
3441 entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
3442 entry->wc_flags |= IB_WC_WITH_INVALIDATE;
3446 ibdev_err(&iwqp->iwdev->ibdev,
3447 "Invalid opcode = %d in CQE\n", cq_poll_info->op_type);
3448 entry->status = IB_WC_GENERAL_ERR;
3452 if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
3453 entry->src_qp = cq_poll_info->ud_src_qpn;
3456 (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
3457 entry->network_hdr_type = cq_poll_info->ipv4 ?
3461 entry->src_qp = cq_poll_info->qp_id;
3464 entry->byte_len = cq_poll_info->bytes_xfered;
3468 * irdma_poll_one - poll one entry of the CQ
3469 * @ukcq: ukcq to poll
3470 * @cur_cqe: current CQE info to be filled in
3471 * @entry: ib_wc object to be filled for non-extended CQ or NULL for extended CQ
3473 * Returns the internal irdma device error code or 0 on success
3475 static inline int irdma_poll_one(struct irdma_cq_uk *ukcq,
3476 struct irdma_cq_poll_info *cur_cqe,
3477 struct ib_wc *entry)
3479 int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
3484 irdma_process_cqe(entry, cur_cqe);
3490 * __irdma_poll_cq - poll cq for completion (kernel apps)
3492 * @num_entries: number of entries to poll
3493 * @entry: ib_wc array to fill with completed entries
3495 static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
3497 struct list_head *tmp_node, *list_node;
3498 struct irdma_cq_buf *last_buf = NULL;
3499 struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
3500 struct irdma_cq_buf *cq_buf;
3501 enum irdma_status_code ret;
3502 struct irdma_device *iwdev;
3503 struct irdma_cq_uk *ukcq;
3504 bool cq_new_cqe = false;
3505 int resized_bufs = 0;
3508 iwdev = to_iwdev(iwcq->ibcq.device);
3509 ukcq = &iwcq->sc_cq.cq_uk;
3511 /* go through the list of previously resized CQ buffers */
3512 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
3513 cq_buf = container_of(list_node, struct irdma_cq_buf, list);
3514 while (npolled < num_entries) {
3515 ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
3521 if (ret == IRDMA_ERR_Q_EMPTY)
3523 /* QP using the CQ is destroyed. Skip reporting this CQE */
3524 if (ret == IRDMA_ERR_Q_DESTROYED) {
3531 /* save the resized CQ buffer which received the last cqe */
3537 /* check the current CQ for new cqes */
3538 while (npolled < num_entries) {
3539 ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
3546 if (ret == IRDMA_ERR_Q_EMPTY)
3548 /* QP using the CQ is destroyed. Skip reporting this CQE */
3549 if (ret == IRDMA_ERR_Q_DESTROYED) {
3557 /* all previous CQ resizes are complete */
3558 resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
3560 /* only CQ resizes up to the last_buf are complete */
3561 resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
3563 /* report to the HW the number of complete CQ resizes */
3564 irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
3568 ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
3575 * irdma_poll_cq - poll cq for completion (kernel apps)
3577 * @num_entries: number of entries to poll
3578 * @entry: ib_wc array to fill with completed entries
3580 static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
3581 struct ib_wc *entry)
3583 struct irdma_cq *iwcq;
3584 unsigned long flags;
3587 iwcq = to_iwcq(ibcq);
3589 spin_lock_irqsave(&iwcq->lock, flags);
3590 ret = __irdma_poll_cq(iwcq, num_entries, entry);
3591 spin_unlock_irqrestore(&iwcq->lock, flags);
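/*
 * Illustrative sketch of typical consumer usage (core entry point):
 *
 *	struct ib_wc wc[8];
 *	int n = ib_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
 *
 * __irdma_poll_cq() drains any buffers left on iwcq->resize_list before
 * the live CQ, so completions are not lost across irdma_resize_cq().
 */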
3597 * irdma_req_notify_cq - arm a cq for kernel application
3599 * @notify_flags: notification flags
3601 static int irdma_req_notify_cq(struct ib_cq *ibcq,
3602 enum ib_cq_notify_flags notify_flags)
3604 struct irdma_cq *iwcq;
3605 struct irdma_cq_uk *ukcq;
3606 unsigned long flags;
3607 enum irdma_cmpl_notify cq_notify;
3608 bool promo_event = false;
3611 cq_notify = notify_flags == IB_CQ_SOLICITED ?
3612 IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
3613 iwcq = to_iwcq(ibcq);
3614 ukcq = &iwcq->sc_cq.cq_uk;
3616 spin_lock_irqsave(&iwcq->lock, flags);
3617 /* Only promote to arm the CQ for any event if the last arm event was solicited. */
3618 if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
3621 if (!iwcq->armed || promo_event) {
3623 iwcq->last_notify = cq_notify;
3624 irdma_uk_cq_request_notification(ukcq, cq_notify);
3627 if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !irdma_cq_empty(iwcq))
3629 spin_unlock_irqrestore(&iwcq->lock, flags);
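/*
 * Illustrative sketch of the promotion rule above: a CQ armed for
 * solicited-only events is re-armed when the consumer later asks for all
 * completions, even though it is already armed:
 *
 *	ib_req_notify_cq(ibcq, IB_CQ_SOLICITED);
 *	ib_req_notify_cq(ibcq, IB_CQ_NEXT_COMP);  // takes the promo_event path
 */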
3634 static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
3635 struct ib_port_immutable *immutable)
3637 struct ib_port_attr attr;
3640 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3641 err = ib_query_port(ibdev, port_num, &attr);
3645 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3646 immutable->pkey_tbl_len = attr.pkey_tbl_len;
3647 immutable->gid_tbl_len = attr.gid_tbl_len;
3652 static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
3653 struct ib_port_immutable *immutable)
3655 struct ib_port_attr attr;
3658 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
3659 err = ib_query_port(ibdev, port_num, &attr);
3662 immutable->gid_tbl_len = attr.gid_tbl_len;
3667 static const char *const irdma_hw_stat_names[] = {
3669 [IRDMA_HW_STAT_INDEX_RXVLANERR] = "rxVlanErrors",
3670 [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
3671 [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
3672 [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
3673 [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
3674 [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
3675 [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
3676 [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
3677 [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
3678 [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
3679 [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = "cnpHandled",
3680 [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = "cnpIgnored",
3681 [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = "cnpSent",
3684 [IRDMA_HW_STAT_INDEX_IP4RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3686 [IRDMA_HW_STAT_INDEX_IP4RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3688 [IRDMA_HW_STAT_INDEX_IP4RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
3690 [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3692 [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3694 [IRDMA_HW_STAT_INDEX_IP4TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3696 [IRDMA_HW_STAT_INDEX_IP4TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3698 [IRDMA_HW_STAT_INDEX_IP4TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
3700 [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3701 "ip4OutMcastOctets",
3702 [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3704 [IRDMA_HW_STAT_INDEX_IP6RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3706 [IRDMA_HW_STAT_INDEX_IP6RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3708 [IRDMA_HW_STAT_INDEX_IP6RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
3710 [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3712 [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3714 [IRDMA_HW_STAT_INDEX_IP6TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3716 [IRDMA_HW_STAT_INDEX_IP6TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3718 [IRDMA_HW_STAT_INDEX_IP6TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
3720 [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3721 "ip6OutMcastOctets",
3722 [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3724 [IRDMA_HW_STAT_INDEX_TCPRXSEGS + IRDMA_HW_STAT_INDEX_MAX_32] =
3726 [IRDMA_HW_STAT_INDEX_TCPTXSEG + IRDMA_HW_STAT_INDEX_MAX_32] =
3728 [IRDMA_HW_STAT_INDEX_RDMARXRDS + IRDMA_HW_STAT_INDEX_MAX_32] =
3730 [IRDMA_HW_STAT_INDEX_RDMARXSNDS + IRDMA_HW_STAT_INDEX_MAX_32] =
3732 [IRDMA_HW_STAT_INDEX_RDMARXWRS + IRDMA_HW_STAT_INDEX_MAX_32] =
3734 [IRDMA_HW_STAT_INDEX_RDMATXRDS + IRDMA_HW_STAT_INDEX_MAX_32] =
3736 [IRDMA_HW_STAT_INDEX_RDMATXSNDS + IRDMA_HW_STAT_INDEX_MAX_32] =
3738 [IRDMA_HW_STAT_INDEX_RDMATXWRS + IRDMA_HW_STAT_INDEX_MAX_32] =
3740 [IRDMA_HW_STAT_INDEX_RDMAVBND + IRDMA_HW_STAT_INDEX_MAX_32] =
3742 [IRDMA_HW_STAT_INDEX_RDMAVINV + IRDMA_HW_STAT_INDEX_MAX_32] =
3744 [IRDMA_HW_STAT_INDEX_UDPRXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3746 [IRDMA_HW_STAT_INDEX_UDPTXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3748 [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3752 static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
3754 struct irdma_device *iwdev = to_iwdev(dev);
3756 snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u",
3757 irdma_fw_major_ver(&iwdev->rf->sc_dev),
3758 irdma_fw_minor_ver(&iwdev->rf->sc_dev));
3762 * irdma_alloc_hw_port_stats - Allocate a hw stats structure
3763 * @ibdev: device pointer from stack
3764 * @port_num: port number
3766 static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
3769 int num_counters = IRDMA_HW_STAT_INDEX_MAX_32 +
3770 IRDMA_HW_STAT_INDEX_MAX_64;
3771 unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
3773 BUILD_BUG_ON(ARRAY_SIZE(irdma_hw_stat_names) !=
3774 (IRDMA_HW_STAT_INDEX_MAX_32 + IRDMA_HW_STAT_INDEX_MAX_64));
3776 return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
3781 * irdma_get_hw_stats - Populates the rdma_hw_stats structure
3782 * @ibdev: device pointer from stack
3783 * @stats: stats pointer from stack
3784 * @port_num: port number
3785 * @index: which hw counter the stack is requesting we update
3787 static int irdma_get_hw_stats(struct ib_device *ibdev,
3788 struct rdma_hw_stats *stats, u32 port_num,
3791 struct irdma_device *iwdev = to_iwdev(ibdev);
3792 struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
3794 if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
3795 irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
3797 irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat);
3799 memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
3801 return stats->num_counters;
3805 * irdma_query_gid - Query port GID
3806 * @ibdev: device pointer from stack
3807 * @port: port number
3808 * @index: Entry index
3811 static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index,
3814 struct irdma_device *iwdev = to_iwdev(ibdev);
3816 memset(gid->raw, 0, sizeof(gid->raw));
3817 ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
3823 * mcast_list_add - Add a new mcast item to list
3824 * @rf: RDMA PCI function
3825 * @new_elem: pointer to element to add
3827 static void mcast_list_add(struct irdma_pci_f *rf,
3828 struct mc_table_list *new_elem)
3830 list_add(&new_elem->list, &rf->mc_qht_list.list);
3834 * mcast_list_del - Remove an mcast item from list
3835 * @mc_qht_elem: pointer to mcast table list element
3837 static void mcast_list_del(struct mc_table_list *mc_qht_elem)
3840 list_del(&mc_qht_elem->list);
3844 * mcast_list_lookup_ip - Search mcast list for address
3845 * @rf: RDMA PCI function
3846 * @ip_mcast: pointer to mcast IP address
3848 static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf,
3851 struct mc_table_list *mc_qht_el;
3852 struct list_head *pos, *q;
3854 list_for_each_safe (pos, q, &rf->mc_qht_list.list) {
3855 mc_qht_el = list_entry(pos, struct mc_table_list, list);
3856 if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
3857 sizeof(mc_qht_el->mc_info.dest_ip)))
3865 * irdma_mcast_cqp_op - perform a mcast cqp operation
3866 * @iwdev: irdma device
3867 * @mc_grp_ctx: mcast group info
3870 * returns error status
3872 static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
3873 struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
3875 struct cqp_cmds_info *cqp_info;
3876 struct irdma_cqp_request *cqp_request;
3877 enum irdma_status_code status;
3879 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3883 cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
3884 cqp_info = &cqp_request->info;
3885 cqp_info->cqp_cmd = op;
3886 cqp_info->post_sq = 1;
3887 cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
3888 cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
3889 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3890 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3898 * irdma_mcast_mac - Get the multicast MAC for an IP address
3899 * @ip_addr: IPv4 or IPv6 address
3900 * @mac: pointer to result MAC address
3901 * @ipv4: flag indicating IPv4 or IPv6
3904 void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4)
3906 u8 *ip = (u8 *)ip_addr;
3909 unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00,
3912 mac4[3] = ip[2] & 0x7F;
3915 ether_addr_copy(mac, mac4);
3917 unsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00,
3924 ether_addr_copy(mac, mac6);
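/*
 * Illustrative worked example (little-endian host): IPv4 group 239.1.2.3
 * maps to 01:00:5e:01:02:03 (low 23 bits of the address, per RFC 1112),
 * and an IPv6 group such as ff0e::0102:0304 maps to 33:33:01:02:03:04
 * (low 32 bits, per RFC 2464).
 */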
3929 * irdma_attach_mcast - attach a qp to a multicast group
3931 * @ibgid: pointer to global ID
3934 * returns error status
3936 static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
3938 struct irdma_qp *iwqp = to_iwqp(ibqp);
3939 struct irdma_device *iwdev = iwqp->iwdev;
3940 struct irdma_pci_f *rf = iwdev->rf;
3941 struct mc_table_list *mc_qht_elem;
3942 struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
3943 unsigned long flags;
3944 u32 ip_addr[4] = {};
3951 struct sockaddr saddr;
3952 struct sockaddr_in saddr_in;
3953 struct sockaddr_in6 saddr_in6;
3955 unsigned char dmac[ETH_ALEN];
3957 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
3959 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
3960 irdma_copy_ip_ntohl(ip_addr,
3961 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
3962 irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL);
3964 ibdev_dbg(&iwdev->ibdev,
3965 "VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
3967 irdma_mcast_mac(ip_addr, dmac, false);
3969 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
3971 vlan_id = irdma_get_vlan_ipv4(ip_addr);
3972 irdma_mcast_mac(ip_addr, dmac, true);
3973 ibdev_dbg(&iwdev->ibdev,
3974 "VERBS: qp_id=%d, IP4address=%pI4, MAC=%pM\n",
3975 ibqp->qp_num, ip_addr, dmac);
3978 spin_lock_irqsave(&rf->qh_list_lock, flags);
3979 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
3981 struct irdma_dma_mem *dma_mem_mc;
3983 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3984 mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL);
3988 mc_qht_elem->mc_info.ipv4_valid = ipv4;
3989 memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
3990 sizeof(mc_qht_elem->mc_info.dest_ip));
3991 ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
3992 &mgn, &rf->next_mcg);
3998 mc_qht_elem->mc_info.mgn = mgn;
3999 dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
4000 dma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX,
4001 IRDMA_HW_PAGE_SIZE);
4002 dma_mem_mc->va = dma_alloc_coherent(rf->hw.device,
4006 if (!dma_mem_mc->va) {
4007 irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
4012 mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
4013 memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
4014 sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
4015 mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
4016 mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
4017 if (vlan_id < VLAN_N_VID)
4018 mc_qht_elem->mc_grp_ctx.vlan_valid = true;
4019 mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->vsi.fcn_id;
4020 mc_qht_elem->mc_grp_ctx.qs_handle =
4021 iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
4022 ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
4024 spin_lock_irqsave(&rf->qh_list_lock, flags);
4025 mcast_list_add(rf, mc_qht_elem);
4027 if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
4028 IRDMA_MAX_MGS_PER_CTX) {
4029 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4034 mcg_info.qp_id = iwqp->ibqp.qp_num;
4035 no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
4036 irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4037 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4039 /* Only if there is a change do we need to modify or create */
4041 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4042 IRDMA_OP_MC_CREATE);
4043 } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4044 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4045 IRDMA_OP_MC_MODIFY);
4056 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4057 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4058 mcast_list_del(mc_qht_elem);
4059 dma_free_coherent(rf->hw.device,
4060 mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
4061 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
4062 mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
4063 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
4064 irdma_free_rsrc(rf, rf->allocated_mcgs,
4065 mc_qht_elem->mc_grp_ctx.mg_id);
4073 * irdma_detach_mcast - detach a qp from a multicast group
4075 * @ibgid: pointer to global ID
4078 * returns error status
4080 static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
4082 struct irdma_qp *iwqp = to_iwqp(ibqp);
4083 struct irdma_device *iwdev = iwqp->iwdev;
4084 struct irdma_pci_f *rf = iwdev->rf;
4085 u32 ip_addr[4] = {};
4086 struct mc_table_list *mc_qht_elem;
4087 struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
4089 unsigned long flags;
4091 struct sockaddr saddr;
4092 struct sockaddr_in saddr_in;
4093 struct sockaddr_in6 saddr_in6;
4096 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
4097 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
4098 irdma_copy_ip_ntohl(ip_addr,
4099 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4101 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4103 spin_lock_irqsave(&rf->qh_list_lock, flags);
4104 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
4106 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4107 ibdev_dbg(&iwdev->ibdev,
4108 "VERBS: address not found MCG\n");
4112 mcg_info.qp_id = iwqp->ibqp.qp_num;
4113 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4114 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4115 mcast_list_del(mc_qht_elem);
4116 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4117 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4118 IRDMA_OP_MC_DESTROY);
4120 ibdev_dbg(&iwdev->ibdev,
4121 "VERBS: failed MC_DESTROY MCG\n");
4122 spin_lock_irqsave(&rf->qh_list_lock, flags);
4123 mcast_list_add(rf, mc_qht_elem);
4124 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4128 dma_free_coherent(rf->hw.device,
4129 mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
4130 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
4131 mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
4132 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
4133 irdma_free_rsrc(rf, rf->allocated_mcgs,
4134 mc_qht_elem->mc_grp_ctx.mg_id);
4137 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4138 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4139 IRDMA_OP_MC_MODIFY);
4141 ibdev_dbg(&iwdev->ibdev,
4142 "VERBS: failed Modify MCG\n");
4151 * irdma_create_ah - create address handle
4152 * @ibah: address handle
4153 * @attr: address handle attributes
4156 * returns 0 on success, error otherwise
4158 static int irdma_create_ah(struct ib_ah *ibah,
4159 struct rdma_ah_init_attr *attr,
4160 struct ib_udata *udata)
4162 struct irdma_pd *pd = to_iwpd(ibah->pd);
4163 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4164 struct rdma_ah_attr *ah_attr = attr->ah_attr;
4165 const struct ib_gid_attr *sgid_attr;
4166 struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4167 struct irdma_pci_f *rf = iwdev->rf;
4168 struct irdma_sc_ah *sc_ah;
4170 struct irdma_ah_info *ah_info;
4171 struct irdma_create_ah_resp uresp;
4173 struct sockaddr saddr;
4174 struct sockaddr_in saddr_in;
4175 struct sockaddr_in6 saddr_in6;
4176 } sgid_addr, dgid_addr;
4180 err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_id,
4187 sc_ah->ah_info.ah_idx = ah_id;
4188 sc_ah->ah_info.vsi = &iwdev->vsi;
4189 irdma_sc_init_ah(&rf->sc_dev, sc_ah);
4190 ah->sgid_index = ah_attr->grh.sgid_index;
4191 sgid_attr = ah_attr->grh.sgid_attr;
4192 memcpy(&ah->dgid, &ah_attr->grh.dgid, sizeof(ah->dgid));
4193 rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
4194 rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid);
4195 ah->av.attrs = *ah_attr;
4196 ah->av.net_type = rdma_gid_attr_network_type(sgid_attr);
4197 ah->av.sgid_addr.saddr = sgid_addr.saddr;
4198 ah->av.dgid_addr.saddr = dgid_addr.saddr;
4199 ah_info = &sc_ah->ah_info;
4200 ah_info->ah_idx = ah_id;
4201 ah_info->pd_idx = pd->sc_pd.pd_id;
4202 if (ah_attr->ah_flags & IB_AH_GRH) {
4203 ah_info->flow_label = ah_attr->grh.flow_label;
4204 ah_info->hop_ttl = ah_attr->grh.hop_limit;
4205 ah_info->tc_tos = ah_attr->grh.traffic_class;
4208 ether_addr_copy(dmac, ah_attr->roce.dmac);
4209 if (rdma_gid_attr_network_type(sgid_attr) == RDMA_NETWORK_IPV4) {
4210 ah_info->ipv4_valid = true;
4211 ah_info->dest_ip_addr[0] =
4212 ntohl(dgid_addr.saddr_in.sin_addr.s_addr);
4213 ah_info->src_ip_addr[0] =
4214 ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4215 ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
4216 ah_info->dest_ip_addr[0]);
4217 if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) {
4218 ah_info->do_lpbk = true;
4219 irdma_mcast_mac(ah_info->dest_ip_addr, dmac, true);
4222 irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
4223 dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4224 irdma_copy_ip_ntohl(ah_info->src_ip_addr,
4225 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4226 ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
4227 ah_info->dest_ip_addr);
4228 if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) {
4229 ah_info->do_lpbk = true;
4230 irdma_mcast_mac(ah_info->dest_ip_addr, dmac, false);
4234 err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag,
4239 ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr,
4240 ah_info->ipv4_valid, dmac);
4242 if (ah_info->dst_arpindex == -1) {
4247 if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb)
4248 ah_info->vlan_tag = 0;
4250 if (ah_info->vlan_tag < VLAN_N_VID) {
4251 ah_info->insert_vlan_tag = true;
4252 ah_info->vlan_tag |=
4253 rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
4256 err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
4257 attr->flags & RDMA_CREATE_AH_SLEEPABLE,
4258 irdma_gsi_ud_qp_ah_cb, sc_ah);
4261 ibdev_dbg(&iwdev->ibdev,
4262 "VERBS: CQP-OP Create AH fail");
4266 if (!(attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
4267 int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
4270 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
4272 } while (!sc_ah->ah_info.ah_valid && --cnt);
4275 ibdev_dbg(&iwdev->ibdev,
4276 "VERBS: CQP create AH timed out");
4283 uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
4284 err = ib_copy_to_udata(udata, &uresp,
4285 min(sizeof(uresp), udata->outlen));
4290 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);
4296 * irdma_destroy_ah - Destroy address handle
4297 * @ibah: pointer to address handle
4298 * @ah_flags: flags for sleepable
4300 static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
4302 struct irdma_device *iwdev = to_iwdev(ibah->device);
4303 struct irdma_ah *ah = to_iwah(ibah);
4305 irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
4308 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
4309 ah->sc_ah.ah_info.ah_idx);
4315 * irdma_query_ah - Query address handle
4316 * @ibah: pointer to address handle
4317 * @ah_attr: address handle attributes
4319 static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
4321 struct irdma_ah *ah = to_iwah(ibah);
4323 memset(ah_attr, 0, sizeof(*ah_attr));
4324 if (ah->av.attrs.ah_flags & IB_AH_GRH) {
4325 ah_attr->ah_flags = IB_AH_GRH;
4326 ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
4327 ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
4328 ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
4329 ah_attr->grh.sgid_index = ah->sgid_index;
4331 memcpy(&ah_attr->grh.dgid, &ah->dgid,
4332 sizeof(ah_attr->grh.dgid));
4338 static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
4341 return IB_LINK_LAYER_ETHERNET;
4344 static __be64 irdma_mac_to_guid(struct net_device *ndev)
4346 unsigned char *mac = ndev->dev_addr;
4348 unsigned char *dst = (unsigned char *)&guid;
4350 dst[0] = mac[0] ^ 2;
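/*
 * Illustrative worked example of the modified EUI-64 conversion: MAC
 * 00:1b:21:aa:bb:cc becomes node GUID 02:1b:21:ff:fe:aa:bb:cc; the
 * universal/local bit is flipped by the XOR above and ff:fe is inserted
 * between the OUI and the NIC-specific bytes.
 */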
4362 static const struct ib_device_ops irdma_roce_dev_ops = {
4363 .attach_mcast = irdma_attach_mcast,
4364 .create_ah = irdma_create_ah,
4365 .create_user_ah = irdma_create_ah,
4366 .destroy_ah = irdma_destroy_ah,
4367 .detach_mcast = irdma_detach_mcast,
4368 .get_link_layer = irdma_get_link_layer,
4369 .get_port_immutable = irdma_roce_port_immutable,
4370 .modify_qp = irdma_modify_qp_roce,
4371 .query_ah = irdma_query_ah,
4372 .query_pkey = irdma_query_pkey,
4375 static const struct ib_device_ops irdma_iw_dev_ops = {
4376 .modify_qp = irdma_modify_qp,
4377 .get_port_immutable = irdma_iw_port_immutable,
4378 .query_gid = irdma_query_gid,
static const struct ib_device_ops irdma_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_IRDMA,
	.uverbs_abi_ver = IRDMA_ABI_VER,

	.alloc_hw_port_stats = irdma_alloc_hw_port_stats,
	.alloc_mr = irdma_alloc_mr,
	.alloc_mw = irdma_alloc_mw,
	.alloc_pd = irdma_alloc_pd,
	.alloc_ucontext = irdma_alloc_ucontext,
	.create_cq = irdma_create_cq,
	.create_qp = irdma_create_qp,
	.dealloc_driver = irdma_ib_dealloc_device,
	.dealloc_mw = irdma_dealloc_mw,
	.dealloc_pd = irdma_dealloc_pd,
	.dealloc_ucontext = irdma_dealloc_ucontext,
	.dereg_mr = irdma_dereg_mr,
	.destroy_cq = irdma_destroy_cq,
	.destroy_qp = irdma_destroy_qp,
	.disassociate_ucontext = irdma_disassociate_ucontext,
	.get_dev_fw_str = irdma_get_dev_fw_str,
	.get_dma_mr = irdma_get_dma_mr,
	.get_hw_stats = irdma_get_hw_stats,
	.map_mr_sg = irdma_map_mr_sg,
	.mmap = irdma_mmap,
	.mmap_free = irdma_mmap_free,
	.poll_cq = irdma_poll_cq,
	.post_recv = irdma_post_recv,
	.post_send = irdma_post_send,
	.query_device = irdma_query_device,
	.query_port = irdma_query_port,
	.query_qp = irdma_query_qp,
	.reg_user_mr = irdma_reg_user_mr,
	.req_notify_cq = irdma_req_notify_cq,
	.resize_cq = irdma_resize_cq,
	INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext),
	INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
	INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp),
};

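/*
 * Note on op-table merging (explanatory, not driver code):
 * ib_set_device_ops() only fills callbacks that are still unset, so the
 * protocol-specific table applied first (irdma_roce_dev_ops or
 * irdma_iw_dev_ops) takes precedence over irdma_dev_ops. That is why
 * .modify_qp resolves to irdma_modify_qp_roce in RoCE mode and
 * irdma_modify_qp in iWARP mode, while the common verbs come from
 * irdma_dev_ops.
 */
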
/**
 * irdma_init_roce_device - initialization of RoCE RDMA device
 * @iwdev: irdma device
 */
static void irdma_init_roce_device(struct irdma_device *iwdev)
{
	iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
	iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev);
	ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
}

/**
 * irdma_init_iw_device - initialization of iWARP RDMA device
 * @iwdev: irdma device
 */
static int irdma_init_iw_device(struct irdma_device *iwdev)
{
	struct net_device *netdev = iwdev->netdev;

	iwdev->ibdev.node_type = RDMA_NODE_RNIC;
	ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, netdev->dev_addr);
	iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref;
	iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref;
	iwdev->ibdev.ops.iw_get_qp = irdma_get_qp;
	iwdev->ibdev.ops.iw_connect = irdma_connect;
	iwdev->ibdev.ops.iw_accept = irdma_accept;
	iwdev->ibdev.ops.iw_reject = irdma_reject;
	iwdev->ibdev.ops.iw_create_listen = irdma_create_listen;
	iwdev->ibdev.ops.iw_destroy_listen = irdma_destroy_listen;
	memcpy(iwdev->ibdev.iw_ifname, netdev->name,
	       sizeof(iwdev->ibdev.iw_ifname));
	ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops);

	return 0;
}

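/*
 * Background note (explanatory, not driver code): unlike RoCE, iWARP
 * connection management goes through the iw_cm callbacks wired up
 * above (iw_connect/iw_accept/iw_reject/iw_create_listen/...), so the
 * core RDMA-CM maps rdma_connect() and friends onto these driver
 * entry points rather than onto MAD-based CM.
 */
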
/**
 * irdma_init_rdma_device - initialization of RDMA device
 * @iwdev: irdma device
 */
static int irdma_init_rdma_device(struct irdma_device *iwdev)
{
	struct pci_dev *pcidev = iwdev->rf->pcidev;
	int ret;

	if (iwdev->roce_mode) {
		irdma_init_roce_device(iwdev);
	} else {
		ret = irdma_init_iw_device(iwdev);
		if (ret)
			return ret;
	}
	iwdev->ibdev.phys_port_cnt = 1;
	iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
	iwdev->ibdev.dev.parent = &pcidev->dev;
	ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);

	return 0;
}

/**
 * irdma_port_ibevent - indicate port event
 * @iwdev: irdma device
 */
void irdma_port_ibevent(struct irdma_device *iwdev)
{
	struct ib_event event;

	event.device = &iwdev->ibdev;
	event.element.port_num = 1;
	event.event =
		iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	ib_dispatch_event(&event);
}

/**
 * irdma_ib_unregister_device - unregister rdma device from IB core
 * @iwdev: irdma device
 */
void irdma_ib_unregister_device(struct irdma_device *iwdev)
{
	iwdev->iw_status = 0;
	irdma_port_ibevent(iwdev);
	ib_unregister_device(&iwdev->ibdev);
}

/**
 * irdma_ib_register_device - register irdma device to IB core
 * @iwdev: irdma device
 */
int irdma_ib_register_device(struct irdma_device *iwdev)
{
	int ret;

	ret = irdma_init_rdma_device(iwdev);
	if (ret)
		return ret;

	ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
	if (ret)
		goto error;
	dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX);
	ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device);
	if (ret)
		goto error;

	iwdev->iw_status = 1;
	irdma_port_ibevent(iwdev);

	return 0;

error:
	ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n");

	return ret;
}

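/*
 * Illustrative pairing (a sketch, not driver code): a hypothetical
 * caller such as the probe/remove path would bracket the device's
 * lifetime with the two helpers above. The function name here is made
 * up for illustration and error handling is kept minimal.
 */
static int __maybe_unused irdma_example_bringup(struct irdma_device *iwdev)
{
	int ret;

	ret = irdma_ib_register_device(iwdev);	/* fires IB_EVENT_PORT_ACTIVE */
	if (ret)
		return ret;

	/* ... device in service ... */

	irdma_ib_unregister_device(iwdev);	/* PORT_ERR, then core teardown */
	return 0;
}
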
/**
 * irdma_ib_dealloc_device
 * @ibdev: ib device handle
 *
 * callback from ibdev dealloc_driver to deallocate resources
 * under irdma device
 */
void irdma_ib_dealloc_device(struct ib_device *ibdev)
{
	struct irdma_device *iwdev = to_iwdev(ibdev);

	irdma_rt_deinit_hw(iwdev);
	irdma_ctrl_deinit_hw(iwdev->rf);
}