1 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
5 static struct irdma_rsrc_limits rsrc_limits_table[] = {
32 /* types of hmc objects */
33 static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
38 IRDMA_HMC_IW_APBVT_ENTRY,
52 IRDMA_HMC_IW_OOISCFFL,
56 * irdma_iwarp_ce_handler - handle iwarp completions
57 * @iwcq: iwarp cq receiving event
59 static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
61 struct irdma_cq *cq = iwcq->back_cq;
65 if (cq->ibcq.comp_handler)
66 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
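/*
 * Illustrative sketch (not part of this driver): the comp_handler invoked
 * above is registered by the verbs consumer when it creates the CQ. A
 * typical handler just re-arms notification and defers the actual polling:
 */
static void example_comp_handler(struct ib_cq *ibcq, void *cq_context)
{
	/* Re-request an event for the next completion; poll elsewhere. */
	ib_req_notify_cq(ibcq, IB_CQ_NEXT_COMP);
}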
70 * irdma_puda_ce_handler - handle puda completion events
71 * @rf: RDMA PCI function
72 * @cq: puda completion q for event
74 static void irdma_puda_ce_handler(struct irdma_pci_f *rf,
75 struct irdma_sc_cq *cq)
77 struct irdma_sc_dev *dev = &rf->sc_dev;
78 enum irdma_status_code status;
82 status = irdma_puda_poll_cmpl(dev, cq, &compl_error);
83 if (status == IRDMA_ERR_Q_EMPTY)
86 ibdev_dbg(to_ibdev(dev), "ERR: puda status = %d\n", status);
90 ibdev_dbg(to_ibdev(dev), "ERR: puda compl_err =0x%x\n",
100 * irdma_process_ceq - handle ceq for completions
101 * @rf: RDMA PCI function
102 * @ceq: ceq having cq for completion
104 static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
106 struct irdma_sc_dev *dev = &rf->sc_dev;
107 struct irdma_sc_ceq *sc_ceq;
108 struct irdma_sc_cq *cq;
111 sc_ceq = &ceq->sc_ceq;
113 spin_lock_irqsave(&ceq->ce_lock, flags);
114 cq = irdma_sc_process_ceq(dev, sc_ceq);
116 spin_unlock_irqrestore(&ceq->ce_lock, flags);
120 if (cq->cq_type == IRDMA_CQ_TYPE_IWARP)
121 irdma_iwarp_ce_handler(cq);
123 spin_unlock_irqrestore(&ceq->ce_lock, flags);
125 if (cq->cq_type == IRDMA_CQ_TYPE_CQP)
126 queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
127 else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ ||
128 cq->cq_type == IRDMA_CQ_TYPE_IEQ)
129 irdma_puda_ce_handler(rf, cq);
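/*
 * A sketch of the (elided) drain loop around the dispatch above, assuming
 * irdma_sc_process_ceq() returns NULL once the CEQ has no more entries:
 *
 *	do {
 *		cq = irdma_sc_process_ceq(dev, sc_ceq);
 *		if (!cq)
 *			break;
 *		... dispatch on cq->cq_type as shown ...
 *	} while (1);
 */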
133 static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
134 struct irdma_aeqe_info *info)
136 qp->sq_flush_code = info->sq;
137 qp->rq_flush_code = info->rq;
138 qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
140 switch (info->ae_id) {
141 case IRDMA_AE_AMP_UNALLOCATED_STAG:
142 case IRDMA_AE_AMP_BOUNDS_VIOLATION:
143 case IRDMA_AE_AMP_INVALID_STAG:
144 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
146 case IRDMA_AE_AMP_BAD_PD:
147 case IRDMA_AE_UDA_XMIT_BAD_PD:
148 qp->flush_code = FLUSH_PROT_ERR;
150 case IRDMA_AE_AMP_BAD_QP:
151 case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
152 qp->flush_code = FLUSH_LOC_QP_OP_ERR;
154 case IRDMA_AE_AMP_BAD_STAG_KEY:
155 case IRDMA_AE_AMP_BAD_STAG_INDEX:
156 case IRDMA_AE_AMP_TO_WRAP:
157 case IRDMA_AE_AMP_RIGHTS_VIOLATION:
158 case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
159 case IRDMA_AE_PRIV_OPERATION_DENIED:
160 case IRDMA_AE_IB_INVALID_REQUEST:
161 case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
162 qp->flush_code = FLUSH_REM_ACCESS_ERR;
163 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
165 case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
166 case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
167 case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
168 case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
169 case IRDMA_AE_UDA_L4LEN_INVALID:
170 case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
171 qp->flush_code = FLUSH_LOC_LEN_ERR;
173 case IRDMA_AE_LCE_QP_CATASTROPHIC:
174 qp->flush_code = FLUSH_FATAL_ERR;
176 case IRDMA_AE_DDP_UBE_INVALID_MO:
177 case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
178 case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
179 qp->flush_code = FLUSH_GENERAL_ERR;
181 case IRDMA_AE_LLP_TOO_MANY_RETRIES:
182 qp->flush_code = FLUSH_RETRY_EXC_ERR;
184 case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
185 case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
186 case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
187 qp->flush_code = FLUSH_MW_BIND_ERR;
189 case IRDMA_AE_IB_REMOTE_OP_ERROR:
190 qp->flush_code = FLUSH_REM_OP_ERR;
193 qp->flush_code = FLUSH_FATAL_ERR;
199 * irdma_process_aeq - handle aeq events
200 * @rf: RDMA PCI function
202 static void irdma_process_aeq(struct irdma_pci_f *rf)
204 struct irdma_sc_dev *dev = &rf->sc_dev;
205 struct irdma_aeq *aeq = &rf->aeq;
206 struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq;
207 struct irdma_aeqe_info aeinfo;
208 struct irdma_aeqe_info *info = &aeinfo;
210 struct irdma_qp *iwqp = NULL;
211 struct irdma_sc_cq *cq = NULL;
212 struct irdma_cq *iwcq = NULL;
213 struct irdma_sc_qp *qp = NULL;
214 struct irdma_qp_host_ctx_info *ctx_info = NULL;
215 struct irdma_device *iwdev = rf->iwdev;
224 memset(info, 0, sizeof(*info));
225 ret = irdma_sc_get_next_aeqe(sc_aeq, info);
230 ibdev_dbg(&iwdev->ibdev,
231 "AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
232 info->ae_id, info->qp, info->qp_cq_id, info->tcp_state,
233 info->iwarp_state, info->ae_src);
236 spin_lock_irqsave(&rf->qptable_lock, flags);
237 iwqp = rf->qp_table[info->qp_cq_id];
239 spin_unlock_irqrestore(&rf->qptable_lock,
241 if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
242 atomic_dec(&iwdev->vsi.qp_suspend_reqs);
243 wake_up(&iwdev->suspend_wq);
246 ibdev_dbg(&iwdev->ibdev, "AEQ: qp_id %d is already freed\n",
250 irdma_qp_add_ref(&iwqp->ibqp);
251 spin_unlock_irqrestore(&rf->qptable_lock, flags);
253 spin_lock_irqsave(&iwqp->lock, flags);
254 iwqp->hw_tcp_state = info->tcp_state;
255 iwqp->hw_iwarp_state = info->iwarp_state;
256 if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
257 iwqp->last_aeq = info->ae_id;
258 spin_unlock_irqrestore(&iwqp->lock, flags);
259 ctx_info = &iwqp->ctx_info;
260 if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1))
261 ctx_info->roce_info->err_rq_idx_valid = true;
263 ctx_info->iwarp_info->err_rq_idx_valid = true;
265 if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
269 switch (info->ae_id) {
270 struct irdma_cm_node *cm_node;
271 case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
272 cm_node = iwqp->cm_node;
273 if (cm_node->accept_pend) {
274 atomic_dec(&cm_node->listener->pend_accepts_cnt);
275 cm_node->accept_pend = 0;
277 iwqp->rts_ae_rcvd = 1;
278 wake_up_interruptible(&iwqp->waitq);
280 case IRDMA_AE_LLP_FIN_RECEIVED:
281 case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
284 if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
285 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
286 if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT &&
287 iwqp->ibqp_state == IB_QPS_RTS) {
288 irdma_next_iw_state(iwqp,
289 IRDMA_QP_STATE_CLOSING,
291 irdma_cm_disconn(iwqp);
293 irdma_schedule_cm_timer(iwqp->cm_node,
294 (struct irdma_puda_buf *)iwqp,
295 IRDMA_TIMER_TYPE_CLOSE,
299 case IRDMA_AE_LLP_CLOSE_COMPLETE:
301 irdma_terminate_done(qp, 0);
303 irdma_cm_disconn(iwqp);
305 case IRDMA_AE_BAD_CLOSE:
306 case IRDMA_AE_RESET_SENT:
307 irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
309 irdma_cm_disconn(iwqp);
311 case IRDMA_AE_LLP_CONNECTION_RESET:
312 if (atomic_read(&iwqp->close_timer_started))
314 irdma_cm_disconn(iwqp);
316 case IRDMA_AE_QP_SUSPEND_COMPLETE:
317 if (iwqp->iwdev->vsi.tc_change_pending) {
318 atomic_dec(&iwqp->sc_qp.vsi->qp_suspend_reqs);
319 wake_up(&iwqp->iwdev->suspend_wq);
322 case IRDMA_AE_TERMINATE_SENT:
323 irdma_terminate_send_fin(qp);
325 case IRDMA_AE_LLP_TERMINATE_RECEIVED:
326 irdma_terminate_received(qp, info);
328 case IRDMA_AE_CQ_OPERATION_ERROR:
329 ibdev_err(&iwdev->ibdev,
330 "Processing an iWARP related AE for CQ misc = 0x%04X\n",
332 cq = (struct irdma_sc_cq *)(unsigned long)
337 if (iwcq->ibcq.event_handler) {
338 struct ib_event ibevent;
340 ibevent.device = iwcq->ibcq.device;
341 ibevent.event = IB_EVENT_CQ_ERR;
342 ibevent.element.cq = &iwcq->ibcq;
343 iwcq->ibcq.event_handler(&ibevent,
344 iwcq->ibcq.cq_context);
347 case IRDMA_AE_RESET_NOT_SENT:
348 case IRDMA_AE_LLP_DOUBT_REACHABILITY:
349 case IRDMA_AE_RESOURCE_EXHAUSTION:
351 case IRDMA_AE_PRIV_OPERATION_DENIED:
352 case IRDMA_AE_STAG_ZERO_INVALID:
353 case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
354 case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
355 case IRDMA_AE_DDP_UBE_INVALID_MO:
356 case IRDMA_AE_DDP_UBE_INVALID_QN:
357 case IRDMA_AE_DDP_NO_L_BIT:
358 case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
359 case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
360 case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
361 case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
362 case IRDMA_AE_INVALID_ARP_ENTRY:
363 case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
364 case IRDMA_AE_STALE_ARP_ENTRY:
365 case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
366 case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
367 case IRDMA_AE_LLP_SYN_RECEIVED:
368 case IRDMA_AE_LLP_TOO_MANY_RETRIES:
369 case IRDMA_AE_LCE_QP_CATASTROPHIC:
370 case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
371 case IRDMA_AE_LCE_CQ_CATASTROPHIC:
372 case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
373 if (rdma_protocol_roce(&iwdev->ibdev, 1))
374 ctx_info->roce_info->err_rq_idx_valid = false;
376 ctx_info->iwarp_info->err_rq_idx_valid = false;
379 ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d\n",
380 info->ae_id, info->qp, info->qp_cq_id);
381 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
382 if (!info->sq && ctx_info->roce_info->err_rq_idx_valid) {
383 ctx_info->roce_info->err_rq_idx = info->wqe_idx;
384 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
387 irdma_set_flush_fields(qp, info);
388 irdma_cm_disconn(iwqp);
391 if (!info->sq && ctx_info->iwarp_info->err_rq_idx_valid) {
392 ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
393 ctx_info->tcp_info_valid = false;
394 ctx_info->iwarp_info_valid = true;
395 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
398 if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&
399 iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {
400 irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
401 irdma_cm_disconn(iwqp);
403 irdma_terminate_connection(qp, info);
408 irdma_qp_rem_ref(&iwqp->ibqp);
412 irdma_sc_repost_aeq_entries(dev, aeqcnt);
416 * irdma_ena_intr - set up device interrupts
417 * @dev: hardware control device structure
418 * @msix_id: id of the interrupt to be enabled
420 static void irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
422 dev->irq_ops->irdma_en_irq(dev, msix_id);
426 * irdma_dpc - tasklet for aeq and ceq 0
427 * @t: tasklet_struct ptr
429 static void irdma_dpc(struct tasklet_struct *t)
431 struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet);
434 irdma_process_ceq(rf, rf->ceqlist);
435 irdma_process_aeq(rf);
436 irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);
440 * irdma_ceq_dpc - dpc handler for CEQ
441 * @t: tasklet_struct ptr
443 static void irdma_ceq_dpc(struct tasklet_struct *t)
445 struct irdma_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
446 struct irdma_pci_f *rf = iwceq->rf;
448 irdma_process_ceq(rf, iwceq);
449 irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx);
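/*
 * from_tasklet() used above is container_of() specialized for
 * struct tasklet_struct; the lookup in irdma_ceq_dpc() is equivalent to
 * this open-coded form:
 *
 *	struct irdma_ceq *iwceq = container_of(t, struct irdma_ceq,
 *					       dpc_tasklet);
 */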
453 * irdma_save_msix_info - copy msix vector information to iwarp device
454 * @rf: RDMA PCI function
456 * Allocate iwdev msix table and copy the msix info to the table
457 * Return 0 if successful, otherwise return error
459 static enum irdma_status_code irdma_save_msix_info(struct irdma_pci_f *rf)
461 struct irdma_qvlist_info *iw_qvlist;
462 struct irdma_qv_info *iw_qvinfo;
463 struct msix_entry *pmsix;
469 return IRDMA_ERR_NO_INTR;
471 size = sizeof(struct irdma_msix_vector) * rf->msix_count;
472 size += struct_size(iw_qvlist, qv_info, rf->msix_count);
473 rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
475 return IRDMA_ERR_NO_MEMORY;
477 rf->iw_qvlist = (struct irdma_qvlist_info *)
478 (&rf->iw_msixtbl[rf->msix_count]);
479 iw_qvlist = rf->iw_qvlist;
480 iw_qvinfo = iw_qvlist->qv_info;
481 iw_qvlist->num_vectors = rf->msix_count;
482 if (rf->msix_count <= num_online_cpus())
483 rf->msix_shared = true;
485 pmsix = rf->msix_entries;
486 for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
487 rf->iw_msixtbl[i].idx = pmsix->entry;
488 rf->iw_msixtbl[i].irq = pmsix->vector;
489 rf->iw_msixtbl[i].cpu_affinity = ceq_idx;
491 iw_qvinfo->aeq_idx = 0;
493 iw_qvinfo->ceq_idx = ceq_idx++;
495 iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX;
497 iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;
498 iw_qvinfo->ceq_idx = ceq_idx++;
500 iw_qvinfo->itr_idx = 3;
501 iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
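/*
 * The single kzalloc() above backs both the msix table and the trailing
 * qvlist, with struct_size() computing the flexible-array portion in an
 * overflow-safe way. The generic pattern (illustrative names only):
 *
 *	struct example_list {
 *		u32 num;
 *		struct example_item items[];
 *	};
 *
 *	list = kzalloc(struct_size(list, items, n), GFP_KERNEL);
 */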
509 * irdma_irq_handler - interrupt handler for aeq and ceq0
510 * @irq: Interrupt request number
511 * @data: RDMA PCI function
513 static irqreturn_t irdma_irq_handler(int irq, void *data)
515 struct irdma_pci_f *rf = data;
517 tasklet_schedule(&rf->dpc_tasklet);
523 * irdma_ceq_handler - interrupt handler for ceq
524 * @irq: interrupt request number
527 static irqreturn_t irdma_ceq_handler(int irq, void *data)
529 struct irdma_ceq *iwceq = data;
531 if (iwceq->irq != irq)
532 ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n",
534 tasklet_schedule(&iwceq->dpc_tasklet);
540 * irdma_destroy_irq - destroy device interrupts
541 * @rf: RDMA PCI function
542 * @msix_vec: msix vector to disable irq
543 * @dev_id: parameter to pass to free_irq (used during irq setup)
545 * The function is called when destroying aeq/ceq
547 static void irdma_destroy_irq(struct irdma_pci_f *rf,
548 struct irdma_msix_vector *msix_vec, void *dev_id)
550 struct irdma_sc_dev *dev = &rf->sc_dev;
552 dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
553 irq_update_affinity_hint(msix_vec->irq, NULL);
554 free_irq(msix_vec->irq, dev_id);
558 * irdma_destroy_cqp - destroy control qp
559 * @rf: RDMA PCI function
560 * @free_hwcqp: 1 if hw cqp should be freed
562 * Issue destroy cqp request and
563 * free the resources associated with the cqp
565 static void irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
567 enum irdma_status_code status = 0;
568 struct irdma_sc_dev *dev = &rf->sc_dev;
569 struct irdma_cqp *cqp = &rf->cqp;
572 destroy_workqueue(rf->cqp_cmpl_wq);
574 status = irdma_sc_cqp_destroy(dev->cqp);
576 ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);
578 irdma_cleanup_pending_cqp_op(rf);
579 dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
582 kfree(cqp->scratch_array);
583 cqp->scratch_array = NULL;
584 kfree(cqp->cqp_requests);
585 cqp->cqp_requests = NULL;
588 static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf)
590 struct irdma_aeq *aeq = &rf->aeq;
591 u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
592 dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
594 irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt);
595 irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
600 * irdma_destroy_aeq - destroy aeq
601 * @rf: RDMA PCI function
603 * Issue a destroy aeq request and
604 * free the resources associated with the aeq
605 * The function is called during driver unload
607 static void irdma_destroy_aeq(struct irdma_pci_f *rf)
609 enum irdma_status_code status = IRDMA_ERR_NOT_READY;
610 struct irdma_sc_dev *dev = &rf->sc_dev;
611 struct irdma_aeq *aeq = &rf->aeq;
613 if (!rf->msix_shared) {
614 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
615 irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
620 aeq->sc_aeq.size = 0;
621 status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY);
623 ibdev_dbg(to_ibdev(dev), "ERR: Destroy AEQ failed %d\n", status);
626 if (aeq->virtual_map) {
627 irdma_destroy_virt_aeq(rf);
629 dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
636 * irdma_destroy_ceq - destroy ceq
637 * @rf: RDMA PCI function
638 * @iwceq: ceq to be destroyed
640 * Issue a destroy ceq request and
641 * free the resources associated with the ceq
643 static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
645 enum irdma_status_code status;
646 struct irdma_sc_dev *dev = &rf->sc_dev;
651 status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1);
653 ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy command failed %d\n", status);
657 status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq);
659 ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy completion failed %d\n",
662 dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va,
664 iwceq->mem.va = NULL;
668 * irdma_del_ceq_0 - destroy ceq 0
669 * @rf: RDMA PCI function
671 * Disable the CEQ 0 interrupt and destroy CEQ 0
673 static void irdma_del_ceq_0(struct irdma_pci_f *rf)
675 struct irdma_ceq *iwceq = rf->ceqlist;
676 struct irdma_msix_vector *msix_vec;
678 if (rf->msix_shared) {
679 msix_vec = &rf->iw_msixtbl[0];
680 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
682 msix_vec->idx, false);
683 irdma_destroy_irq(rf, msix_vec, rf);
685 msix_vec = &rf->iw_msixtbl[1];
686 irdma_destroy_irq(rf, msix_vec, iwceq);
689 irdma_destroy_ceq(rf, iwceq);
690 rf->sc_dev.ceq_valid = false;
695 * irdma_del_ceqs - destroy all CEQs except CEQ 0
696 * @rf: RDMA PCI function
698 * Go through all of the device CEQs except CEQ 0, and for each one
699 * disable its interrupt and destroy it
701 static void irdma_del_ceqs(struct irdma_pci_f *rf)
703 struct irdma_ceq *iwceq = &rf->ceqlist[1];
704 struct irdma_msix_vector *msix_vec;
708 msix_vec = &rf->iw_msixtbl[1];
710 msix_vec = &rf->iw_msixtbl[2];
712 for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
713 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id,
714 msix_vec->idx, false);
715 irdma_destroy_irq(rf, msix_vec, iwceq);
716 irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
717 IRDMA_OP_CEQ_DESTROY);
718 dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size,
719 iwceq->mem.va, iwceq->mem.pa);
720 iwceq->mem.va = NULL;
726 * irdma_destroy_ccq - destroy control cq
727 * @rf: RDMA PCI function
729 * Issue destroy ccq request and
730 * free the resources associated with the ccq
732 static void irdma_destroy_ccq(struct irdma_pci_f *rf)
734 struct irdma_sc_dev *dev = &rf->sc_dev;
735 struct irdma_ccq *ccq = &rf->ccq;
736 enum irdma_status_code status = 0;
739 status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
741 ibdev_dbg(to_ibdev(dev), "ERR: CCQ destroy failed %d\n", status);
742 dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va,
744 ccq->mem_cq.va = NULL;
748 * irdma_close_hmc_objects_type - delete hmc objects of a given type
750 * @obj_type: the hmc object type to be deleted
751 * @hmc_info: host memory info struct
752 * @privileged: permission to close HMC objects
753 * @reset: true if called before reset
755 static void irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
756 enum irdma_hmc_rsrc_type obj_type,
757 struct irdma_hmc_info *hmc_info,
758 bool privileged, bool reset)
760 struct irdma_hmc_del_obj_info info = {};
762 info.hmc_info = hmc_info;
763 info.rsrc_type = obj_type;
764 info.count = hmc_info->hmc_obj[obj_type].cnt;
765 info.privileged = privileged;
766 if (irdma_sc_del_hmc_obj(dev, &info, reset))
767 ibdev_dbg(to_ibdev(dev), "ERR: del HMC obj of type %d failed\n",
772 * irdma_del_hmc_objects - remove all device hmc objects
774 * @hmc_info: hmc_info to free
775 * @privileged: permission to delete HMC objects
776 * @reset: true if called before reset
777 * @vers: hardware version
779 static void irdma_del_hmc_objects(struct irdma_sc_dev *dev,
780 struct irdma_hmc_info *hmc_info, bool privileged,
781 bool reset, enum irdma_vers vers)
785 for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
786 if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
787 irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
788 hmc_info, privileged, reset);
789 if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
795 * irdma_create_hmc_obj_type - create hmc object of a given type
796 * @dev: hardware control device structure
797 * @info: information for the hmc object to create
799 static enum irdma_status_code
800 irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
801 struct irdma_hmc_create_obj_info *info)
803 return irdma_sc_create_hmc_obj(dev, info);
807 * irdma_create_hmc_objs - create all hmc objects for the device
808 * @rf: RDMA PCI function
809 * @privileged: permission to create HMC objects
812 * Create the device hmc objects and allocate hmc pages
813 * Return 0 if successful, otherwise clean up and return error
815 static enum irdma_status_code
816 irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged, enum irdma_vers vers)
818 struct irdma_sc_dev *dev = &rf->sc_dev;
819 struct irdma_hmc_create_obj_info info = {};
820 enum irdma_status_code status = 0;
823 info.hmc_info = dev->hmc_info;
824 info.privileged = privileged;
825 info.entry_type = rf->sd_type;
827 for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
828 if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
829 info.rsrc_type = iw_hmc_obj_types[i];
830 info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
832 status = irdma_create_hmc_obj_type(dev, &info);
834 ibdev_dbg(to_ibdev(dev),
835 "ERR: create obj type %d status = %d\n",
836 iw_hmc_obj_types[i], status);
840 if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
845 return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id,
850 /* destroy the hmc objects of a given type */
851 if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
852 irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
853 dev->hmc_info, privileged,
861 * irdma_obj_aligned_mem - get aligned memory from device allocated memory
862 * @rf: RDMA PCI function
863 * @memptr: points to the memory addresses
864 * @size: size of memory needed
865 * @mask: mask for the aligned memory
867 * Get aligned memory of the requested size and
868 * update the memptr to point to the new aligned memory
869 * Return 0 if successful, otherwise return no memory error
871 static enum irdma_status_code
872 irdma_obj_aligned_mem(struct irdma_pci_f *rf, struct irdma_dma_mem *memptr,
875 unsigned long va, newva;
878 va = (unsigned long)rf->obj_next.va;
881 newva = ALIGN(va, (unsigned long)mask + 1ULL);
883 memptr->va = (u8 *)va + extra;
884 memptr->pa = rf->obj_next.pa + extra;
886 if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
887 return IRDMA_ERR_NO_MEMORY;
889 rf->obj_next.va = (u8 *)memptr->va + size;
890 rf->obj_next.pa = memptr->pa + size;
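/*
 * Worked example of the alignment math above (illustrative values): with
 * mask = 0x3F (64-byte alignment) and va = 0x1010,
 * newva = ALIGN(0x1010, 0x40) = 0x1040, so extra = 0x30 and both
 * memptr->va and memptr->pa start 0x30 bytes past obj_next.
 */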
896 * irdma_create_cqp - create control qp
897 * @rf: RDMA PCI function
899 * Return 0, if the cqp and all the resources associated with it
900 * are successfully created, otherwise return error
902 static enum irdma_status_code irdma_create_cqp(struct irdma_pci_f *rf)
904 enum irdma_status_code status;
905 u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;
906 struct irdma_dma_mem mem;
907 struct irdma_sc_dev *dev = &rf->sc_dev;
908 struct irdma_cqp_init_info cqp_init_info = {};
909 struct irdma_cqp *cqp = &rf->cqp;
910 u16 maj_err, min_err;
913 cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
914 if (!cqp->cqp_requests)
915 return IRDMA_ERR_NO_MEMORY;
917 cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
918 if (!cqp->scratch_array) {
919 kfree(cqp->cqp_requests);
920 return IRDMA_ERR_NO_MEMORY;
923 dev->cqp = &cqp->sc_cqp;
925 cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,
926 IRDMA_CQP_ALIGNMENT);
927 cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size,
928 &cqp->sq.pa, GFP_KERNEL);
930 kfree(cqp->scratch_array);
931 kfree(cqp->cqp_requests);
932 return IRDMA_ERR_NO_MEMORY;
935 status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
936 IRDMA_HOST_CTX_ALIGNMENT_M);
940 dev->cqp->host_ctx_pa = mem.pa;
941 dev->cqp->host_ctx = mem.va;
942 /* populate the cqp init info */
943 cqp_init_info.dev = dev;
944 cqp_init_info.sq_size = sqsize;
945 cqp_init_info.sq = cqp->sq.va;
946 cqp_init_info.sq_pa = cqp->sq.pa;
947 cqp_init_info.host_ctx_pa = mem.pa;
948 cqp_init_info.host_ctx = mem.va;
949 cqp_init_info.hmc_profile = rf->rsrc_profile;
950 cqp_init_info.scratch_array = cqp->scratch_array;
951 cqp_init_info.protocol_used = rf->protocol_used;
953 switch (rf->rdma_ver) {
955 cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1;
958 cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
961 status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
963 ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status);
967 spin_lock_init(&cqp->req_lock);
968 spin_lock_init(&cqp->compl_lock);
970 status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err);
972 ibdev_dbg(to_ibdev(dev),
973 "ERR: cqp create failed - status %d maj_err %d min_err %d\n",
974 status, maj_err, min_err);
978 INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
979 INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
981 /* init the waitqueue of the cqp_requests and add them to the list */
982 for (i = 0; i < sqsize; i++) {
983 init_waitqueue_head(&cqp->cqp_requests[i].waitq);
984 list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
986 init_waitqueue_head(&cqp->remove_wq);
990 irdma_destroy_cqp(rf, false);
996 * irdma_create_ccq - create control cq
997 * @rf: RDMA PCI function
999 * Return 0, if the ccq and the resources associated with it
1000 * are successfully created, otherwise return error
1002 static enum irdma_status_code irdma_create_ccq(struct irdma_pci_f *rf)
1004 struct irdma_sc_dev *dev = &rf->sc_dev;
1005 enum irdma_status_code status;
1006 struct irdma_ccq_init_info info = {};
1007 struct irdma_ccq *ccq = &rf->ccq;
1009 dev->ccq = &ccq->sc_cq;
1010 dev->ccq->dev = dev;
1012 ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
1013 ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE,
1014 IRDMA_CQ0_ALIGNMENT);
1015 ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
1016 &ccq->mem_cq.pa, GFP_KERNEL);
1017 if (!ccq->mem_cq.va)
1018 return IRDMA_ERR_NO_MEMORY;
1020 status = irdma_obj_aligned_mem(rf, &ccq->shadow_area,
1021 ccq->shadow_area.size,
1022 IRDMA_SHADOWAREA_M);
1026 ccq->sc_cq.back_cq = ccq;
1027 /* populate the ccq init info */
1028 info.cq_base = ccq->mem_cq.va;
1029 info.cq_pa = ccq->mem_cq.pa;
1030 info.num_elem = IW_CCQ_SIZE;
1031 info.shadow_area = ccq->shadow_area.va;
1032 info.shadow_area_pa = ccq->shadow_area.pa;
1033 info.ceqe_mask = false;
1034 info.ceq_id_valid = true;
1035 info.shadow_read_threshold = 16;
1036 info.vsi = &rf->default_vsi;
1037 status = irdma_sc_ccq_init(dev->ccq, &info);
1039 status = irdma_sc_ccq_create(dev->ccq, 0, true, true);
1042 dma_free_coherent(dev->hw->device, ccq->mem_cq.size,
1043 ccq->mem_cq.va, ccq->mem_cq.pa);
1044 ccq->mem_cq.va = NULL;
1051 * irdma_alloc_set_mac - set up a mac address table entry
1052 * @iwdev: irdma device
1054 * Allocate a mac ip entry and add it to the hw table. Return 0
1055 * if successful, otherwise return error
1057 static enum irdma_status_code irdma_alloc_set_mac(struct irdma_device *iwdev)
1059 enum irdma_status_code status;
1061 status = irdma_alloc_local_mac_entry(iwdev->rf,
1062 &iwdev->mac_ip_table_idx);
1064 status = irdma_add_local_mac_entry(iwdev->rf,
1065 (const u8 *)iwdev->netdev->dev_addr,
1066 (u8)iwdev->mac_ip_table_idx);
1068 irdma_del_local_mac_entry(iwdev->rf,
1069 (u8)iwdev->mac_ip_table_idx);
1075 * irdma_cfg_ceq_vector - set up the msix interrupt vector for
1077 * @rf: RDMA PCI function
1078 * @iwceq: ceq associated with the vector
1079 * @ceq_id: the id number of the iwceq
1080 * @msix_vec: interrupt vector information
1082 * Allocate interrupt resources and enable irq handling
1083 * Return 0 if successful, otherwise return error
1085 static enum irdma_status_code
1086 irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
1087 u32 ceq_id, struct irdma_msix_vector *msix_vec)
1091 if (rf->msix_shared && !ceq_id) {
1092 tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
1093 status = request_irq(msix_vec->irq, irdma_irq_handler, 0,
1096 tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc);
1098 status = request_irq(msix_vec->irq, irdma_ceq_handler, 0,
1101 cpumask_clear(&msix_vec->mask);
1102 cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
1103 irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
1105 ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
1106 return IRDMA_ERR_CFG;
1109 msix_vec->ceq_id = ceq_id;
1110 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);
1116 * irdma_cfg_aeq_vector - set up the msix vector for aeq
1117 * @rf: RDMA PCI function
1119 * Allocate interrupt resources and enable irq handling
1120 * Return 0 if successful, otherwise return error
1122 static enum irdma_status_code irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
1124 struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
1127 if (!rf->msix_shared) {
1128 tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
1129 ret = request_irq(msix_vec->irq, irdma_irq_handler, 0,
1133 ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
1134 return IRDMA_ERR_CFG;
1137 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);
1143 * irdma_create_ceq - create completion event queue
1144 * @rf: RDMA PCI function
1145 * @iwceq: pointer to the ceq resources to be created
1146 * @ceq_id: the id number of the iwceq
1147 * @vsi: SC vsi struct
1149 * Return 0, if the ceq and the resources associated with it
1150 * are successfully created, otherwise return error
1152 static enum irdma_status_code irdma_create_ceq(struct irdma_pci_f *rf,
1153 struct irdma_ceq *iwceq,
1155 struct irdma_sc_vsi *vsi)
1157 enum irdma_status_code status;
1158 struct irdma_ceq_init_info info = {};
1159 struct irdma_sc_dev *dev = &rf->sc_dev;
1163 info.ceq_id = ceq_id;
1165 ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
1166 dev->hw_attrs.max_hw_ceq_size);
1167 iwceq->mem.size = ALIGN(sizeof(struct irdma_ceqe) * ceq_size,
1168 IRDMA_CEQ_ALIGNMENT);
1169 iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size,
1170 &iwceq->mem.pa, GFP_KERNEL);
1172 return IRDMA_ERR_NO_MEMORY;
1174 info.ceq_id = ceq_id;
1175 info.ceqe_base = iwceq->mem.va;
1176 info.ceqe_pa = iwceq->mem.pa;
1177 info.elem_cnt = ceq_size;
1178 iwceq->sc_ceq.ceq_id = ceq_id;
1181 scratch = (uintptr_t)&rf->cqp.sc_cqp;
1182 status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
1185 status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
1186 IRDMA_OP_CEQ_CREATE);
1188 status = irdma_sc_cceq_create(&iwceq->sc_ceq, scratch);
1192 dma_free_coherent(dev->hw->device, iwceq->mem.size,
1193 iwceq->mem.va, iwceq->mem.pa);
1194 iwceq->mem.va = NULL;
1201 * irdma_setup_ceq_0 - create CEQ 0 and its interrupt resource
1202 * @rf: RDMA PCI function
1204 * Allocate a list for all device completion event queues
1205 * Create CEQ 0 and configure its msix interrupt vector
1206 * Return 0, if successfully set up, otherwise return error
1208 static enum irdma_status_code irdma_setup_ceq_0(struct irdma_pci_f *rf)
1210 struct irdma_ceq *iwceq;
1211 struct irdma_msix_vector *msix_vec;
1213 enum irdma_status_code status = 0;
1216 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
1217 rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
1219 status = IRDMA_ERR_NO_MEMORY;
1223 iwceq = &rf->ceqlist[0];
1224 status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
1226 ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
1231 spin_lock_init(&iwceq->ce_lock);
1232 i = rf->msix_shared ? 0 : 1;
1233 msix_vec = &rf->iw_msixtbl[i];
1234 iwceq->irq = msix_vec->irq;
1235 iwceq->msix_idx = msix_vec->idx;
1236 status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec);
1238 irdma_destroy_ceq(rf, iwceq);
1242 irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
1246 if (status && !rf->ceqs_count) {
1251 rf->sc_dev.ceq_valid = true;
1257 * irdma_setup_ceqs - manage the device CEQs and their interrupt resources
1258 * @rf: RDMA PCI function
1259 * @vsi: VSI structure for this CEQ
1261 * Allocate a list for all device completion event queues
1262 * Create the CEQs and configure their msix interrupt vectors
1263 * Return 0, if ceqs are successfully set up, otherwise return error
1265 static enum irdma_status_code irdma_setup_ceqs(struct irdma_pci_f *rf,
1266 struct irdma_sc_vsi *vsi)
1270 struct irdma_ceq *iwceq;
1271 struct irdma_msix_vector *msix_vec;
1272 enum irdma_status_code status;
1275 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
1276 i = (rf->msix_shared) ? 1 : 2;
1277 for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
1278 iwceq = &rf->ceqlist[ceq_id];
1279 status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
1281 ibdev_dbg(&rf->iwdev->ibdev,
1282 "ERR: create ceq status = %d\n", status);
1285 spin_lock_init(&iwceq->ce_lock);
1286 msix_vec = &rf->iw_msixtbl[i];
1287 iwceq->irq = msix_vec->irq;
1288 iwceq->msix_idx = msix_vec->idx;
1289 status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec);
1291 irdma_destroy_ceq(rf, iwceq);
1294 irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
1306 static enum irdma_status_code irdma_create_virt_aeq(struct irdma_pci_f *rf,
1309 enum irdma_status_code status = IRDMA_ERR_NO_MEMORY;
1310 struct irdma_aeq *aeq = &rf->aeq;
1314 if (rf->rdma_ver < IRDMA_GEN_2)
1315 return IRDMA_NOT_SUPPORTED;
1317 aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size;
1318 aeq->mem.va = vzalloc(aeq->mem.size);
1323 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
1324 status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true);
1330 pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
1331 status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt);
1333 irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
1342 * irdma_create_aeq - create async event queue
1343 * @rf: RDMA PCI function
1345 * Return 0, if the aeq and the resources associated with it
1346 * are successfully created, otherwise return error
1348 static enum irdma_status_code irdma_create_aeq(struct irdma_pci_f *rf)
1350 enum irdma_status_code status;
1351 struct irdma_aeq_init_info info = {};
1352 struct irdma_sc_dev *dev = &rf->sc_dev;
1353 struct irdma_aeq *aeq = &rf->aeq;
1354 struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;
1356 u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1;
1358 aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
1359 hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
1360 aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);
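	/*
	 * Example sizing (illustrative numbers): an iWARP-only function
	 * with 4096 QPs and 8192 CQs yields
	 * aeq_size = 2 * 4096 + 8192 = 16384 entries, subject to the
	 * max_hw_aeq_size clamp above.
	 */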
1362 aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,
1363 IRDMA_AEQ_ALIGNMENT);
1364 aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size,
1366 GFP_KERNEL | __GFP_NOWARN);
1370 /* Physically contiguous AEQ allocation failed; set up a virtual AEQ */
1371 status = irdma_create_virt_aeq(rf, aeq_size);
1375 info.virtual_map = true;
1376 aeq->virtual_map = info.virtual_map;
1377 info.pbl_chunk_size = 1;
1378 info.first_pm_pbl_idx = aeq->palloc.level1.idx;
1381 info.aeqe_base = aeq->mem.va;
1382 info.aeq_elem_pa = aeq->mem.pa;
1383 info.elem_cnt = aeq_size;
1385 info.msix_idx = rf->iw_msixtbl->idx;
1386 status = irdma_sc_aeq_init(&aeq->sc_aeq, &info);
1390 status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_CREATE);
1397 if (aeq->virtual_map) {
1398 irdma_destroy_virt_aeq(rf);
1400 dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
1409 * irdma_setup_aeq - set up the device aeq
1410 * @rf: RDMA PCI function
1412 * Create the aeq and configure its msix interrupt vector
1413 * Return 0 if successful, otherwise return error
1415 static enum irdma_status_code irdma_setup_aeq(struct irdma_pci_f *rf)
1417 struct irdma_sc_dev *dev = &rf->sc_dev;
1418 enum irdma_status_code status;
1420 status = irdma_create_aeq(rf);
1424 status = irdma_cfg_aeq_vector(rf);
1426 irdma_destroy_aeq(rf);
1430 if (!rf->msix_shared)
1431 irdma_ena_intr(dev, rf->iw_msixtbl[0].idx);
1437 * irdma_initialize_ilq - create iwarp local queue for cm
1438 * @iwdev: irdma device
1440 * Return 0 if successful, otherwise return error
1442 static enum irdma_status_code irdma_initialize_ilq(struct irdma_device *iwdev)
1444 struct irdma_puda_rsrc_info info = {};
1445 enum irdma_status_code status;
1447 info.type = IRDMA_PUDA_RSRC_TYPE_ILQ;
1452 info.abi_ver = IRDMA_ABI_VER;
1453 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
1454 info.rq_size = info.sq_size;
1455 info.buf_size = 1024;
1456 info.tx_buf_cnt = 2 * info.sq_size;
1457 info.receive = irdma_receive_ilq;
1458 info.xmit_complete = irdma_free_sqbuf;
1459 status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
1461 ibdev_dbg(&iwdev->ibdev, "ERR: ilq create fail\n");
1467 * irdma_initialize_ieq - create iwarp exception queue
1468 * @iwdev: irdma device
1470 * Return 0 if successful, otherwise return error
1472 static enum irdma_status_code irdma_initialize_ieq(struct irdma_device *iwdev)
1474 struct irdma_puda_rsrc_info info = {};
1475 enum irdma_status_code status;
1477 info.type = IRDMA_PUDA_RSRC_TYPE_IEQ;
1479 info.qp_id = iwdev->vsi.exception_lan_q;
1482 info.abi_ver = IRDMA_ABI_VER;
1483 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
1484 info.rq_size = info.sq_size;
1485 info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD;
1486 info.tx_buf_cnt = 4096;
1487 status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
1489 ibdev_dbg(&iwdev->ibdev, "ERR: ieq create fail\n");
1495 * irdma_reinitialize_ieq - destroy and re-create ieq
1496 * @vsi: VSI structure
1498 void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
1500 struct irdma_device *iwdev = vsi->back_vsi;
1501 struct irdma_pci_f *rf = iwdev->rf;
1503 irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
1504 if (irdma_initialize_ieq(iwdev)) {
1505 iwdev->rf->reset = true;
1506 rf->gen_ops.request_reset(rf);
1511 * irdma_hmc_setup - create hmc objects for the device
1512 * @rf: RDMA PCI function
1514 * Set up the device private memory space for the number and size of
1515 * the hmc objects and create the objects
1516 * Return 0 if successful, otherwise return error
1518 static enum irdma_status_code irdma_hmc_setup(struct irdma_pci_f *rf)
1520 enum irdma_status_code status;
1523 if (rf->rdma_ver == IRDMA_GEN_1)
1524 qpcnt = rsrc_limits_table[rf->limits_sel].qplimit * 2;
1526 qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
1528 rf->sd_type = IRDMA_SD_TYPE_DIRECT;
1529 status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt);
1533 status = irdma_create_hmc_objs(rf, true, rf->rdma_ver);
1539 * irdma_del_init_mem - deallocate memory resources
1540 * @rf: RDMA PCI function
1542 static void irdma_del_init_mem(struct irdma_pci_f *rf)
1544 struct irdma_sc_dev *dev = &rf->sc_dev;
1546 kfree(dev->hmc_info->sd_table.sd_entry);
1547 dev->hmc_info->sd_table.sd_entry = NULL;
1548 kfree(rf->mem_rsrc);
1549 rf->mem_rsrc = NULL;
1550 dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
1552 rf->obj_mem.va = NULL;
1553 if (rf->rdma_ver != IRDMA_GEN_1) {
1554 kfree(rf->allocated_ws_nodes);
1555 rf->allocated_ws_nodes = NULL;
1559 kfree(rf->iw_msixtbl);
1560 rf->iw_msixtbl = NULL;
1561 kfree(rf->hmc_info_mem);
1562 rf->hmc_info_mem = NULL;
1566 * irdma_initialize_dev - initialize device
1567 * @rf: RDMA PCI function
1569 * Allocate memory for the hmc objects and initialize iwdev
1570 * Return 0 if successful, otherwise clean up the resources
1573 static enum irdma_status_code irdma_initialize_dev(struct irdma_pci_f *rf)
1575 enum irdma_status_code status;
1576 struct irdma_sc_dev *dev = &rf->sc_dev;
1577 struct irdma_device_init_info info = {};
1578 struct irdma_dma_mem mem;
1581 size = sizeof(struct irdma_hmc_pble_rsrc) +
1582 sizeof(struct irdma_hmc_info) +
1583 (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX);
1585 rf->hmc_info_mem = kzalloc(size, GFP_KERNEL);
1586 if (!rf->hmc_info_mem)
1587 return IRDMA_ERR_NO_MEMORY;
1589 rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem;
1590 dev->hmc_info = &rf->hw.hmc;
1591 dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *)
1592 (rf->pble_rsrc + 1);
1594 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE,
1595 IRDMA_FPM_QUERY_BUF_ALIGNMENT_M);
1599 info.fpm_query_buf_pa = mem.pa;
1600 info.fpm_query_buf = mem.va;
1602 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE,
1603 IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M);
1607 info.fpm_commit_buf_pa = mem.pa;
1608 info.fpm_commit_buf = mem.va;
1610 info.bar0 = rf->hw.hw_addr;
1611 info.hmc_fn_id = PCI_FUNC(rf->pcidev->devfn);
1613 status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
1619 kfree(rf->hmc_info_mem);
1620 rf->hmc_info_mem = NULL;
1626 * irdma_rt_deinit_hw - clean up the irdma device resources
1627 * @iwdev: irdma device
1629 * Remove the mac ip entry and ipv4/ipv6 addresses, destroy the
1630 * device queues and free the pble and the hmc objects
1632 void irdma_rt_deinit_hw(struct irdma_device *iwdev)
1634 ibdev_dbg(&iwdev->ibdev, "INIT: state = %d\n", iwdev->init_state);
1636 switch (iwdev->init_state) {
1637 case IP_ADDR_REGISTERED:
1638 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1639 irdma_del_local_mac_entry(iwdev->rf,
1640 (u8)iwdev->mac_ip_table_idx);
1643 case PBLE_CHUNK_MEM:
1646 if (!iwdev->roce_mode)
1647 irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
1651 if (!iwdev->roce_mode)
1652 irdma_puda_dele_rsrc(&iwdev->vsi,
1653 IRDMA_PUDA_RSRC_TYPE_ILQ,
1657 ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
1661 irdma_cleanup_cm_core(&iwdev->cm_core);
1662 if (iwdev->vsi.pestat) {
1663 irdma_vsi_stats_free(&iwdev->vsi);
1664 kfree(iwdev->vsi.pestat);
1666 if (iwdev->cleanup_wq)
1667 destroy_workqueue(iwdev->cleanup_wq);
1670 static enum irdma_status_code irdma_setup_init_state(struct irdma_pci_f *rf)
1672 enum irdma_status_code status;
1674 status = irdma_save_msix_info(rf);
1678 rf->hw.device = &rf->pcidev->dev;
1679 rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE);
1680 rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size,
1681 &rf->obj_mem.pa, GFP_KERNEL);
1682 if (!rf->obj_mem.va) {
1683 status = IRDMA_ERR_NO_MEMORY;
1687 rf->obj_next = rf->obj_mem;
1688 status = irdma_initialize_dev(rf);
1695 dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
1697 rf->obj_mem.va = NULL;
1699 kfree(rf->iw_msixtbl);
1700 rf->iw_msixtbl = NULL;
1705 * irdma_get_used_rsrc - determine resources used internally
1706 * @iwdev: irdma device
1708 * Called at the end of open to get all internal allocations
1710 static void irdma_get_used_rsrc(struct irdma_device *iwdev)
1712 iwdev->rf->used_pds = find_next_zero_bit(iwdev->rf->allocated_pds,
1713 iwdev->rf->max_pd, 0);
1714 iwdev->rf->used_qps = find_next_zero_bit(iwdev->rf->allocated_qps,
1715 iwdev->rf->max_qp, 0);
1716 iwdev->rf->used_cqs = find_next_zero_bit(iwdev->rf->allocated_cqs,
1717 iwdev->rf->max_cq, 0);
1718 iwdev->rf->used_mrs = find_next_zero_bit(iwdev->rf->allocated_mrs,
1719 iwdev->rf->max_mr, 0);
1722 void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
1724 enum init_completion_state state = rf->init_state;
1726 rf->init_state = INVALID_STATE;
1727 if (rf->rsrc_created) {
1728 irdma_destroy_aeq(rf);
1729 irdma_destroy_pble_prm(rf->pble_rsrc);
1731 rf->rsrc_created = false;
1735 irdma_del_ceq_0(rf);
1738 irdma_destroy_ccq(rf);
1740 case HW_RSRC_INITIALIZED:
1741 case HMC_OBJS_CREATED:
1742 irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
1743 rf->reset, rf->rdma_ver);
1746 irdma_destroy_cqp(rf, true);
1749 irdma_del_init_mem(rf);
1753 ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state);
1759 * irdma_rt_init_hw - Initializes runtime portion of HW
1760 * @iwdev: irdma device
1761 * @l2params: qos, tc, mtu info from netdev driver
1763 * Create device queues ILQ, IEQ, CEQs and PBLEs. Set up irdma
1764 * device resource objects.
1766 enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev,
1767 struct irdma_l2params *l2params)
1769 struct irdma_pci_f *rf = iwdev->rf;
1770 struct irdma_sc_dev *dev = &rf->sc_dev;
1771 enum irdma_status_code status;
1772 struct irdma_vsi_init_info vsi_info = {};
1773 struct irdma_vsi_stats_info stats_info = {};
1776 vsi_info.back_vsi = iwdev;
1777 vsi_info.params = l2params;
1778 vsi_info.pf_data_vsi_num = iwdev->vsi_num;
1779 vsi_info.register_qset = rf->gen_ops.register_qset;
1780 vsi_info.unregister_qset = rf->gen_ops.unregister_qset;
1781 vsi_info.exception_lan_q = 2;
1782 irdma_sc_vsi_init(&iwdev->vsi, &vsi_info);
1784 status = irdma_setup_cm_core(iwdev, rf->rdma_ver);
1788 stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
1789 if (!stats_info.pestat) {
1790 irdma_cleanup_cm_core(&iwdev->cm_core);
1791 return IRDMA_ERR_NO_MEMORY;
1793 stats_info.fcn_id = dev->hmc_fn_id;
1794 status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);
1796 irdma_cleanup_cm_core(&iwdev->cm_core);
1797 kfree(stats_info.pestat);
1802 if (!iwdev->roce_mode) {
1803 status = irdma_initialize_ilq(iwdev);
1806 iwdev->init_state = ILQ_CREATED;
1807 status = irdma_initialize_ieq(iwdev);
1810 iwdev->init_state = IEQ_CREATED;
1812 if (!rf->rsrc_created) {
1813 status = irdma_setup_ceqs(rf, &iwdev->vsi);
1817 iwdev->init_state = CEQS_CREATED;
1819 status = irdma_hmc_init_pble(&rf->sc_dev,
1826 iwdev->init_state = PBLE_CHUNK_MEM;
1828 status = irdma_setup_aeq(rf);
1830 irdma_destroy_pble_prm(rf->pble_rsrc);
1834 iwdev->init_state = AEQ_CREATED;
1835 rf->rsrc_created = true;
1838 iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
1839 IB_DEVICE_MEM_WINDOW |
1840 IB_DEVICE_MEM_MGT_EXTENSIONS;
1842 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1843 irdma_alloc_set_mac(iwdev);
1844 irdma_add_ip(iwdev);
1845 iwdev->init_state = IP_ADDR_REGISTERED;
1847 /* Handles async cleanup tasks - disconnect CM, free qp,
1850 iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
1851 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
1852 if (!iwdev->cleanup_wq)
1853 return IRDMA_ERR_NO_MEMORY;
1854 irdma_get_used_rsrc(iwdev);
1855 init_waitqueue_head(&iwdev->suspend_wq);
1860 dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
1861 status, iwdev->init_state);
1862 irdma_rt_deinit_hw(iwdev);
1868 * irdma_ctrl_init_hw - Initializes control portion of HW
1869 * @rf: RDMA PCI function
1871 * Create admin queues, HMC objects and RF resource objects
1873 enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf)
1875 struct irdma_sc_dev *dev = &rf->sc_dev;
1876 enum irdma_status_code status;
1878 status = irdma_setup_init_state(rf);
1881 rf->init_state = INITIAL_STATE;
1883 status = irdma_create_cqp(rf);
1886 rf->init_state = CQP_CREATED;
1888 status = irdma_hmc_setup(rf);
1891 rf->init_state = HMC_OBJS_CREATED;
1893 status = irdma_initialize_hw_rsrc(rf);
1896 rf->init_state = HW_RSRC_INITIALIZED;
1898 status = irdma_create_ccq(rf);
1901 rf->init_state = CCQ_CREATED;
1903 dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
1904 if (rf->rdma_ver != IRDMA_GEN_1) {
1905 status = irdma_get_rdma_features(dev);
1910 status = irdma_setup_ceq_0(rf);
1913 rf->init_state = CEQ0_CREATED;
1914 /* Handles processing of CQP completions */
1915 rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq",
1916 WQ_HIGHPRI | WQ_UNBOUND);
1917 if (!rf->cqp_cmpl_wq) {
1918 status = IRDMA_ERR_NO_MEMORY;
1921 INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
1922 irdma_sc_ccq_arm(dev->ccq);
1926 dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n",
1927 rf->init_state, status);
1928 irdma_ctrl_deinit_hw(rf);
1933 * irdma_set_hw_rsrc - set hw memory resources.
1934 * @rf: RDMA PCI function
1936 static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
1938 rf->allocated_qps = (void *)(rf->mem_rsrc +
1939 (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
1940 rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
1941 rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
1942 rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
1943 rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
1944 rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
1945 rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
1946 rf->qp_table = (struct irdma_qp **)
1947 (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
1949 spin_lock_init(&rf->rsrc_lock);
1950 spin_lock_init(&rf->arp_lock);
1951 spin_lock_init(&rf->qptable_lock);
1952 spin_lock_init(&rf->qh_list_lock);
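/*
 * Resulting layout of the single rf->mem_rsrc allocation carved up above
 * (sizes computed by irdma_calc_mem_rsrc_size() below):
 *
 *	arp_table | qp bitmap | cq bitmap | mr bitmap | pd bitmap |
 *	ah bitmap | mcg bitmap | arp bitmap | qp_table pointers
 */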
1956 * irdma_calc_mem_rsrc_size - calculate memory resource size.
1957 * @rf: RDMA PCI function
1959 static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
1963 rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size;
1964 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
1965 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
1966 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
1967 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
1968 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
1969 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
1970 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
1971 rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
1977 * irdma_initialize_hw_rsrc - initialize hw resource tracking array
1978 * @rf: RDMA PCI function
1980 u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
1986 if (rf->rdma_ver != IRDMA_GEN_1) {
1987 rf->allocated_ws_nodes =
1988 kcalloc(BITS_TO_LONGS(IRDMA_MAX_WS_NODES),
1989 sizeof(unsigned long), GFP_KERNEL);
1990 if (!rf->allocated_ws_nodes)
1993 set_bit(0, rf->allocated_ws_nodes);
1994 rf->max_ws_node_id = IRDMA_MAX_WS_NODES;
1996 rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size;
1997 rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
1998 rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
1999 rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
2000 rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
2001 rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
2002 rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
2003 rf->max_mcg = rf->max_qp;
2005 rsrc_size = irdma_calc_mem_rsrc_size(rf);
2006 rf->mem_rsrc = kzalloc(rsrc_size, GFP_KERNEL);
2007 if (!rf->mem_rsrc) {
2009 goto mem_rsrc_kzalloc_fail;
2012 rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
2014 irdma_set_hw_rsrc(rf);
2016 set_bit(0, rf->allocated_mrs);
2017 set_bit(0, rf->allocated_qps);
2018 set_bit(0, rf->allocated_cqs);
2019 set_bit(0, rf->allocated_pds);
2020 set_bit(0, rf->allocated_arps);
2021 set_bit(0, rf->allocated_ahs);
2022 set_bit(0, rf->allocated_mcgs);
2023 set_bit(2, rf->allocated_qps); /* qp 2 IEQ */
2024 set_bit(1, rf->allocated_qps); /* qp 1 ILQ */
2025 set_bit(1, rf->allocated_cqs);
2026 set_bit(1, rf->allocated_pds);
2027 set_bit(2, rf->allocated_cqs);
2028 set_bit(2, rf->allocated_pds);
2030 INIT_LIST_HEAD(&rf->mc_qht_list.list);
2031 /* stag index mask has a minimum of 14 bits */
2032 mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
2033 rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
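	/*
	 * Worked example: with max_mr = 65536, get_count_order() returns
	 * 16, so mrdrvbits = 24 - 16 = 8 and
	 * mr_stagmask = ~(0xFF << 24) = 0x00FFFFFF, i.e. the top 8 bits
	 * of the STag are masked off for driver use.
	 */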
2037 mem_rsrc_kzalloc_fail:
2038 kfree(rf->allocated_ws_nodes);
2039 rf->allocated_ws_nodes = NULL;
2045 * irdma_cqp_ce_handler - handle cqp completions
2046 * @rf: RDMA PCI function
2047 * @cq: cq for cqp completions
2049 void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
2051 struct irdma_cqp_request *cqp_request;
2052 struct irdma_sc_dev *dev = &rf->sc_dev;
2054 struct irdma_ccq_cqe_info info;
2055 unsigned long flags;
2059 memset(&info, 0, sizeof(info));
2060 spin_lock_irqsave(&rf->cqp.compl_lock, flags);
2061 ret = irdma_sc_ccq_get_cqe_info(cq, &info);
2062 spin_unlock_irqrestore(&rf->cqp.compl_lock, flags);
2066 cqp_request = (struct irdma_cqp_request *)
2067 (unsigned long)info.scratch;
2068 if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
2071 ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
2072 info.op_code, info.maj_err_code, info.min_err_code);
2074 cqp_request->compl_info.maj_err_code = info.maj_err_code;
2075 cqp_request->compl_info.min_err_code = info.min_err_code;
2076 cqp_request->compl_info.op_ret_val = info.op_ret_val;
2077 cqp_request->compl_info.error = info.error;
2079 if (cqp_request->waiting) {
2080 cqp_request->request_done = true;
2081 wake_up(&cqp_request->waitq);
2082 irdma_put_cqp_request(&rf->cqp, cqp_request);
2084 if (cqp_request->callback_fcn)
2085 cqp_request->callback_fcn(cqp_request);
2086 irdma_put_cqp_request(&rf->cqp, cqp_request);
2094 irdma_process_bh(dev);
2095 irdma_sc_ccq_arm(cq);
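/*
 * Illustrative caller-side flow for a waiting request completed above
 * (using the helpers seen throughout this file):
 *
 *	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
 *	... fill cqp_request->info, then post via irdma_handle_cqp_op() ...
 *	irdma_handle_cqp_op() sleeps on cqp_request->waitq until the
 *	handler above sets request_done and calls wake_up().
 */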
2100 * cqp_compl_worker - Handle cqp completions
2101 * @work: Pointer to work structure
2103 void cqp_compl_worker(struct work_struct *work)
2105 struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
2107 struct irdma_sc_cq *cq = &rf->ccq.sc_cq;
2109 irdma_cqp_ce_handler(rf, cq);
2113 * irdma_lookup_apbvt_entry - lookup hash table for an existing apbvt entry corresponding to port
2114 * @cm_core: cm's core
2115 * @port: port to identify apbvt entry
2117 static struct irdma_apbvt_entry *irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core,
2120 struct irdma_apbvt_entry *entry;
2122 hash_for_each_possible(cm_core->apbvt_hash_tbl, entry, hlist, port) {
2123 if (entry->port == port) {
2133 * irdma_next_iw_state - modify qp state
2134 * @iwqp: iwarp qp to modify
2135 * @state: next state for qp
2136 * @del_hash: del hash
2137 * @term: term message
2138 * @termlen: length of term message
2140 void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
2143 struct irdma_modify_qp_info info = {};
2145 info.next_iwarp_state = state;
2146 info.remove_hash_idx = del_hash;
2147 info.cq_num_valid = true;
2148 info.arp_cache_idx_valid = true;
2149 info.dont_send_term = true;
2150 info.dont_send_fin = true;
2151 info.termlen = termlen;
2153 if (term & IRDMAQP_TERM_SEND_TERM_ONLY)
2154 info.dont_send_term = false;
2155 if (term & IRDMAQP_TERM_SEND_FIN_ONLY)
2156 info.dont_send_fin = false;
2157 if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)
2158 info.reset_tcp_conn = true;
2159 iwqp->hw_iwarp_state = state;
2160 irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
2161 iwqp->iwarp_state = info.next_iwarp_state;
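/*
 * Example call (as used elsewhere in this file): move a QP to ERROR on an
 * abortive close, removing the hash entry and sending no TERM/FIN:
 *
 *	irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
 */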
2165 * irdma_del_local_mac_entry - remove a mac entry from the hw
2167 * @rf: RDMA PCI function
2168 * @idx: the index of the mac ip address to delete
2170 void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
2172 struct irdma_cqp *iwcqp = &rf->cqp;
2173 struct irdma_cqp_request *cqp_request;
2174 struct cqp_cmds_info *cqp_info;
2176 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2180 cqp_info = &cqp_request->info;
2181 cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY;
2182 cqp_info->post_sq = 1;
2183 cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp;
2184 cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request;
2185 cqp_info->in.u.del_local_mac_entry.entry_idx = idx;
2186 cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0;
2188 irdma_handle_cqp_op(rf, cqp_request);
2189 irdma_put_cqp_request(iwcqp, cqp_request);
2193 * irdma_add_local_mac_entry - add a mac ip address entry to the
2195 * @rf: RDMA PCI function
2196 * @mac_addr: pointer to mac address
2197 * @idx: the index of the mac ip address to add
2199 int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
2201 struct irdma_local_mac_entry_info *info;
2202 struct irdma_cqp *iwcqp = &rf->cqp;
2203 struct irdma_cqp_request *cqp_request;
2204 struct cqp_cmds_info *cqp_info;
2205 enum irdma_status_code status;
2207 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2209 return IRDMA_ERR_NO_MEMORY;
2211 cqp_info = &cqp_request->info;
2212 cqp_info->post_sq = 1;
2213 info = &cqp_info->in.u.add_local_mac_entry.info;
2214 ether_addr_copy(info->mac_addr, mac_addr);
2215 info->entry_idx = idx;
2217 cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;
2218 cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;
2219 cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
2221 status = irdma_handle_cqp_op(rf, cqp_request);
2222 irdma_put_cqp_request(iwcqp, cqp_request);
2228 * irdma_alloc_local_mac_entry - allocate a mac entry
2229 * @rf: RDMA PCI function
2230 * @mac_tbl_idx: the index of the new mac address
2232 * Allocate a mac address entry and update the mac_tbl_idx
2233 * to hold the index of the newly created mac address
2234 * Return 0 if successful, otherwise return error
2236 int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
2238 struct irdma_cqp *iwcqp = &rf->cqp;
2239 struct irdma_cqp_request *cqp_request;
2240 struct cqp_cmds_info *cqp_info;
2241 enum irdma_status_code status = 0;
2243 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2245 return IRDMA_ERR_NO_MEMORY;
2247 cqp_info = &cqp_request->info;
2248 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY;
2249 cqp_info->post_sq = 1;
2250 cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp;
2251 cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request;
2252 status = irdma_handle_cqp_op(rf, cqp_request);
2254 *mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val;
2256 irdma_put_cqp_request(iwcqp, cqp_request);
2262 * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt
2263 * @iwdev: irdma device
2264 * @accel_local_port: port for apbvt
2265 * @add_port: add or delete port
2267 static enum irdma_status_code
2268 irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev, u16 accel_local_port,
2271 struct irdma_apbvt_info *info;
2272 struct irdma_cqp_request *cqp_request;
2273 struct cqp_cmds_info *cqp_info;
2274 enum irdma_status_code status;
2276 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
2278 return IRDMA_ERR_NO_MEMORY;
2280 cqp_info = &cqp_request->info;
2281 info = &cqp_info->in.u.manage_apbvt_entry.info;
2282 memset(info, 0, sizeof(*info));
2283 info->add = add_port;
2284 info->port = accel_local_port;
2285 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
2286 cqp_info->post_sq = 1;
2287 cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2288 cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
2289 ibdev_dbg(&iwdev->ibdev, "DEV: %s: port=0x%04x\n",
2290 (!add_port) ? "DELETE" : "ADD", accel_local_port);
2292 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2293 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);

/**
 * irdma_add_apbvt - add tcp port to HW apbvt table
 * @iwdev: irdma device
 * @port: port for apbvt
 */
struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port)
{
	struct irdma_cm_core *cm_core = &iwdev->cm_core;
	struct irdma_apbvt_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&cm_core->apbvt_lock, flags);
	entry = irdma_lookup_apbvt_entry(cm_core, port);
	if (entry) {
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
		return entry;
	}

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
		return NULL;
	}

	entry->port = port;
	entry->use_cnt = 1;
	hash_add(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port);
	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);

	if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) {
		spin_lock_irqsave(&cm_core->apbvt_lock, flags);
		hash_del(&entry->hlist);
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
		kfree(entry);
		return NULL;
	}

	return entry;
}

/**
 * irdma_del_apbvt - delete tcp port from HW apbvt table
 * @iwdev: irdma device
 * @entry: apbvt entry object
 */
void irdma_del_apbvt(struct irdma_device *iwdev,
		     struct irdma_apbvt_entry *entry)
{
	struct irdma_cm_core *cm_core = &iwdev->cm_core;
	unsigned long flags;

	spin_lock_irqsave(&cm_core->apbvt_lock, flags);
	if (--entry->use_cnt) {
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
		return;
	}

	hash_del(&entry->hlist);
	/* apbvt_lock is held across the CQP delete APBVT OP (non-waiting) to
	 * protect against a race where an add APBVT CQP can run ahead of the
	 * delete APBVT for the same port.
	 */
	irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
	kfree(entry);
	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
}
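
/* Usage sketch (illustrative): irdma_add_apbvt()/irdma_del_apbvt() bracket
 * the lifetime of an accelerated TCP port; the entry returned by the add is
 * the token handed back to the delete, and repeated adds for the same port
 * only bump the refcount:
 *
 *	struct irdma_apbvt_entry *ent;
 *
 *	ent = irdma_add_apbvt(iwdev, listen_port);
 *	if (!ent)
 *		return -ENOMEM;
 *	...
 *	irdma_del_apbvt(iwdev, ent);	// HW entry removed on last reference
 */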

/**
 * irdma_manage_arp_cache - manage hw arp cache
 * @rf: RDMA PCI function
 * @mac_addr: mac address ptr
 * @ip_addr: ip addr for arp cache
 * @ipv4: flag indicating IPv4
 * @action: add, delete or modify
 */
void irdma_manage_arp_cache(struct irdma_pci_f *rf,
			    const unsigned char *mac_addr,
			    u32 *ip_addr, bool ipv4, u32 action)
{
	struct irdma_add_arp_cache_entry_info *info;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int arp_index;

	arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action);
	if (arp_index == -1)
		return;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	if (action == IRDMA_ARP_ADD) {
		cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
		info = &cqp_info->in.u.add_arp_cache_entry.info;
		memset(info, 0, sizeof(*info));
		info->arp_index = (u16)arp_index;
		info->permanent = true;
		ether_addr_copy(info->mac_addr, mac_addr);
		cqp_info->in.u.add_arp_cache_entry.scratch =
			(uintptr_t)cqp_request;
		cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
	} else {
		cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY;
		cqp_info->in.u.del_arp_cache_entry.scratch =
			(uintptr_t)cqp_request;
		cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
		cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
	}

	cqp_info->post_sq = 1;
	irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);
}
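
/* Usage sketch (illustrative): the IP address travels as a u32 array, one
 * element used for IPv4, four for IPv6, and the action selects the CQP op.
 * The neigh pointer and daddr below are assumed caller state (a struct
 * neighbour and a __be32 address), shown for illustration only:
 *
 *	u32 ip[4] = {};
 *
 *	ip[0] = ntohl(daddr);	// IPv4 case
 *	irdma_manage_arp_cache(rf, neigh->ha, ip, true, IRDMA_ARP_ADD);
 *	...
 *	irdma_manage_arp_cache(rf, neigh->ha, ip, true, IRDMA_ARP_DELETE);
 */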

/**
 * irdma_send_syn_cqp_callback - do syn/ack after qhash
 * @cqp_request: qhash cqp completion
 */
static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)
{
	struct irdma_cm_node *cm_node = cqp_request->param;

	irdma_send_syn(cm_node, 1);
	irdma_rem_ref_cm_node(cm_node);
}

/**
 * irdma_manage_qhash - add or modify qhash
 * @iwdev: irdma device
 * @cminfo: cm info for qhash
 * @etype: type (syn or quad)
 * @mtype: type of qhash
 * @cmnode: cmnode associated with connection
 * @wait: wait for completion
 */
enum irdma_status_code
irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
		   enum irdma_quad_entry_type etype,
		   enum irdma_quad_hash_manage_type mtype, void *cmnode,
		   bool wait)
{
	struct irdma_qhash_table_info *info;
	enum irdma_status_code status;
	struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_cm_node *cm_node = cmnode;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
	if (!cqp_request)
		return IRDMA_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.manage_qhash_table_entry.info;
	memset(info, 0, sizeof(*info));
	info->vsi = &iwdev->vsi;
	info->manage = mtype;
	info->entry_type = etype;
	if (cminfo->vlan_id < VLAN_N_VID) {
		info->vlan_valid = true;
		info->vlan_id = cminfo->vlan_id;
	} else {
		info->vlan_valid = false;
	}
	info->ipv4_valid = cminfo->ipv4;
	info->user_pri = cminfo->user_pri;
	ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
	info->qp_num = cminfo->qh_qpid;
	info->dest_port = cminfo->loc_port;
	info->dest_ip[0] = cminfo->loc_addr[0];
	info->dest_ip[1] = cminfo->loc_addr[1];
	info->dest_ip[2] = cminfo->loc_addr[2];
	info->dest_ip[3] = cminfo->loc_addr[3];
	if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED ||
	    etype == IRDMA_QHASH_TYPE_UDP_UNICAST ||
	    etype == IRDMA_QHASH_TYPE_UDP_MCAST ||
	    etype == IRDMA_QHASH_TYPE_ROCE_MCAST ||
	    etype == IRDMA_QHASH_TYPE_ROCEV2_HW) {
		info->src_port = cminfo->rem_port;
		info->src_ip[0] = cminfo->rem_addr[0];
		info->src_ip[1] = cminfo->rem_addr[1];
		info->src_ip[2] = cminfo->rem_addr[2];
		info->src_ip[3] = cminfo->rem_addr[3];
	}
	if (cmnode) {
		cqp_request->callback_fcn = irdma_send_syn_cqp_callback;
		cqp_request->param = cmnode;
		if (!wait)
			refcount_inc(&cm_node->refcnt);
	}
	if (info->ipv4_valid)
		ibdev_dbg(&iwdev->ibdev,
			  "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n",
			  (!mtype) ? "DELETE" : "ADD",
			  __builtin_return_address(0), info->dest_port,
			  info->src_port, info->dest_ip, info->src_ip,
			  info->mac_addr, cminfo->vlan_id, cmnode);
	else
		ibdev_dbg(&iwdev->ibdev,
			  "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n",
			  (!mtype) ? "DELETE" : "ADD",
			  __builtin_return_address(0), info->dest_port,
			  info->src_port, info->dest_ip, info->src_ip,
			  info->mac_addr, cminfo->vlan_id, cmnode);

	cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
	cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY;
	cqp_info->post_sq = 1;
	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
	if (status && cm_node && !wait)
		irdma_rem_ref_cm_node(cm_node);

	irdma_put_cqp_request(iwcqp, cqp_request);

	return status;
}
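
/* Usage sketch (illustrative): add and remove a SYN (listener) qhash entry
 * around a listener's lifetime, with cminfo already filled from the bound
 * address. An established connection would instead pass
 * IRDMA_QHASH_TYPE_TCP_ESTABLISHED and its cm_node so the SYN/ACK callback
 * can run:
 *
 *	if (irdma_manage_qhash(iwdev, &cminfo, IRDMA_QHASH_TYPE_TCP_SYN,
 *			       IRDMA_QHASH_MANAGE_TYPE_ADD, NULL, true))
 *		return -EINVAL;
 *	...
 *	irdma_manage_qhash(iwdev, &cminfo, IRDMA_QHASH_TYPE_TCP_SYN,
 *			   IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL, false);
 */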

/**
 * irdma_hw_flush_wqes_callback - Check return code after flush
 * @cqp_request: qp flush cqp completion
 */
static void irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request)
{
	struct irdma_qp_flush_info *hw_info;
	struct irdma_sc_qp *qp;
	struct irdma_qp *iwqp;
	struct cqp_cmds_info *cqp_info;

	cqp_info = &cqp_request->info;
	hw_info = &cqp_info->in.u.qp_flush_wqes.info;
	qp = cqp_info->in.u.qp_flush_wqes.qp;
	iwqp = qp->qp_uk.back_qp;

	if (cqp_request->compl_info.maj_err_code)
		return;

	if (hw_info->rq &&
	    (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
	     cqp_request->compl_info.min_err_code == 0)) {
		/* RQ WQE flush was requested but did not happen */
		qp->qp_uk.rq_flush_complete = true;
	}
	if (hw_info->sq &&
	    (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
	     cqp_request->compl_info.min_err_code == 0)) {
		if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
			ibdev_err(&iwqp->iwdev->ibdev, "Flush QP[%d] failed, SQ has more work",
				  qp->qp_uk.qp_id);
			irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
		}
		qp->qp_uk.sq_flush_complete = true;
	}
}

/**
 * irdma_hw_flush_wqes - flush qp's wqes
 * @rf: RDMA PCI function
 * @qp: hardware control qp
 * @info: info for flush
 * @wait: flag wait for completion
 */
enum irdma_status_code irdma_hw_flush_wqes(struct irdma_pci_f *rf,
					   struct irdma_sc_qp *qp,
					   struct irdma_qp_flush_info *info,
					   bool wait)
{
	enum irdma_status_code status;
	struct irdma_qp_flush_info *hw_info;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_qp *iwqp = qp->qp_uk.back_qp;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
	if (!cqp_request)
		return IRDMA_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	if (!wait)
		cqp_request->callback_fcn = irdma_hw_flush_wqes_callback;
	hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
	memcpy(hw_info, info, sizeof(*hw_info));
	cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_flush_wqes.qp = qp;
	cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
	status = irdma_handle_cqp_op(rf, cqp_request);
	if (status) {
		qp->qp_uk.sq_flush_complete = true;
		qp->qp_uk.rq_flush_complete = true;
		irdma_put_cqp_request(&rf->cqp, cqp_request);
		return status;
	}

	if (!wait || cqp_request->compl_info.maj_err_code)
		goto put_cqp;

	if (info->rq) {
		if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
		    cqp_request->compl_info.min_err_code == 0) {
			/* RQ WQE flush was requested but did not happen */
			qp->qp_uk.rq_flush_complete = true;
		}
	}
	if (info->sq) {
		if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
		    cqp_request->compl_info.min_err_code == 0) {
			/* Handling case where WQE is posted to empty SQ when
			 * flush has not completed
			 */
			if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
				struct irdma_cqp_request *new_req;

				if (!qp->qp_uk.sq_flush_complete)
					goto put_cqp;
				qp->qp_uk.sq_flush_complete = false;
				qp->flush_sq = false;

				info->rq = false;
				info->sq = true;
				new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
				if (!new_req) {
					status = IRDMA_ERR_NO_MEMORY;
					goto put_cqp;
				}
				cqp_info = &new_req->info;
				hw_info = &new_req->info.in.u.qp_flush_wqes.info;
				memcpy(hw_info, info, sizeof(*hw_info));
				cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
				cqp_info->post_sq = 1;
				cqp_info->in.u.qp_flush_wqes.qp = qp;
				cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)new_req;

				status = irdma_handle_cqp_op(rf, new_req);
				if (new_req->compl_info.maj_err_code ||
				    new_req->compl_info.min_err_code != IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
				    status) {
					ibdev_err(&iwqp->iwdev->ibdev, "fatal QP event: SQ in error but not flushed, qp: %d",
						  iwqp->ibqp.qp_num);
					qp->qp_uk.sq_flush_complete = false;
					irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
				}
				irdma_put_cqp_request(&rf->cqp, new_req);
			} else {
				/* SQ WQE flush was requested but did not happen */
				qp->qp_uk.sq_flush_complete = true;
			}
		} else {
			if (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring))
				qp->qp_uk.sq_flush_complete = true;
		}
	}

	ibdev_dbg(&rf->iwdev->ibdev,
		  "VERBS: qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
		  iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
		  iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
		  cqp_request->compl_info.maj_err_code,
		  cqp_request->compl_info.min_err_code);

put_cqp:
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

/**
 * irdma_gen_ae - generate AE
 * @rf: RDMA PCI function
 * @qp: qp associated with AE
 * @info: info for ae
 * @wait: wait for completion
 */
void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
		  struct irdma_gen_ae_info *info, bool wait)
{
	struct irdma_gen_ae_info *ae_info;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	ae_info = &cqp_request->info.in.u.gen_ae.info;
	memcpy(ae_info, info, sizeof(*ae_info));
	cqp_info->cqp_cmd = IRDMA_OP_GEN_AE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.gen_ae.qp = qp;
	cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;

	irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);
}
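
/* Usage sketch (illustrative): force an asynchronous event on a QP from
 * software, e.g. to push it toward the error state. This mirrors how the
 * verbs layer uses irdma_gen_ae() with IRDMA_AE_BAD_CLOSE:
 *
 *	struct irdma_gen_ae_info ae_info = {
 *		.ae_code = IRDMA_AE_BAD_CLOSE,
 *		.ae_src = IRDMA_AE_SOURCE_RQ,
 *	};
 *
 *	irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false);
 */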

void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
{
	struct irdma_qp_flush_info info = {};
	struct irdma_pci_f *rf = iwqp->iwdev->rf;
	u8 flush_code = iwqp->sc_qp.flush_code;

	if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ))
		return;

	/* Set flush info fields */
	info.sq = flush_mask & IRDMA_FLUSH_SQ;
	info.rq = flush_mask & IRDMA_FLUSH_RQ;

	if (flush_mask & IRDMA_REFLUSH) {
		if (info.sq)
			iwqp->sc_qp.flush_sq = false;
		if (info.rq)
			iwqp->sc_qp.flush_rq = false;
	}

	/* Generate userflush errors in CQE */
	info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;
	info.sq_minor_code = FLUSH_GENERAL_ERR;
	info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
	info.rq_minor_code = FLUSH_GENERAL_ERR;
	info.userflushcode = true;
	if (flush_code) {
		if (info.sq && iwqp->sc_qp.sq_flush_code)
			info.sq_minor_code = flush_code;
		if (info.rq && iwqp->sc_qp.rq_flush_code)
			info.rq_minor_code = flush_code;
	}

	/* Issue flush */
	(void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
				  flush_mask & IRDMA_FLUSH_WAIT);
	iwqp->flush_issued = true;
}
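
/* Usage sketch (illustrative): flush both queues of a QP that has entered
 * the error state and wait for the flush CQP op to complete before
 * reporting flushed completions:
 *
 *	irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ |
 *			       IRDMA_FLUSH_WAIT);
 */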