1 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
6 * irdma_arp_table - manage arp table
7 * @rf: RDMA PCI function
8 * @ip_addr: ip address for device
10 * @mac_addr: mac address ptr
11 * @action: modify, delete or add
13 int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
14 const u8 *mac_addr, u32 action)
23 memcpy(ip, ip_addr, sizeof(ip));
25 spin_lock_irqsave(&rf->arp_lock, flags);
26 for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) {
27 if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip)))
33 if (arp_index != rf->arp_table_size) {
39 if (irdma_alloc_rsrc(rf, rf->allocated_arps, rf->arp_table_size,
40 (u32 *)&arp_index, &rf->next_arp_index)) {
45 memcpy(rf->arp_table[arp_index].ip_addr, ip,
46 sizeof(rf->arp_table[arp_index].ip_addr));
47 ether_addr_copy(rf->arp_table[arp_index].mac_addr, mac_addr);
49 case IRDMA_ARP_RESOLVE:
50 if (arp_index == rf->arp_table_size)
53 case IRDMA_ARP_DELETE:
54 if (arp_index == rf->arp_table_size) {
59 memset(rf->arp_table[arp_index].ip_addr, 0,
60 sizeof(rf->arp_table[arp_index].ip_addr));
61 eth_zero_addr(rf->arp_table[arp_index].mac_addr);
62 irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
69 spin_unlock_irqrestore(&rf->arp_lock, flags);
74 * irdma_add_arp - add a new arp entry if needed
80 int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac)
84 arpidx = irdma_arp_table(rf, &ip[0], ipv4, NULL, IRDMA_ARP_RESOLVE);
86 if (ether_addr_equal(rf->arp_table[arpidx].mac_addr, mac))
89 irdma_manage_arp_cache(rf, rf->arp_table[arpidx].mac_addr, ip,
90 ipv4, IRDMA_ARP_DELETE);
93 irdma_manage_arp_cache(rf, mac, ip, ipv4, IRDMA_ARP_ADD);
95 return irdma_arp_table(rf, ip, ipv4, NULL, IRDMA_ARP_RESOLVE);
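/*
 * Example (illustrative sketch, not part of the upstream driver): a caller
 * holding an IPv4 address in network byte order would seed the table and
 * obtain the arp index like this. The helper name and the __be32 parameter
 * are assumptions made for the example; callers in this file convert to
 * host order with ntohl() before indexing the table.
 */
static inline int irdma_example_add_arp_v4(struct irdma_pci_f *rf,
					   __be32 addr, const u8 *mac)
{
	u32 ip[4] = {};

	ip[0] = ntohl(addr);	/* table entries are kept in host order */
	return irdma_add_arp(rf, ip, true, mac); /* returns the arp index */
}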
99 * wr32 - write 32 bits to hw register
100 * @hw: hardware information including registers
101 * @reg: register offset
102 * @val: value to write to register
104 inline void wr32(struct irdma_hw *hw, u32 reg, u32 val)
106 writel(val, hw->hw_addr + reg);
110 * rd32 - read a 32 bit hw register
111 * @hw: hardware information including registers
112 * @reg: register offset
114 * Return: value of register content
116 inline u32 rd32(struct irdma_hw *hw, u32 reg)
118 return readl(hw->hw_addr + reg);
122 * rd64 - read a 64 bit hw register
123 * @hw: hardware information including registers
124 * @reg: register offset
126 * Return: value of register content
128 inline u64 rd64(struct irdma_hw *hw, u32 reg)
130 return readq(hw->hw_addr + reg);
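/*
 * Example (illustrative sketch): a read-modify-write of a device register
 * built from the accessors above. The helper and its parameters are
 * hypothetical; only rd32()/wr32() come from this file.
 */
static inline void irdma_example_set_bits32(struct irdma_hw *hw, u32 reg,
					    u32 mask)
{
	u32 val = rd32(hw, reg);	/* current register contents */

	wr32(hw, reg, val | mask);	/* write back with mask bits set */
}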
133 static void irdma_gid_change_event(struct ib_device *ibdev)
135 struct ib_event ib_event;
137 ib_event.event = IB_EVENT_GID_CHANGE;
138 ib_event.device = ibdev;
139 ib_event.element.port_num = 1;
140 ib_dispatch_event(&ib_event);
144 * irdma_inetaddr_event - system notifier for ipv4 addr events
145 * @notifier: not used
146 * @event: event for notifier
149 int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
152 struct in_ifaddr *ifa = ptr;
153 struct net_device *real_dev, *netdev = ifa->ifa_dev->dev;
154 struct irdma_device *iwdev;
155 struct ib_device *ibdev;
158 real_dev = rdma_vlan_dev_real_dev(netdev);
162 ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
166 iwdev = to_iwdev(ibdev);
167 local_ipaddr = ntohl(ifa->ifa_address);
168 ibdev_dbg(&iwdev->ibdev,
169 "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", real_dev,
170 event, &local_ipaddr, real_dev->dev_addr);
173 irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
174 &local_ipaddr, true, IRDMA_ARP_DELETE);
175 irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, false);
176 irdma_gid_change_event(&iwdev->ibdev);
179 case NETDEV_CHANGEADDR:
180 irdma_add_arp(iwdev->rf, &local_ipaddr, true, real_dev->dev_addr);
181 irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, true);
182 irdma_gid_change_event(&iwdev->ibdev);
188 ib_device_put(ibdev);
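/*
 * Example (illustrative sketch): how a handler like the one above is
 * typically attached to the IPv4 address notifier chain. The notifier
 * block name and the register/unregister pairing shown here are
 * assumptions for the example, not the driver's actual wiring.
 */
static struct notifier_block irdma_example_inetaddr_nb = {
	.notifier_call = irdma_inetaddr_event,
};

static inline void irdma_example_register_notifier(void)
{
	register_inetaddr_notifier(&irdma_example_inetaddr_nb);
	/* paired with unregister_inetaddr_notifier() on teardown */
}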
194 * irdma_inet6addr_event - system notifier for ipv6 addr events
195 * @notifier: not used
196 * @event: event for notifier
199 int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
202 struct inet6_ifaddr *ifa = ptr;
203 struct net_device *real_dev, *netdev = ifa->idev->dev;
204 struct irdma_device *iwdev;
205 struct ib_device *ibdev;
206 u32 local_ipaddr6[4];
208 real_dev = rdma_vlan_dev_real_dev(netdev);
212 ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
216 iwdev = to_iwdev(ibdev);
217 irdma_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
218 ibdev_dbg(&iwdev->ibdev,
219 "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", real_dev,
220 event, local_ipaddr6, real_dev->dev_addr);
223 irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
224 local_ipaddr6, false, IRDMA_ARP_DELETE);
225 irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, false);
226 irdma_gid_change_event(&iwdev->ibdev);
229 case NETDEV_CHANGEADDR:
230 irdma_add_arp(iwdev->rf, local_ipaddr6, false,
232 irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, true);
233 irdma_gid_change_event(&iwdev->ibdev);
239 ib_device_put(ibdev);
245 * irdma_net_event - system notifier for net events
246 * @notifier: not used
247 * @event: event for notifier
250 int irdma_net_event(struct notifier_block *notifier, unsigned long event,
253 struct neighbour *neigh = ptr;
254 struct net_device *real_dev, *netdev = (struct net_device *)neigh->dev;
255 struct irdma_device *iwdev;
256 struct ib_device *ibdev;
258 u32 local_ipaddr[4] = {};
262 case NETEVENT_NEIGH_UPDATE:
263 real_dev = rdma_vlan_dev_real_dev(netdev);
266 ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
270 iwdev = to_iwdev(ibdev);
271 p = (__be32 *)neigh->primary_key;
272 if (neigh->tbl->family == AF_INET6) {
274 irdma_copy_ip_ntohl(local_ipaddr, p);
276 local_ipaddr[0] = ntohl(*p);
279 ibdev_dbg(&iwdev->ibdev,
280 "DEV: netdev %p state %d local_ip=%pI4 MAC=%pM\n",
281 iwdev->netdev, neigh->nud_state, local_ipaddr,
284 if (neigh->nud_state & NUD_VALID)
285 irdma_add_arp(iwdev->rf, local_ipaddr, ipv4, neigh->ha);
288 irdma_manage_arp_cache(iwdev->rf, neigh->ha,
291 ib_device_put(ibdev);
301 * irdma_netdevice_event - system notifier for netdev events
302 * @notifier: not used
303 * @event: event for notifier
306 int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
309 struct irdma_device *iwdev;
310 struct ib_device *ibdev;
311 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
313 ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA);
317 iwdev = to_iwdev(ibdev);
318 iwdev->iw_status = 1;
321 iwdev->iw_status = 0;
324 irdma_port_ibevent(iwdev);
329 ib_device_put(ibdev);
335 * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
336 * @iwdev: irdma device
338 static void irdma_add_ipv6_addr(struct irdma_device *iwdev)
340 struct net_device *ip_dev;
341 struct inet6_dev *idev;
342 struct inet6_ifaddr *ifp, *tmp;
343 u32 local_ipaddr6[4];
346 for_each_netdev_rcu (&init_net, ip_dev) {
347 if (((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF &&
348 rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev) ||
349 ip_dev == iwdev->netdev) &&
350 (READ_ONCE(ip_dev->flags) & IFF_UP)) {
351 idev = __in6_dev_get(ip_dev);
353 ibdev_err(&iwdev->ibdev, "ipv6 inet device not found\n");
356 list_for_each_entry_safe (ifp, tmp, &idev->addr_list,
358 ibdev_dbg(&iwdev->ibdev,
359 "INIT: IP=%pI6, vlan_id=%d, MAC=%pM\n",
361 rdma_vlan_dev_vlan_id(ip_dev),
364 irdma_copy_ip_ntohl(local_ipaddr6,
365 ifp->addr.in6_u.u6_addr32);
366 irdma_manage_arp_cache(iwdev->rf,
368 local_ipaddr6, false,
377 * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
378 * @iwdev: irdma device
380 static void irdma_add_ipv4_addr(struct irdma_device *iwdev)
382 struct net_device *dev;
383 struct in_device *idev;
387 for_each_netdev_rcu (&init_net, dev) {
388 if (((rdma_vlan_dev_vlan_id(dev) < 0xFFFF &&
389 rdma_vlan_dev_real_dev(dev) == iwdev->netdev) ||
390 dev == iwdev->netdev) && (READ_ONCE(dev->flags) & IFF_UP)) {
391 const struct in_ifaddr *ifa;
393 idev = __in_dev_get_rcu(dev);
397 in_dev_for_each_ifa_rcu(ifa, idev) {
398 ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI4, vlan_id=%d, MAC=%pM\n",
399 &ifa->ifa_address, rdma_vlan_dev_vlan_id(dev),
402 ip_addr = ntohl(ifa->ifa_address);
403 irdma_manage_arp_cache(iwdev->rf, dev->dev_addr,
413 * irdma_add_ip - add ip addresses
414 * @iwdev: irdma device
416 * Add ipv4/ipv6 addresses to the arp cache
418 void irdma_add_ip(struct irdma_device *iwdev)
420 irdma_add_ipv4_addr(iwdev);
421 irdma_add_ipv6_addr(iwdev);
425 * irdma_alloc_and_get_cqp_request - get cqp struct
426 * @cqp: device cqp ptr
427 * @wait: cqp request to be used in wait mode
429 struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
432 struct irdma_cqp_request *cqp_request = NULL;
435 spin_lock_irqsave(&cqp->req_lock, flags);
436 if (!list_empty(&cqp->cqp_avail_reqs)) {
437 cqp_request = list_first_entry(&cqp->cqp_avail_reqs,
438 struct irdma_cqp_request, list);
439 list_del_init(&cqp_request->list);
441 spin_unlock_irqrestore(&cqp->req_lock, flags);
443 cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
445 cqp_request->dynamic = true;
447 init_waitqueue_head(&cqp_request->waitq);
451 ibdev_dbg(to_ibdev(cqp->sc_cqp.dev), "ERR: CQP Request Fail: No Memory");
455 cqp_request->waiting = wait;
456 refcount_set(&cqp_request->refcnt, 1);
457 memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));
463 * irdma_get_cqp_request - increase refcount for cqp_request
464 * @cqp_request: pointer to cqp_request instance
466 static inline void irdma_get_cqp_request(struct irdma_cqp_request *cqp_request)
468 refcount_inc(&cqp_request->refcnt);
472 * irdma_free_cqp_request - free cqp request
474 * @cqp_request: to be put back in cqp list
476 void irdma_free_cqp_request(struct irdma_cqp *cqp,
477 struct irdma_cqp_request *cqp_request)
481 if (cqp_request->dynamic) {
484 cqp_request->request_done = false;
485 cqp_request->callback_fcn = NULL;
486 cqp_request->waiting = false;
488 spin_lock_irqsave(&cqp->req_lock, flags);
489 list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
490 spin_unlock_irqrestore(&cqp->req_lock, flags);
492 wake_up(&cqp->remove_wq);
496 * irdma_put_cqp_request - dec ref count and free if 0
498 * @cqp_request: to be put back in cqp list
500 void irdma_put_cqp_request(struct irdma_cqp *cqp,
501 struct irdma_cqp_request *cqp_request)
503 if (refcount_dec_and_test(&cqp_request->refcnt))
504 irdma_free_cqp_request(cqp, cqp_request);
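/*
 * Example (illustrative sketch): a caller that exposes a request to an
 * asynchronous completion path pins it with its own reference first. The
 * helper is hypothetical; the get/put pairing is the pattern used
 * throughout this file.
 */
static inline void irdma_example_pin_request(struct irdma_cqp *cqp,
					     struct irdma_cqp_request *req)
{
	irdma_get_cqp_request(req);	/* keep req alive across async use */
	/* ... hand req to hardware / a completion handler ... */
	irdma_put_cqp_request(cqp, req); /* drop our reference when done */
}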
508 * irdma_free_pending_cqp_request - free pending cqp request objs
510 * @cqp_request: to be put back in cqp list
513 irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
514 struct irdma_cqp_request *cqp_request)
516 if (cqp_request->waiting) {
517 cqp_request->compl_info.error = true;
518 cqp_request->request_done = true;
519 wake_up(&cqp_request->waitq);
521 wait_event_timeout(cqp->remove_wq,
522 refcount_read(&cqp_request->refcnt) == 1, 1000);
523 irdma_put_cqp_request(cqp, cqp_request);
527 * irdma_cleanup_pending_cqp_op - clean-up cqp with no completions
529 * @rf: RDMA PCI function
531 void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
533 struct irdma_sc_dev *dev = &rf->sc_dev;
534 struct irdma_cqp *cqp = &rf->cqp;
535 struct irdma_cqp_request *cqp_request = NULL;
536 struct cqp_cmds_info *pcmdinfo = NULL;
537 u32 i, pending_work, wqe_idx;
539 pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
540 wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
541 for (i = 0; i < pending_work; i++) {
542 cqp_request = (struct irdma_cqp_request *)(unsigned long)
543 cqp->scratch_array[wqe_idx];
545 irdma_free_pending_cqp_request(cqp, cqp_request);
546 wqe_idx = (wqe_idx + 1) % IRDMA_RING_SIZE(cqp->sc_cqp.sq_ring);
549 while (!list_empty(&dev->cqp_cmd_head)) {
550 pcmdinfo = irdma_remove_cqp_head(dev);
552 container_of(pcmdinfo, struct irdma_cqp_request, info);
554 irdma_free_pending_cqp_request(cqp, cqp_request);
559 * irdma_wait_event - wait for completion
560 * @rf: RDMA PCI function
561 * @cqp_request: cqp request to wait
563 static int irdma_wait_event(struct irdma_pci_f *rf,
564 struct irdma_cqp_request *cqp_request)
566 struct irdma_cqp_timeout cqp_timeout = {};
567 bool cqp_error = false;
570 cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
572 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
573 if (wait_event_timeout(cqp_request->waitq,
574 cqp_request->request_done,
575 msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
578 irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
580 if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
585 rf->gen_ops.request_reset(rf);
590 cqp_error = cqp_request->compl_info.error;
593 if (cqp_request->compl_info.maj_err_code == 0xFFFF &&
594 cqp_request->compl_info.min_err_code == 0x8029) {
597 rf->gen_ops.request_reset(rf);
605 static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
606 [IRDMA_OP_CEQ_DESTROY] = "Destroy CEQ Cmd",
607 [IRDMA_OP_AEQ_DESTROY] = "Destroy AEQ Cmd",
608 [IRDMA_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd",
609 [IRDMA_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd",
610 [IRDMA_OP_CEQ_CREATE] = "CEQ Create Cmd",
611 [IRDMA_OP_AEQ_CREATE] = "AEQ Create Cmd",
612 [IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd",
613 [IRDMA_OP_QP_MODIFY] = "Modify QP Cmd",
614 [IRDMA_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd",
615 [IRDMA_OP_CQ_CREATE] = "Create CQ Cmd",
616 [IRDMA_OP_CQ_DESTROY] = "Destroy CQ Cmd",
617 [IRDMA_OP_QP_CREATE] = "Create QP Cmd",
618 [IRDMA_OP_QP_DESTROY] = "Destroy QP Cmd",
619 [IRDMA_OP_ALLOC_STAG] = "Allocate STag Cmd",
620 [IRDMA_OP_MR_REG_NON_SHARED] = "Register Non-Shared MR Cmd",
621 [IRDMA_OP_DEALLOC_STAG] = "Deallocate STag Cmd",
622 [IRDMA_OP_MW_ALLOC] = "Allocate Memory Window Cmd",
623 [IRDMA_OP_QP_FLUSH_WQES] = "Flush QP Cmd",
624 [IRDMA_OP_ADD_ARP_CACHE_ENTRY] = "Add ARP Cache Cmd",
625 [IRDMA_OP_MANAGE_PUSH_PAGE] = "Manage Push Page Cmd",
626 [IRDMA_OP_UPDATE_PE_SDS] = "Update PE SDs Cmd",
627 [IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd",
628 [IRDMA_OP_SUSPEND] = "Suspend QP Cmd",
629 [IRDMA_OP_RESUME] = "Resume QP Cmd",
630 [IRDMA_OP_MANAGE_VF_PBLE_BP] = "Manage VF PBLE Backing Pages Cmd",
631 [IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd",
632 [IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd",
633 [IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd",
634 [IRDMA_OP_AH_MODIFY] = "Modify Address Handle Cmd",
635 [IRDMA_OP_AH_DESTROY] = "Destroy Address Handle Cmd",
636 [IRDMA_OP_MC_CREATE] = "Create Multicast Group Cmd",
637 [IRDMA_OP_MC_DESTROY] = "Destroy Multicast Group Cmd",
638 [IRDMA_OP_MC_MODIFY] = "Modify Multicast Group Cmd",
639 [IRDMA_OP_STATS_ALLOCATE] = "Add Statistics Instance Cmd",
640 [IRDMA_OP_STATS_FREE] = "Free Statistics Instance Cmd",
641 [IRDMA_OP_STATS_GATHER] = "Gather Statistics Cmd",
642 [IRDMA_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd",
643 [IRDMA_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd",
644 [IRDMA_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd",
645 [IRDMA_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd",
646 [IRDMA_OP_GEN_AE] = "Generate AE Cmd",
647 [IRDMA_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd",
648 [IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY] = "Allocate Local MAC Entry Cmd",
649 [IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
650 [IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
651 [IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
654 static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
655 {0xffff, 0x8006, "Flush No Wqe Pending"},
656 {0xffff, 0x8007, "Modify QP Bad Close"},
657 {0xffff, 0x8009, "LLP Closed"},
658 {0xffff, 0x800a, "Reset Not Sent"}
662 * irdma_cqp_crit_err - check if CQP error is critical
663 * @dev: pointer to dev structure
664 * @cqp_cmd: code for last CQP operation
665 * @maj_err_code: major error code
666 * @min_err_code: minor error code
668 bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
669 u16 maj_err_code, u16 min_err_code)
673 for (i = 0; i < ARRAY_SIZE(irdma_noncrit_err_list); ++i) {
674 if (maj_err_code == irdma_noncrit_err_list[i].maj &&
675 min_err_code == irdma_noncrit_err_list[i].min) {
676 ibdev_dbg(to_ibdev(dev),
677 "CQP: [%s Error][%s] maj=0x%x min=0x%x\n",
678 irdma_noncrit_err_list[i].desc,
679 irdma_cqp_cmd_names[cqp_cmd], maj_err_code,
688 * irdma_handle_cqp_op - process cqp command
689 * @rf: RDMA PCI function
690 * @cqp_request: cqp request to process
692 int irdma_handle_cqp_op(struct irdma_pci_f *rf,
693 struct irdma_cqp_request *cqp_request)
695 struct irdma_sc_dev *dev = &rf->sc_dev;
696 struct cqp_cmds_info *info = &cqp_request->info;
698 bool put_cqp_request = true;
703 irdma_get_cqp_request(cqp_request);
704 status = irdma_process_cqp_cmd(dev, info);
708 if (cqp_request->waiting) {
709 put_cqp_request = false;
710 status = irdma_wait_event(rf, cqp_request);
718 if (irdma_cqp_crit_err(dev, info->cqp_cmd,
719 cqp_request->compl_info.maj_err_code,
720 cqp_request->compl_info.min_err_code))
721 ibdev_err(&rf->iwdev->ibdev,
722 "[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n",
723 irdma_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, status, cqp_request->waiting,
724 cqp_request->compl_info.error, cqp_request->compl_info.maj_err_code,
725 cqp_request->compl_info.min_err_code);
728 irdma_put_cqp_request(&rf->cqp, cqp_request);
733 void irdma_qp_add_ref(struct ib_qp *ibqp)
735 struct irdma_qp *iwqp = (struct irdma_qp *)ibqp;
737 refcount_inc(&iwqp->refcnt);
740 void irdma_qp_rem_ref(struct ib_qp *ibqp)
742 struct irdma_qp *iwqp = to_iwqp(ibqp);
743 struct irdma_device *iwdev = iwqp->iwdev;
747 spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
748 if (!refcount_dec_and_test(&iwqp->refcnt)) {
749 spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
753 qp_num = iwqp->ibqp.qp_num;
754 iwdev->rf->qp_table[qp_num] = NULL;
755 spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
756 complete(&iwqp->free_qp);
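/*
 * Example (illustrative sketch): bracketing deferred work on a QP with the
 * reference helpers above so the QP cannot be freed mid-operation. The
 * helper and its callback parameter are hypothetical.
 */
static inline void irdma_example_qp_do_work(struct ib_qp *ibqp,
					    void (*work)(struct ib_qp *))
{
	irdma_qp_add_ref(ibqp);	/* QP stays valid while work runs */
	work(ibqp);
	irdma_qp_rem_ref(ibqp);	/* may release the final reference */
}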
759 struct ib_device *to_ibdev(struct irdma_sc_dev *dev)
761 return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev;
765 * irdma_get_qp - get qp address
766 * @device: iwarp device
769 struct ib_qp *irdma_get_qp(struct ib_device *device, int qpn)
771 struct irdma_device *iwdev = to_iwdev(device);
773 if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp)
776 return &iwdev->rf->qp_table[qpn]->ibqp;
780 * irdma_remove_cqp_head - remove and return the head entry
783 void *irdma_remove_cqp_head(struct irdma_sc_dev *dev)
785 struct list_head *entry;
786 struct list_head *list = &dev->cqp_cmd_head;
788 if (list_empty(list))
798 * irdma_cqp_sds_cmd - create cqp command for sd
799 * @dev: hardware control device structure
800 * @sdinfo: information for sd cqp
803 int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
804 struct irdma_update_sds_info *sdinfo)
806 struct irdma_cqp_request *cqp_request;
807 struct cqp_cmds_info *cqp_info;
808 struct irdma_pci_f *rf = dev_to_rf(dev);
811 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
815 cqp_info = &cqp_request->info;
816 memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
817 sizeof(cqp_info->in.u.update_pe_sds.info));
818 cqp_info->cqp_cmd = IRDMA_OP_UPDATE_PE_SDS;
819 cqp_info->post_sq = 1;
820 cqp_info->in.u.update_pe_sds.dev = dev;
821 cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
823 status = irdma_handle_cqp_op(rf, cqp_request);
824 irdma_put_cqp_request(&rf->cqp, cqp_request);
830 * irdma_cqp_qp_suspend_resume - cqp command for suspend/resume
831 * @qp: hardware control qp
832 * @op: suspend or resume
834 int irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 op)
836 struct irdma_sc_dev *dev = qp->dev;
837 struct irdma_cqp_request *cqp_request;
838 struct irdma_sc_cqp *cqp = dev->cqp;
839 struct cqp_cmds_info *cqp_info;
840 struct irdma_pci_f *rf = dev_to_rf(dev);
843 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
847 cqp_info = &cqp_request->info;
848 cqp_info->cqp_cmd = op;
849 cqp_info->in.u.suspend_resume.cqp = cqp;
850 cqp_info->in.u.suspend_resume.qp = qp;
851 cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
853 status = irdma_handle_cqp_op(rf, cqp_request);
854 irdma_put_cqp_request(&rf->cqp, cqp_request);
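/*
 * Example (illustrative sketch): quiescing a QP around a reconfiguration
 * step using the command above with the IRDMA_OP_SUSPEND/IRDMA_OP_RESUME
 * opcodes. The helper and its callback are hypothetical.
 */
static inline int irdma_example_qp_quiesce(struct irdma_sc_qp *qp,
					   void (*reconfig)(struct irdma_sc_qp *))
{
	int status = irdma_cqp_qp_suspend_resume(qp, IRDMA_OP_SUSPEND);

	if (status)
		return status;
	reconfig(qp);	/* QP is suspended at this point */
	return irdma_cqp_qp_suspend_resume(qp, IRDMA_OP_RESUME);
}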
860 * irdma_term_modify_qp - modify qp for term message
861 * @qp: hardware control qp
862 * @next_state: qp's next state
863 * @term: terminate code
866 void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
869 struct irdma_qp *iwqp;
871 iwqp = qp->qp_uk.back_qp;
872 irdma_next_iw_state(iwqp, next_state, 0, term, term_len);
876 * irdma_terminate_done - after terminate is completed
877 * @qp: hardware control qp
878 * @timeout_occurred: indicates if terminate timer expired
880 void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
882 struct irdma_qp *iwqp;
887 iwqp = qp->qp_uk.back_qp;
888 spin_lock_irqsave(&iwqp->lock, flags);
889 if (iwqp->hte_added) {
893 first_time = !(qp->term_flags & IRDMA_TERM_DONE);
894 qp->term_flags |= IRDMA_TERM_DONE;
895 spin_unlock_irqrestore(&iwqp->lock, flags);
897 if (!timeout_occurred)
898 irdma_terminate_del_timer(qp);
900 irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, hte, 0, 0);
901 irdma_cm_disconn(iwqp);
905 static void irdma_terminate_timeout(struct timer_list *t)
907 struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer);
908 struct irdma_sc_qp *qp = &iwqp->sc_qp;
910 irdma_terminate_done(qp, 1);
911 irdma_qp_rem_ref(&iwqp->ibqp);
915 * irdma_terminate_start_timer - start terminate timeout
916 * @qp: hardware control qp
918 void irdma_terminate_start_timer(struct irdma_sc_qp *qp)
920 struct irdma_qp *iwqp;
922 iwqp = qp->qp_uk.back_qp;
923 irdma_qp_add_ref(&iwqp->ibqp);
924 timer_setup(&iwqp->terminate_timer, irdma_terminate_timeout, 0);
925 iwqp->terminate_timer.expires = jiffies + HZ;
927 add_timer(&iwqp->terminate_timer);
931 * irdma_terminate_del_timer - delete terminate timeout
932 * @qp: hardware control qp
934 void irdma_terminate_del_timer(struct irdma_sc_qp *qp)
936 struct irdma_qp *iwqp;
939 iwqp = qp->qp_uk.back_qp;
940 ret = del_timer(&iwqp->terminate_timer);
942 irdma_qp_rem_ref(&iwqp->ibqp);
946 * irdma_cqp_query_fpm_val_cmd - send cqp command for fpm
947 * @dev: function device struct
948 * @val_mem: buffer for fpm
949 * @hmc_fn_id: function id for fpm
951 int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
952 struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
954 struct irdma_cqp_request *cqp_request;
955 struct cqp_cmds_info *cqp_info;
956 struct irdma_pci_f *rf = dev_to_rf(dev);
959 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
963 cqp_info = &cqp_request->info;
964 cqp_request->param = NULL;
965 cqp_info->in.u.query_fpm_val.cqp = dev->cqp;
966 cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa;
967 cqp_info->in.u.query_fpm_val.fpm_val_va = val_mem->va;
968 cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id;
969 cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL;
970 cqp_info->post_sq = 1;
971 cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request;
973 status = irdma_handle_cqp_op(rf, cqp_request);
974 irdma_put_cqp_request(&rf->cqp, cqp_request);
980 * irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw
981 * @dev: hardware control device structure
982 * @val_mem: buffer with fpm values
983 * @hmc_fn_id: function id for fpm
985 int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
986 struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
988 struct irdma_cqp_request *cqp_request;
989 struct cqp_cmds_info *cqp_info;
990 struct irdma_pci_f *rf = dev_to_rf(dev);
993 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
997 cqp_info = &cqp_request->info;
998 cqp_request->param = NULL;
999 cqp_info->in.u.commit_fpm_val.cqp = dev->cqp;
1000 cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa;
1001 cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va;
1002 cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id;
1003 cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL;
1004 cqp_info->post_sq = 1;
1005 cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request;
1007 status = irdma_handle_cqp_op(rf, cqp_request);
1008 irdma_put_cqp_request(&rf->cqp, cqp_request);
1014 * irdma_cqp_cq_create_cmd - create a cq for the cqp
1015 * @dev: device pointer
1016 * @cq: pointer to created cq
1018 int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
1020 struct irdma_pci_f *rf = dev_to_rf(dev);
1021 struct irdma_cqp *iwcqp = &rf->cqp;
1022 struct irdma_cqp_request *cqp_request;
1023 struct cqp_cmds_info *cqp_info;
1026 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
1030 cqp_info = &cqp_request->info;
1031 cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
1032 cqp_info->post_sq = 1;
1033 cqp_info->in.u.cq_create.cq = cq;
1034 cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
1036 status = irdma_handle_cqp_op(rf, cqp_request);
1037 irdma_put_cqp_request(iwcqp, cqp_request);
1043 * irdma_cqp_qp_create_cmd - create a qp for the cqp
1044 * @dev: device pointer
1045 * @qp: pointer to created qp
1047 int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
1049 struct irdma_pci_f *rf = dev_to_rf(dev);
1050 struct irdma_cqp *iwcqp = &rf->cqp;
1051 struct irdma_cqp_request *cqp_request;
1052 struct cqp_cmds_info *cqp_info;
1053 struct irdma_create_qp_info *qp_info;
1056 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
1060 cqp_info = &cqp_request->info;
1061 qp_info = &cqp_request->info.in.u.qp_create.info;
1062 memset(qp_info, 0, sizeof(*qp_info));
1063 qp_info->cq_num_valid = true;
1064 qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
1065 cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
1066 cqp_info->post_sq = 1;
1067 cqp_info->in.u.qp_create.qp = qp;
1068 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
1070 status = irdma_handle_cqp_op(rf, cqp_request);
1071 irdma_put_cqp_request(iwcqp, cqp_request);
1077 * irdma_dealloc_push_page - free a push page for qp
1078 * @rf: RDMA PCI function
1079 * @qp: hardware control qp
1081 static void irdma_dealloc_push_page(struct irdma_pci_f *rf,
1082 struct irdma_sc_qp *qp)
1084 struct irdma_cqp_request *cqp_request;
1085 struct cqp_cmds_info *cqp_info;
1088 if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX)
1091 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
1095 cqp_info = &cqp_request->info;
1096 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
1097 cqp_info->post_sq = 1;
1098 cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
1099 cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
1100 cqp_info->in.u.manage_push_page.info.free_page = 1;
1101 cqp_info->in.u.manage_push_page.info.push_page_type = 0;
1102 cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp;
1103 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
1104 status = irdma_handle_cqp_op(rf, cqp_request);
1106 qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
1107 irdma_put_cqp_request(&rf->cqp, cqp_request);
1111 * irdma_free_qp_rsrc - free up memory resources for qp
1112 * @iwqp: qp ptr (user or kernel)
1114 void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
1116 struct irdma_device *iwdev = iwqp->iwdev;
1117 struct irdma_pci_f *rf = iwdev->rf;
1118 u32 qp_num = iwqp->ibqp.qp_num;
1120 irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
1121 irdma_dealloc_push_page(rf, &iwqp->sc_qp);
1122 if (iwqp->sc_qp.vsi) {
1123 irdma_qp_rem_qos(&iwqp->sc_qp);
1124 iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
1125 iwqp->sc_qp.user_pri);
1129 irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
1130 dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size,
1131 iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa);
1132 iwqp->q2_ctx_mem.va = NULL;
1133 dma_free_coherent(rf->sc_dev.hw->device, iwqp->kqp.dma_mem.size,
1134 iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa);
1135 iwqp->kqp.dma_mem.va = NULL;
1136 kfree(iwqp->kqp.sq_wrid_mem);
1137 kfree(iwqp->kqp.rq_wrid_mem);
1141 * irdma_cq_wq_destroy - send cq destroy cqp
1142 * @rf: RDMA PCI function
1143 * @cq: hardware control cq
1145 void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
1147 struct irdma_cqp_request *cqp_request;
1148 struct cqp_cmds_info *cqp_info;
1150 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1154 cqp_info = &cqp_request->info;
1155 cqp_info->cqp_cmd = IRDMA_OP_CQ_DESTROY;
1156 cqp_info->post_sq = 1;
1157 cqp_info->in.u.cq_destroy.cq = cq;
1158 cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
1160 irdma_handle_cqp_op(rf, cqp_request);
1161 irdma_put_cqp_request(&rf->cqp, cqp_request);
1165 * irdma_hw_modify_qp_callback - handle state for modify QPs that don't wait
1166 * @cqp_request: modify QP completion
1168 static void irdma_hw_modify_qp_callback(struct irdma_cqp_request *cqp_request)
1170 struct cqp_cmds_info *cqp_info;
1171 struct irdma_qp *iwqp;
1173 cqp_info = &cqp_request->info;
1174 iwqp = cqp_info->in.u.qp_modify.qp->qp_uk.back_qp;
1175 atomic_dec(&iwqp->hw_mod_qp_pend);
1176 wake_up(&iwqp->mod_qp_waitq);
1180 * irdma_hw_modify_qp - setup cqp for modify qp
1181 * @iwdev: RDMA device
1182 * @iwqp: qp ptr (user or kernel)
1183 * @info: info for modify qp
1184 * @wait: flag to wait or not for modify qp completion
1186 int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
1187 struct irdma_modify_qp_info *info, bool wait)
1190 struct irdma_pci_f *rf = iwdev->rf;
1191 struct irdma_cqp_request *cqp_request;
1192 struct cqp_cmds_info *cqp_info;
1193 struct irdma_modify_qp_info *m_info;
1195 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
1200 cqp_request->callback_fcn = irdma_hw_modify_qp_callback;
1201 atomic_inc(&iwqp->hw_mod_qp_pend);
1203 cqp_info = &cqp_request->info;
1204 m_info = &cqp_info->in.u.qp_modify.info;
1205 memcpy(m_info, info, sizeof(*m_info));
1206 cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
1207 cqp_info->post_sq = 1;
1208 cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
1209 cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
1210 status = irdma_handle_cqp_op(rf, cqp_request);
1211 irdma_put_cqp_request(&rf->cqp, cqp_request);
1213 if (rdma_protocol_roce(&iwdev->ibdev, 1))
1216 switch (m_info->next_iwarp_state) {
1217 struct irdma_gen_ae_info ae_info;
1219 case IRDMA_QP_STATE_RTS:
1220 case IRDMA_QP_STATE_IDLE:
1221 case IRDMA_QP_STATE_TERMINATE:
1222 case IRDMA_QP_STATE_CLOSING:
1223 if (info->curr_iwarp_state == IRDMA_QP_STATE_IDLE)
1224 irdma_send_reset(iwqp->cm_node);
1226 iwqp->sc_qp.term_flags = IRDMA_TERM_DONE;
1228 ae_info.ae_code = IRDMA_AE_BAD_CLOSE;
1230 irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false);
1232 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp,
1237 cqp_info = &cqp_request->info;
1238 m_info = &cqp_info->in.u.qp_modify.info;
1239 memcpy(m_info, info, sizeof(*m_info));
1240 cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
1241 cqp_info->post_sq = 1;
1242 cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
1243 cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
1244 m_info->next_iwarp_state = IRDMA_QP_STATE_ERROR;
1245 m_info->reset_tcp_conn = true;
1246 irdma_handle_cqp_op(rf, cqp_request);
1247 irdma_put_cqp_request(&rf->cqp, cqp_request);
1250 case IRDMA_QP_STATE_ERROR:
1260 * irdma_cqp_cq_destroy_cmd - destroy the cqp cq
1261 * @dev: device pointer
1262 * @cq: pointer to cq
1264 void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
1266 struct irdma_pci_f *rf = dev_to_rf(dev);
1268 irdma_cq_wq_destroy(rf, cq);
1272 * irdma_cqp_qp_destroy_cmd - destroy the cqp
1273 * @dev: device pointer
1274 * @qp: pointer to qp
1276 int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
1278 struct irdma_pci_f *rf = dev_to_rf(dev);
1279 struct irdma_cqp *iwcqp = &rf->cqp;
1280 struct irdma_cqp_request *cqp_request;
1281 struct cqp_cmds_info *cqp_info;
1284 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
1288 cqp_info = &cqp_request->info;
1289 memset(cqp_info, 0, sizeof(*cqp_info));
1290 cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
1291 cqp_info->post_sq = 1;
1292 cqp_info->in.u.qp_destroy.qp = qp;
1293 cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
1294 cqp_info->in.u.qp_destroy.remove_hash_idx = true;
1296 status = irdma_handle_cqp_op(rf, cqp_request);
1297 irdma_put_cqp_request(&rf->cqp, cqp_request);
1303 * irdma_ieq_mpa_crc_ae - generate AE for crc error
1304 * @dev: hardware control device structure
1305 * @qp: hardware control qp
1307 void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
1309 struct irdma_gen_ae_info info = {};
1310 struct irdma_pci_f *rf = dev_to_rf(dev);
1312 ibdev_dbg(&rf->iwdev->ibdev, "AEQ: Generate MPA CRC AE\n");
1313 info.ae_code = IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR;
1314 info.ae_src = IRDMA_AE_SOURCE_RQ;
1315 irdma_gen_ae(rf, qp, &info, false);
1319 * irdma_init_hash_desc - initialize hash for crc calculation
1320 * @desc: double pointer to the shash descriptor to allocate
1322 int irdma_init_hash_desc(struct shash_desc **desc)
1324 struct crypto_shash *tfm;
1325 struct shash_desc *tdesc;
1327 tfm = crypto_alloc_shash("crc32c", 0, 0);
1331 tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
1334 crypto_free_shash(tfm);
1345 * irdma_free_hash_desc - free hash desc
1346 * @desc: to be freed
1348 void irdma_free_hash_desc(struct shash_desc *desc)
1351 crypto_free_shash(desc->tfm);
1357 * irdma_ieq_check_mpacrc - check if mpa crc is OK
1358 * @desc: desc for hash
1359 * @addr: address of buffer for crc
1360 * @len: length of buffer
1361 * @val: value to be compared
1363 int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len,
1370 crypto_shash_init(desc);
1371 ret = crypto_shash_update(desc, addr, len);
1373 crypto_shash_final(desc, (u8 *)&crc);
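/*
 * Example (illustrative sketch): allocating a CRC descriptor, validating a
 * buffer's MPA CRC with it, and freeing it again. The helper is
 * hypothetical; error handling is condensed.
 */
static inline int irdma_example_check_crc(void *addr, u32 len, u32 val)
{
	struct shash_desc *desc;
	int ret;

	ret = irdma_init_hash_desc(&desc);
	if (ret)
		return ret;
	ret = irdma_ieq_check_mpacrc(desc, addr, len, val); /* 0 if CRC ok */
	irdma_free_hash_desc(desc);
	return ret;
}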
1381 * irdma_ieq_get_qp - get qp based on quad in puda buffer
1382 * @dev: hardware control device structure
1383 * @buf: receive puda buffer on exception q
1385 struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
1386 struct irdma_puda_buf *buf)
1388 struct irdma_qp *iwqp;
1389 struct irdma_cm_node *cm_node;
1390 struct irdma_device *iwdev = buf->vsi->back_vsi;
1391 u32 loc_addr[4] = {};
1392 u32 rem_addr[4] = {};
1393 u16 loc_port, rem_port;
1394 struct ipv6hdr *ip6h;
1395 struct iphdr *iph = (struct iphdr *)buf->iph;
1396 struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
1398 if (iph->version == 4) {
1399 loc_addr[0] = ntohl(iph->daddr);
1400 rem_addr[0] = ntohl(iph->saddr);
1402 ip6h = (struct ipv6hdr *)buf->iph;
1403 irdma_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
1404 irdma_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
1406 loc_port = ntohs(tcph->dest);
1407 rem_port = ntohs(tcph->source);
1408 cm_node = irdma_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
1409 loc_addr, buf->vlan_valid ? buf->vlan_id : 0xFFFF);
1413 iwqp = cm_node->iwqp;
1414 irdma_rem_ref_cm_node(cm_node);
1416 return &iwqp->sc_qp;
1420 * irdma_send_ieq_ack - ACKs for duplicate or OOO partial FPDUs
1423 void irdma_send_ieq_ack(struct irdma_sc_qp *qp)
1425 struct irdma_cm_node *cm_node = ((struct irdma_qp *)qp->qp_uk.back_qp)->cm_node;
1426 struct irdma_puda_buf *buf = qp->pfpdu.lastrcv_buf;
1427 struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
1429 cm_node->tcp_cntxt.rcv_nxt = qp->pfpdu.nextseqnum;
1430 cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
1432 irdma_send_ack(cm_node);
1436 * irdma_puda_ieq_get_ah_info - get AH info from IEQ buffer
1438 * @ah_info: AH info pointer
1440 void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
1441 struct irdma_ah_info *ah_info)
1443 struct irdma_puda_buf *buf = qp->pfpdu.ah_buf;
1445 struct ipv6hdr *ip6h;
1447 memset(ah_info, 0, sizeof(*ah_info));
1448 ah_info->do_lpbk = true;
1449 ah_info->vlan_tag = buf->vlan_id;
1450 ah_info->insert_vlan_tag = buf->vlan_valid;
1451 ah_info->ipv4_valid = buf->ipv4;
1452 ah_info->vsi = qp->vsi;
1454 if (buf->smac_valid)
1455 ether_addr_copy(ah_info->mac_addr, buf->smac);
1458 ah_info->ipv4_valid = true;
1459 iph = (struct iphdr *)buf->iph;
1460 ah_info->hop_ttl = iph->ttl;
1461 ah_info->tc_tos = iph->tos;
1462 ah_info->dest_ip_addr[0] = ntohl(iph->daddr);
1463 ah_info->src_ip_addr[0] = ntohl(iph->saddr);
1465 ip6h = (struct ipv6hdr *)buf->iph;
1466 ah_info->hop_ttl = ip6h->hop_limit;
1467 ah_info->tc_tos = ip6h->priority;
1468 irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
1469 ip6h->daddr.in6_u.u6_addr32);
1470 irdma_copy_ip_ntohl(ah_info->src_ip_addr,
1471 ip6h->saddr.in6_u.u6_addr32);
1474 ah_info->dst_arpindex = irdma_arp_table(dev_to_rf(qp->dev),
1475 ah_info->dest_ip_addr,
1476 ah_info->ipv4_valid,
1477 NULL, IRDMA_ARP_RESOLVE);
1481 * irdma_gen1_ieq_update_tcpip_info - update tcpip in the buffer
1482 * @buf: puda to update
1483 * @len: length of buffer
1484 * @seqnum: seq number for tcp
1486 static void irdma_gen1_ieq_update_tcpip_info(struct irdma_puda_buf *buf,
1487 u16 len, u32 seqnum)
1489 struct tcphdr *tcph;
1493 u8 *addr = buf->mem.va;
1495 iphlen = (buf->ipv4) ? 20 : 40;
1496 iph = (struct iphdr *)(addr + buf->maclen);
1497 tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
1498 pktsize = len + buf->tcphlen + iphlen;
1499 iph->tot_len = htons(pktsize);
1500 tcph->seq = htonl(seqnum);
1504 * irdma_ieq_update_tcpip_info - update tcpip in the buffer
1505 * @buf: puda to update
1506 * @len: length of buffer
1507 * @seqnum: seq number for tcp
1509 void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
1512 struct tcphdr *tcph;
1515 if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1516 return irdma_gen1_ieq_update_tcpip_info(buf, len, seqnum);
1519 tcph = (struct tcphdr *)addr;
1520 tcph->seq = htonl(seqnum);
1524 * irdma_gen1_puda_get_tcpip_info - get tcpip info from puda
1526 * @info: to get information
1529 static int irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
1530 struct irdma_puda_buf *buf)
1533 struct ipv6hdr *ip6h;
1534 struct tcphdr *tcph;
1537 u8 *mem = buf->mem.va;
1538 struct ethhdr *ethh = buf->mem.va;
1540 if (ethh->h_proto == htons(0x8100)) {
1541 info->vlan_valid = true;
1542 buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) &
1546 buf->maclen = (info->vlan_valid) ? 18 : 14;
1547 iphlen = (info->l3proto) ? 40 : 20;
1548 buf->ipv4 = (info->l3proto) ? false : true;
1549 buf->iph = mem + buf->maclen;
1550 iph = (struct iphdr *)buf->iph;
1551 buf->tcph = buf->iph + iphlen;
1552 tcph = (struct tcphdr *)buf->tcph;
1555 pkt_len = ntohs(iph->tot_len);
1557 ip6h = (struct ipv6hdr *)buf->iph;
1558 pkt_len = ntohs(ip6h->payload_len) + iphlen;
1561 buf->totallen = pkt_len + buf->maclen;
1563 if (info->payload_len < buf->totallen) {
1564 ibdev_dbg(to_ibdev(buf->vsi->dev),
1565 "ERR: payload_len = 0x%x totallen expected0x%x\n",
1566 info->payload_len, buf->totallen);
1570 buf->tcphlen = tcph->doff << 2;
1571 buf->datalen = pkt_len - iphlen - buf->tcphlen;
1572 buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
1573 buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
1574 buf->seqnum = ntohl(tcph->seq);
1580 * irdma_puda_get_tcpip_info - get tcpip info from puda buffer
1581 * @info: to get information
1584 int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
1585 struct irdma_puda_buf *buf)
1587 struct tcphdr *tcph;
1591 if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1592 return irdma_gen1_puda_get_tcpip_info(info, buf);
1595 buf->vlan_valid = info->vlan_valid;
1596 if (info->vlan_valid)
1597 buf->vlan_id = info->vlan;
1599 buf->ipv4 = info->ipv4;
1601 buf->iph = mem + IRDMA_IPV4_PAD;
1605 buf->tcph = mem + IRDMA_TCP_OFFSET;
1606 tcph = (struct tcphdr *)buf->tcph;
1607 pkt_len = info->payload_len;
1608 buf->totallen = pkt_len;
1609 buf->tcphlen = tcph->doff << 2;
1610 buf->datalen = pkt_len - IRDMA_TCP_OFFSET - buf->tcphlen;
1611 buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
1612 buf->hdrlen = IRDMA_TCP_OFFSET + buf->tcphlen;
1613 buf->seqnum = ntohl(tcph->seq);
1615 if (info->smac_valid) {
1616 ether_addr_copy(buf->smac, info->smac);
1617 buf->smac_valid = true;
1624 * irdma_hw_stats_timeout - Stats timer-handler which updates all HW stats
1625 * @t: timer_list pointer
1627 static void irdma_hw_stats_timeout(struct timer_list *t)
1629 struct irdma_vsi_pestat *pf_devstat =
1630 from_timer(pf_devstat, t, stats_timer);
1631 struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;
1633 if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1634 irdma_cqp_gather_stats_gen1(sc_vsi->dev, sc_vsi->pestat);
1636 irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false);
1638 mod_timer(&pf_devstat->stats_timer,
1639 jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
1643 * irdma_hw_stats_start_timer - Start periodic stats timer
1644 * @vsi: vsi structure pointer
1646 void irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi)
1648 struct irdma_vsi_pestat *devstat = vsi->pestat;
1650 timer_setup(&devstat->stats_timer, irdma_hw_stats_timeout, 0);
1651 mod_timer(&devstat->stats_timer,
1652 jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
1656 * irdma_hw_stats_stop_timer - Delete periodic stats timer
1657 * @vsi: pointer to vsi structure
1659 void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi)
1661 struct irdma_vsi_pestat *devstat = vsi->pestat;
1663 del_timer_sync(&devstat->stats_timer);
1667 * irdma_process_stats - check for wrap and update stats
1668 * @pestat: stats structure pointer
1670 static inline void irdma_process_stats(struct irdma_vsi_pestat *pestat)
1672 sc_vsi_update_stats(pestat->vsi);
1676 * irdma_cqp_gather_stats_gen1 - Gather stats
1677 * @dev: pointer to device structure
1678 * @pestat: statistics structure
1680 void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
1681 struct irdma_vsi_pestat *pestat)
1683 struct irdma_gather_stats *gather_stats =
1684 pestat->gather_info.gather_stats_va;
1685 u32 stats_inst_offset_32;
1686 u32 stats_inst_offset_64;
1688 stats_inst_offset_32 = (pestat->gather_info.use_stats_inst) ?
1689 pestat->gather_info.stats_inst_index :
1690 pestat->hw->hmc.hmc_fn_id;
1691 stats_inst_offset_32 *= 4;
1692 stats_inst_offset_64 = stats_inst_offset_32 * 2;
1694 gather_stats->rxvlanerr =
1696 dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_RXVLANERR]
1697 + stats_inst_offset_32);
1698 gather_stats->ip4rxdiscard =
1700 dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4RXDISCARD]
1701 + stats_inst_offset_32);
1702 gather_stats->ip4rxtrunc =
1704 dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4RXTRUNC]
1705 + stats_inst_offset_32);
1706 gather_stats->ip4txnoroute =
1708 dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE]
1709 + stats_inst_offset_32);
1710 gather_stats->ip6rxdiscard =
1712 dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6RXDISCARD]
1713 + stats_inst_offset_32);
1714 gather_stats->ip6rxtrunc =
1716 dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6RXTRUNC]
1717 + stats_inst_offset_32);
1718 gather_stats->ip6txnoroute =
1720 dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE]
1721 + stats_inst_offset_32);
1722 gather_stats->tcprtxseg =
1724 dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_TCPRTXSEG]
1725 + stats_inst_offset_32);
1726 gather_stats->tcprxopterr =
1728 dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_TCPRXOPTERR]
1729 + stats_inst_offset_32);
1731 gather_stats->ip4rxocts =
1733 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXOCTS]
1734 + stats_inst_offset_64);
1735 gather_stats->ip4rxpkts =
1737 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXPKTS]
1738 + stats_inst_offset_64);
1739 gather_stats->ip4rxfrags =
1741 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXFRAGS]
1742 + stats_inst_offset_64);
1743 gather_stats->ip4rxmcpkts =
1745 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS]
1746 + stats_inst_offset_64);
1747 gather_stats->ip4txocts =
1749 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXOCTS]
1750 + stats_inst_offset_64);
1751 gather_stats->ip4txpkts =
1753 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXPKTS]
1754 + stats_inst_offset_64);
1755 gather_stats->ip4txfrag =
1757 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXFRAGS]
1758 + stats_inst_offset_64);
1759 gather_stats->ip4txmcpkts =
1761 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS]
1762 + stats_inst_offset_64);
1763 gather_stats->ip6rxocts =
1765 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXOCTS]
1766 + stats_inst_offset_64);
1767 gather_stats->ip6rxpkts =
1769 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXPKTS]
1770 + stats_inst_offset_64);
1771 gather_stats->ip6rxfrags =
1773 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXFRAGS]
1774 + stats_inst_offset_64);
1775 gather_stats->ip6rxmcpkts =
1777 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS]
1778 + stats_inst_offset_64);
1779 gather_stats->ip6txocts =
1781 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXOCTS]
1782 + stats_inst_offset_64);
1783 gather_stats->ip6txpkts =
1785 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXPKTS]
1786 + stats_inst_offset_64);
1787 gather_stats->ip6txfrags =
1789 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXFRAGS]
1790 + stats_inst_offset_64);
1791 gather_stats->ip6txmcpkts =
1793 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS]
1794 + stats_inst_offset_64);
1795 gather_stats->tcprxsegs =
1797 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_TCPRXSEGS]
1798 + stats_inst_offset_64);
1799 gather_stats->tcptxsegs =
1801 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_TCPTXSEG]
1802 + stats_inst_offset_64);
1803 gather_stats->rdmarxrds =
1805 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXRDS]
1806 + stats_inst_offset_64);
1807 gather_stats->rdmarxsnds =
1809 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXSNDS]
1810 + stats_inst_offset_64);
1811 gather_stats->rdmarxwrs =
1813 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXWRS]
1814 + stats_inst_offset_64);
1815 gather_stats->rdmatxrds =
1817 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXRDS]
1818 + stats_inst_offset_64);
1819 gather_stats->rdmatxsnds =
1821 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXSNDS]
1822 + stats_inst_offset_64);
1823 gather_stats->rdmatxwrs =
1825 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXWRS]
1826 + stats_inst_offset_64);
1827 gather_stats->rdmavbn =
1829 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMAVBND]
1830 + stats_inst_offset_64);
1831 gather_stats->rdmavinv =
1833 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMAVINV]
1834 + stats_inst_offset_64);
1835 gather_stats->udprxpkts =
1837 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_UDPRXPKTS]
1838 + stats_inst_offset_64);
1839 gather_stats->udptxpkts =
1841 dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_UDPTXPKTS]
1842 + stats_inst_offset_64);
1844 irdma_process_stats(pestat);
1848 * irdma_process_cqp_stats - check for wrap and update stats
1849 * @cqp_request: cqp_request structure pointer
1851 static void irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request)
1853 struct irdma_vsi_pestat *pestat = cqp_request->param;
1855 irdma_process_stats(pestat);
1859 * irdma_cqp_gather_stats_cmd - Gather stats
1860 * @dev: pointer to device structure
1861 * @pestat: pointer to stats info
1862 * @wait: flag to wait or not wait for stats
1864 int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
1865 struct irdma_vsi_pestat *pestat, bool wait)
1868 struct irdma_pci_f *rf = dev_to_rf(dev);
1869 struct irdma_cqp *iwcqp = &rf->cqp;
1870 struct irdma_cqp_request *cqp_request;
1871 struct cqp_cmds_info *cqp_info;
1874 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
1878 cqp_info = &cqp_request->info;
1879 memset(cqp_info, 0, sizeof(*cqp_info));
1880 cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
1881 cqp_info->post_sq = 1;
1882 cqp_info->in.u.stats_gather.info = pestat->gather_info;
1883 cqp_info->in.u.stats_gather.scratch = (uintptr_t)cqp_request;
1884 cqp_info->in.u.stats_gather.cqp = &rf->cqp.sc_cqp;
1885 cqp_request->param = pestat;
1887 cqp_request->callback_fcn = irdma_process_cqp_stats;
1888 status = irdma_handle_cqp_op(rf, cqp_request);
1890 irdma_process_stats(pestat);
1891 irdma_put_cqp_request(&rf->cqp, cqp_request);
1897 * irdma_cqp_stats_inst_cmd - Allocate/free stats instance
1898 * @vsi: pointer to vsi structure
1899 * @cmd: command to allocate or free
1900 * @stats_info: pointer to allocate stats info
1902 int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
1903 struct irdma_stats_inst_info *stats_info)
1905 struct irdma_pci_f *rf = dev_to_rf(vsi->dev);
1906 struct irdma_cqp *iwcqp = &rf->cqp;
1907 struct irdma_cqp_request *cqp_request;
1908 struct cqp_cmds_info *cqp_info;
1912 if (cmd == IRDMA_OP_STATS_ALLOCATE)
1914 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
1918 cqp_info = &cqp_request->info;
1919 memset(cqp_info, 0, sizeof(*cqp_info));
1920 cqp_info->cqp_cmd = cmd;
1921 cqp_info->post_sq = 1;
1922 cqp_info->in.u.stats_manage.info = *stats_info;
1923 cqp_info->in.u.stats_manage.scratch = (uintptr_t)cqp_request;
1924 cqp_info->in.u.stats_manage.cqp = &rf->cqp.sc_cqp;
1925 status = irdma_handle_cqp_op(rf, cqp_request);
1927 stats_info->stats_idx = cqp_request->compl_info.op_ret_val;
1928 irdma_put_cqp_request(iwcqp, cqp_request);
1934 * irdma_cqp_ceq_cmd - Create/Destroy CEQ's after CEQ 0
1935 * @dev: pointer to device info
1936 * @sc_ceq: pointer to ceq structure
1937 * @op: Create or Destroy
1939 int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
1942 struct irdma_cqp_request *cqp_request;
1943 struct cqp_cmds_info *cqp_info;
1944 struct irdma_pci_f *rf = dev_to_rf(dev);
1947 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1951 cqp_info = &cqp_request->info;
1952 cqp_info->post_sq = 1;
1953 cqp_info->cqp_cmd = op;
1954 cqp_info->in.u.ceq_create.ceq = sc_ceq;
1955 cqp_info->in.u.ceq_create.scratch = (uintptr_t)cqp_request;
1957 status = irdma_handle_cqp_op(rf, cqp_request);
1958 irdma_put_cqp_request(&rf->cqp, cqp_request);
1964 * irdma_cqp_aeq_cmd - Create/Destroy AEQ
1965 * @dev: pointer to device info
1966 * @sc_aeq: pointer to aeq structure
1967 * @op: Create or Destroy
1969 int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
1972 struct irdma_cqp_request *cqp_request;
1973 struct cqp_cmds_info *cqp_info;
1974 struct irdma_pci_f *rf = dev_to_rf(dev);
1977 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1981 cqp_info = &cqp_request->info;
1982 cqp_info->post_sq = 1;
1983 cqp_info->cqp_cmd = op;
1984 cqp_info->in.u.aeq_create.aeq = sc_aeq;
1985 cqp_info->in.u.aeq_create.scratch = (uintptr_t)cqp_request;
1987 status = irdma_handle_cqp_op(rf, cqp_request);
1988 irdma_put_cqp_request(&rf->cqp, cqp_request);
1994 * irdma_cqp_ws_node_cmd - Add/modify/delete ws node
1995 * @dev: pointer to device structure
1996 * @cmd: Add, modify or delete
1997 * @node_info: pointer to ws node info
1999 int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
2000 struct irdma_ws_node_info *node_info)
2002 struct irdma_pci_f *rf = dev_to_rf(dev);
2003 struct irdma_cqp *iwcqp = &rf->cqp;
2004 struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
2005 struct irdma_cqp_request *cqp_request;
2006 struct cqp_cmds_info *cqp_info;
2010 if (!rf->sc_dev.ceq_valid)
2015 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, !poll);
2019 cqp_info = &cqp_request->info;
2020 memset(cqp_info, 0, sizeof(*cqp_info));
2021 cqp_info->cqp_cmd = cmd;
2022 cqp_info->post_sq = 1;
2023 cqp_info->in.u.ws_node.info = *node_info;
2024 cqp_info->in.u.ws_node.cqp = cqp;
2025 cqp_info->in.u.ws_node.scratch = (uintptr_t)cqp_request;
2026 status = irdma_handle_cqp_op(rf, cqp_request);
2031 struct irdma_ccq_cqe_info compl_info;
2033 status = irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_WORK_SCHED_NODE,
2035 node_info->qs_handle = compl_info.op_ret_val;
2036 ibdev_dbg(&rf->iwdev->ibdev, "DCB: opcode=%d, compl_info.retval=%d\n",
2037 compl_info.op_code, compl_info.op_ret_val);
2039 node_info->qs_handle = cqp_request->compl_info.op_ret_val;
2043 irdma_put_cqp_request(&rf->cqp, cqp_request);
2049 * irdma_ah_cqp_op - perform an AH cqp operation
2050 * @rf: RDMA PCI function
2051 * @sc_ah: address handle
2052 * @cmd: AH operation
2053 * @wait: wait if true
2054 * @callback_fcn: Callback function on CQP op completion
2055 * @cb_param: parameter for callback function
2059 int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
2061 void (*callback_fcn)(struct irdma_cqp_request *),
2064 struct irdma_cqp_request *cqp_request;
2065 struct cqp_cmds_info *cqp_info;
2068 if (cmd != IRDMA_OP_AH_CREATE && cmd != IRDMA_OP_AH_DESTROY)
2071 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2075 cqp_info = &cqp_request->info;
2076 cqp_info->cqp_cmd = cmd;
2077 cqp_info->post_sq = 1;
2078 if (cmd == IRDMA_OP_AH_CREATE) {
2079 cqp_info->in.u.ah_create.info = sc_ah->ah_info;
2080 cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request;
2081 cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp;
2082 } else if (cmd == IRDMA_OP_AH_DESTROY) {
2083 cqp_info->in.u.ah_destroy.info = sc_ah->ah_info;
2084 cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request;
2085 cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp;
2089 cqp_request->callback_fcn = callback_fcn;
2090 cqp_request->param = cb_param;
2092 status = irdma_handle_cqp_op(rf, cqp_request);
2093 irdma_put_cqp_request(&rf->cqp, cqp_request);
2099 sc_ah->ah_info.ah_valid = (cmd == IRDMA_OP_AH_CREATE);
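/*
 * Example (illustrative sketch): synchronously creating and later tearing
 * down an AH via the generic AH CQP operation above. The helper is
 * hypothetical; no completion callback is needed in wait mode, and the
 * fire-and-forget destroy mirrors irdma_puda_free_ah() below.
 */
static inline int irdma_example_ah_roundtrip(struct irdma_pci_f *rf,
					     struct irdma_sc_ah *sc_ah)
{
	int err = irdma_ah_cqp_op(rf, sc_ah, IRDMA_OP_AH_CREATE, true,
				  NULL, NULL);

	if (err)
		return err;
	/* ... use the AH; ah_info.ah_valid is now true ... */
	return irdma_ah_cqp_op(rf, sc_ah, IRDMA_OP_AH_DESTROY, false,
			       NULL, NULL);
}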
2105 * irdma_ieq_ah_cb - callback after creation of AH for IEQ
2106 * @cqp_request: pointer to cqp_request of create AH
2108 static void irdma_ieq_ah_cb(struct irdma_cqp_request *cqp_request)
2110 struct irdma_sc_qp *qp = cqp_request->param;
2111 struct irdma_sc_ah *sc_ah = qp->pfpdu.ah;
2112 unsigned long flags;
2114 spin_lock_irqsave(&qp->pfpdu.lock, flags);
2115 if (!cqp_request->compl_info.op_ret_val) {
2116 sc_ah->ah_info.ah_valid = true;
2117 irdma_ieq_process_fpdus(qp, qp->vsi->ieq);
2119 sc_ah->ah_info.ah_valid = false;
2120 irdma_ieq_cleanup_qp(qp->vsi->ieq, qp);
2122 spin_unlock_irqrestore(&qp->pfpdu.lock, flags);
2126 * irdma_ilq_ah_cb - callback after creation of AH for ILQ
2127 * @cqp_request: pointer to cqp_request of create AH
2129 static void irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request)
2131 struct irdma_cm_node *cm_node = cqp_request->param;
2132 struct irdma_sc_ah *sc_ah = cm_node->ah;
2134 sc_ah->ah_info.ah_valid = !cqp_request->compl_info.op_ret_val;
2135 irdma_add_conn_est_qh(cm_node);
2139 * irdma_puda_create_ah - create AH for ILQ/IEQ qp's
2140 * @dev: device pointer
2141 * @ah_info: Address handle info
2142 * @wait: When true will wait for operation to complete
2144 * @cb_param: Callback param when not waiting
2145 * @ah_ret: Returned pointer to address handle if created
2148 int irdma_puda_create_ah(struct irdma_sc_dev *dev,
2149 struct irdma_ah_info *ah_info, bool wait,
2150 enum puda_rsrc_type type, void *cb_param,
2151 struct irdma_sc_ah **ah_ret)
2153 struct irdma_sc_ah *ah;
2154 struct irdma_pci_f *rf = dev_to_rf(dev);
2157 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2162 err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah,
2163 &ah_info->ah_idx, &rf->next_ah);
2168 ah->ah_info = *ah_info;
2170 if (type == IRDMA_PUDA_RSRC_TYPE_ILQ)
2171 err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
2172 irdma_ilq_ah_cb, cb_param);
2174 err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
2175 irdma_ieq_ah_cb, cb_param);
2182 irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
2190 * irdma_puda_free_ah - free a puda address handle
2191 * @dev: device pointer
2192 * @ah: The address handle to free
2194 void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
2196 struct irdma_pci_f *rf = dev_to_rf(dev);
2201 if (ah->ah_info.ah_valid) {
2202 irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_DESTROY, false, NULL, NULL);
2203 irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
2210 * irdma_gsi_ud_qp_ah_cb - callback after creation of AH for GSI/UD QP
2211 * @cqp_request: pointer to cqp_request of create AH
2213 void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request)
2215 struct irdma_sc_ah *sc_ah = cqp_request->param;
2217 if (!cqp_request->compl_info.op_ret_val)
2218 sc_ah->ah_info.ah_valid = true;
2220 sc_ah->ah_info.ah_valid = false;
2224 * irdma_prm_add_pble_mem - add memory to pble resources
2225 * @pprm: pble resource manager
2226 * @pchunk: chunk of memory to add
2228 int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
2229 struct irdma_chunk *pchunk)
2233 if (pchunk->size & 0xfff)
2236 sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
2238 pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
2239 if (!pchunk->bitmapbuf)
2242 pchunk->sizeofbitmap = sizeofbitmap;
2243 /* each pble is 8 bytes hence shift by 3 */
2244 pprm->total_pble_alloc += pchunk->size >> 3;
2245 pprm->free_pble_cnt += pchunk->size >> 3;
2251 * irdma_prm_get_pbles - get pble's from prm
2252 * @pprm: pble resource manager
2253 * @chunkinfo: information about chunk where pble's were acquired
2254 * @mem_size: size of pble memory needed
2255 * @vaddr: returns virtual address of pble memory
2256 * @fpm_addr: returns fpm address of pble memory
2258 int irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
2259 struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
2260 u64 **vaddr, u64 *fpm_addr)
2263 u64 bit_idx = PBLE_INVALID_IDX;
2264 struct irdma_chunk *pchunk = NULL;
2265 struct list_head *chunk_entry = pprm->clist.next;
2267 unsigned long flags;
2271 bits_needed = DIV_ROUND_UP_ULL(mem_size, BIT_ULL(pprm->pble_shift));
2273 spin_lock_irqsave(&pprm->prm_lock, flags);
2274 while (chunk_entry != &pprm->clist) {
2275 pchunk = (struct irdma_chunk *)chunk_entry;
2276 bit_idx = bitmap_find_next_zero_area(pchunk->bitmapbuf,
2277 pchunk->sizeofbitmap, 0,
2279 if (bit_idx < pchunk->sizeofbitmap)
2282 /* list.next used macro */
2283 chunk_entry = pchunk->list.next;
2286 if (!pchunk || bit_idx >= pchunk->sizeofbitmap) {
2287 spin_unlock_irqrestore(&pprm->prm_lock, flags);
2291 bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed);
2292 offset = bit_idx << pprm->pble_shift;
2293 *vaddr = pchunk->vaddr + offset;
2294 *fpm_addr = pchunk->fpm_addr + offset;
2296 chunkinfo->pchunk = pchunk;
2297 chunkinfo->bit_idx = bit_idx;
2298 chunkinfo->bits_used = bits_needed;
2299 /* 3 is sizeof pble divide */
2300 pprm->free_pble_cnt -= chunkinfo->bits_used << (pprm->pble_shift - 3);
2301 spin_unlock_irqrestore(&pprm->prm_lock, flags);
/**
 * irdma_prm_return_pbles - return pbles back to prm
 * @pprm: pble resource manager
 * @chunkinfo: chunk where pble's were acquired and to be freed
 */
void irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
			    struct irdma_pble_chunkinfo *chunkinfo)
{
	unsigned long flags;

	spin_lock_irqsave(&pprm->prm_lock, flags);
	pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3);
	bitmap_clear(chunkinfo->pchunk->bitmapbuf, chunkinfo->bit_idx,
		     chunkinfo->bits_used);
	spin_unlock_irqrestore(&pprm->prm_lock, flags);
}
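/*
 * Usage sketch (illustrative only; "my_pprm" and "my_size" are placeholder
 * names): callers pair irdma_prm_get_pbles() with irdma_prm_return_pbles(),
 * keeping the chunkinfo cookie between the two calls.
 *
 *	struct irdma_pble_chunkinfo ci = {};
 *	u64 *va;
 *	u64 fpm;
 *
 *	if (!irdma_prm_get_pbles(my_pprm, &ci, my_size, &va, &fpm)) {
 *		// ... program va / fpm into the HW context ...
 *		irdma_prm_return_pbles(my_pprm, &ci);
 *	}
 */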
int irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma,
			   u32 pg_cnt)
{
	struct page *vm_page;
	u32 i;
	u8 *addr;

	addr = (u8 *)(uintptr_t)va;
	for (i = 0; i < pg_cnt; i++) {
		vm_page = vmalloc_to_page(addr);
		if (!vm_page)
			goto err;

		pg_dma[i] = dma_map_page(hw->device, vm_page, 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(hw->device, pg_dma[i]))
			goto err;

		addr += PAGE_SIZE;
	}

	return 0;

err:
	/* unwind the pages already mapped */
	irdma_unmap_vm_page_list(hw, pg_dma, i);
	return -ENOMEM;
}
void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt)
{
	u32 i;

	for (i = 0; i < pg_cnt; i++)
		dma_unmap_page(hw->device, pg_dma[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
}
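/*
 * Sketch of the expected pairing (illustrative; "my_hw" and "npages" are
 * placeholders): a vmalloc'ed buffer is mapped page-by-page for DMA and
 * must be unmapped with the same count on teardown.
 *
 *	void *buf = vmalloc(npages * PAGE_SIZE);
 *	dma_addr_t *dma = kcalloc(npages, sizeof(*dma), GFP_KERNEL);
 *
 *	if (buf && dma && !irdma_map_vm_page_list(my_hw, buf, dma, npages)) {
 *		// ... hand dma[] to hardware ...
 *		irdma_unmap_vm_page_list(my_hw, dma, npages);
 *	}
 */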
/**
 * irdma_pble_free_paged_mem - free virtual paged memory
 * @chunk: chunk to free with paged memory
 */
void irdma_pble_free_paged_mem(struct irdma_chunk *chunk)
{
	if (!chunk->pg_cnt)
		goto done;

	irdma_unmap_vm_page_list(chunk->dev->hw, chunk->dmainfo.dmaaddrs,
				 chunk->pg_cnt);

done:
	kfree(chunk->dmainfo.dmaaddrs);
	chunk->dmainfo.dmaaddrs = NULL;
	vfree(chunk->vaddr);
	chunk->vaddr = NULL;
	chunk->type = 0;
}
/**
 * irdma_pble_get_paged_mem - allocate paged memory for pbles
 * @chunk: chunk to add for paged memory
 * @pg_cnt: number of pages needed
 */
int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt)
{
	u32 size;
	void *va;

	/* pg_cnt << 3 == pg_cnt * sizeof(dma_addr_t) on 64-bit */
	chunk->dmainfo.dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
	if (!chunk->dmainfo.dmaaddrs)
		return -ENOMEM;

	size = PAGE_SIZE * pg_cnt;
	va = vmalloc(size);
	if (!va)
		goto err;

	if (irdma_map_vm_page_list(chunk->dev->hw, va, chunk->dmainfo.dmaaddrs,
				   pg_cnt)) {
		vfree(va);
		goto err;
	}
	chunk->vaddr = va;
	chunk->size = size;
	chunk->pg_cnt = pg_cnt;
	chunk->type = PBLE_SD_PAGED;

	return 0;
err:
	kfree(chunk->dmainfo.dmaaddrs);
	chunk->dmainfo.dmaaddrs = NULL;

	return -ENOMEM;
}
/**
 * irdma_alloc_ws_node_id - Allocate a tx scheduler node ID
 * @dev: device pointer
 */
u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);
	u32 next = 1;
	u32 node_id = IRDMA_WS_NODE_INVALID;

	if (irdma_alloc_rsrc(rf, rf->allocated_ws_nodes, rf->max_ws_node_id,
			     &node_id, &next))
		return IRDMA_WS_NODE_INVALID;

	return (u16)node_id;
}
/**
 * irdma_free_ws_node_id - Free a tx scheduler node ID
 * @dev: device pointer
 * @node_id: Work scheduler node ID
 */
void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);

	irdma_free_rsrc(rf, rf->allocated_ws_nodes, (u32)node_id);
}
/**
 * irdma_modify_qp_to_err - Modify a QP to error
 * @sc_qp: qp structure
 */
void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
{
	struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
	struct ib_qp_attr attr;

	if (qp->iwdev->rf->reset)
		return;
	attr.qp_state = IB_QPS_ERR;

	if (rdma_protocol_roce(qp->ibqp.device, 1))
		irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
	else
		irdma_modify_qp(&qp->ibqp, &attr, IB_QP_STATE, NULL);
}
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
{
	struct ib_event ibevent;

	if (!iwqp->ibqp.event_handler)
		return;

	switch (event) {
	case IRDMA_QP_EVENT_CATASTROPHIC:
		ibevent.event = IB_EVENT_QP_FATAL;
		break;
	case IRDMA_QP_EVENT_ACCESS_ERR:
		ibevent.event = IB_EVENT_QP_ACCESS_ERR;
		break;
	}
	ibevent.device = iwqp->ibqp.device;
	ibevent.element.qp = &iwqp->ibqp;
	iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
}
bool irdma_cq_empty(struct irdma_cq *iwcq)
{
	struct irdma_cq_uk *ukcq;
	u64 qword3;
	__le64 *cqe;
	u8 polarity;

	ukcq = &iwcq->sc_cq.cq_uk;
	cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

	return polarity != ukcq->polarity;
}
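/*
 * Note on the check above: the CQE valid bit flips each time the ring
 * wraps, so a CQE whose polarity does not match the CQ's expected
 * polarity has not been written on the current pass, i.e. the CQ is
 * empty at its current head.
 */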
void irdma_remove_cmpls_list(struct irdma_cq *iwcq)
{
	struct irdma_cmpl_gen *cmpl_node;
	struct list_head *tmp_node, *list_node;

	list_for_each_safe (list_node, tmp_node, &iwcq->cmpl_generated) {
		cmpl_node = list_entry(list_node, struct irdma_cmpl_gen, list);
		list_del(&cmpl_node->list);
		kfree(cmpl_node);
	}
}
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info)
{
	struct irdma_cmpl_gen *cmpl;

	if (list_empty(&iwcq->cmpl_generated))
		return -ENOENT;
	cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, struct irdma_cmpl_gen, list);
	list_del(&cmpl->list);
	memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info));
	kfree(cmpl);

	ibdev_dbg(iwcq->ibcq.device,
		  "VERBS: %s: Poll artificially generated completion for QP 0x%X, op %u, wr_id=0x%llx\n",
		  __func__, cq_poll_info->qp_id, cq_poll_info->op_type,
		  cq_poll_info->wr_id);

	return 0;
}
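/*
 * Sketch of how a poll path would drain these entries (illustrative only;
 * the real consumer is the verbs poll_cq implementation):
 *
 *	struct irdma_cq_poll_info cpi;
 *
 *	while (!irdma_generated_cmpls(my_iwcq, &cpi)) {
 *		// translate cpi (comp_status == IRDMA_COMPL_STATUS_FLUSHED)
 *		// into an ib_wc with a flush error status
 *	}
 */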
/**
 * irdma_set_cpi_common_values - fill in values for polling info struct
 * @cpi: resulting structure of cq_poll_info type
 * @qp: QP for which the completion is being generated
 * @qp_num: id of the QP
 */
static void irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi,
					struct irdma_qp_uk *qp, u32 qp_num)
{
	cpi->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
	cpi->error = true;
	cpi->major_err = IRDMA_FLUSH_MAJOR_ERR;
	cpi->minor_err = FLUSH_GENERAL_ERR;
	cpi->qp_handle = (irdma_qp_handle)(uintptr_t)qp;
	cpi->qp_id = qp_num;
}
static inline void irdma_comp_handler(struct irdma_cq *cq)
{
	if (!cq->ibcq.comp_handler)
		return;

	if (atomic_cmpxchg(&cq->armed, 1, 0))
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
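/*
 * The atomic_cmpxchg() above disarms the CQ and fires the handler only if
 * it was armed (armed == 1), so each arm produces at most one upcall even
 * when several flush completions are queued back to back.
 */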
void irdma_generate_flush_completions(struct irdma_qp *iwqp)
{
	struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
	struct irdma_ring *sq_ring = &qp->sq_ring;
	struct irdma_ring *rq_ring = &qp->rq_ring;
	struct irdma_cmpl_gen *cmpl;
	__le64 *sw_wqe;
	u64 wqe_qword;
	u32 wqe_idx;
	bool compl_generated = false;
	unsigned long flags1;

	spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
	if (irdma_cq_empty(iwqp->iwscq)) {
		unsigned long flags2;

		spin_lock_irqsave(&iwqp->lock, flags2);
		while (IRDMA_RING_MORE_WORK(*sq_ring)) {
			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
			if (!cmpl) {
				spin_unlock_irqrestore(&iwqp->lock, flags2);
				spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
				return;
			}

			wqe_idx = sq_ring->tail;
			irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);

			cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			sw_wqe = qp->sq_base[wqe_idx].elem;
			get_64bit_val(sw_wqe, 24, &wqe_qword);
			cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
			/* remove the SQ WR by moving SQ tail */
			IRDMA_RING_SET_TAIL(*sq_ring,
					    sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
			ibdev_dbg(iwqp->iwscq->ibcq.device,
				  "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
				  __func__, cmpl->cpi.wr_id, qp->qp_id);
			list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
			compl_generated = true;
		}
		spin_unlock_irqrestore(&iwqp->lock, flags2);
		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
		if (compl_generated)
			irdma_comp_handler(iwqp->iwscq);
	} else {
		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
	}

	/* reset for the RQ pass so an SQ-only flush does not fire the RQ handler */
	compl_generated = false;
	spin_lock_irqsave(&iwqp->iwrcq->lock, flags1);
	if (irdma_cq_empty(iwqp->iwrcq)) {
		unsigned long flags2;

		spin_lock_irqsave(&iwqp->lock, flags2);
		while (IRDMA_RING_MORE_WORK(*rq_ring)) {
			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
			if (!cmpl) {
				spin_unlock_irqrestore(&iwqp->lock, flags2);
				spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
				return;
			}

			wqe_idx = rq_ring->tail;
			irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);

			cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx];
			cmpl->cpi.op_type = IRDMA_OP_TYPE_REC;
			/* remove the RQ WR by moving RQ tail */
			IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
			ibdev_dbg(iwqp->iwrcq->ibcq.device,
				  "DEV: %s: adding wr_id = 0x%llx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
				  __func__, cmpl->cpi.wr_id, qp->qp_id,
				  wqe_idx);
			list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);

			compl_generated = true;
		}
		spin_unlock_irqrestore(&iwqp->lock, flags2);
		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
		if (compl_generated)
			irdma_comp_handler(iwqp->iwrcq);
	} else {
		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
	}
}