// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "main.h"

/**
 * irdma_arp_table - manage arp table
 * @rf: RDMA PCI function
 * @ip_addr: ip address for device
 * @ipv4: IPv4 flag
 * @mac_addr: mac address ptr
 * @action: modify, delete or add
 */
int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
		    const u8 *mac_addr, u32 action)
{
	unsigned long flags;
	int arp_index;
	u32 ip[4] = {};

	if (ipv4)
		ip[0] = *ip_addr;
	else
		memcpy(ip, ip_addr, sizeof(ip));

	spin_lock_irqsave(&rf->arp_lock, flags);
	for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) {
		if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip)))
			break;
	}

	switch (action) {
	case IRDMA_ARP_ADD:
		if (arp_index != rf->arp_table_size) {
			arp_index = -1;
			break;
		}

		arp_index = 0;
		if (irdma_alloc_rsrc(rf, rf->allocated_arps, rf->arp_table_size,
				     (u32 *)&arp_index, &rf->next_arp_index)) {
			arp_index = -1;
			break;
		}

		memcpy(rf->arp_table[arp_index].ip_addr, ip,
		       sizeof(rf->arp_table[arp_index].ip_addr));
		ether_addr_copy(rf->arp_table[arp_index].mac_addr, mac_addr);
		break;
	case IRDMA_ARP_RESOLVE:
		if (arp_index == rf->arp_table_size)
			arp_index = -1;
		break;
	case IRDMA_ARP_DELETE:
		if (arp_index == rf->arp_table_size) {
			arp_index = -1;
			break;
		}

		memset(rf->arp_table[arp_index].ip_addr, 0,
		       sizeof(rf->arp_table[arp_index].ip_addr));
		eth_zero_addr(rf->arp_table[arp_index].mac_addr);
		irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
		break;
	default:
		arp_index = -1;
		break;
	}

	spin_unlock_irqrestore(&rf->arp_lock, flags);
	return arp_index;
}

/**
 * irdma_add_arp - add a new arp entry if needed
 * @rf: RDMA function
 * @ip: IP address
 * @ipv4: IPv4 flag
 * @mac: MAC address
 */
int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac)
{
	int arpidx;

	arpidx = irdma_arp_table(rf, &ip[0], ipv4, NULL, IRDMA_ARP_RESOLVE);
	if (arpidx >= 0) {
		if (ether_addr_equal(rf->arp_table[arpidx].mac_addr, mac))
			return arpidx;

		irdma_manage_arp_cache(rf, rf->arp_table[arpidx].mac_addr, ip,
				       ipv4, IRDMA_ARP_DELETE);
	}

	irdma_manage_arp_cache(rf, mac, ip, ipv4, IRDMA_ARP_ADD);

	return irdma_arp_table(rf, ip, ipv4, NULL, IRDMA_ARP_RESOLVE);
}

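/*
 * Illustrative only (not part of the driver): resolving an IPv4 address to an
 * ARP table index and reading back the cached MAC.  The address value is a
 * made-up placeholder.
 */
static __always_unused void irdma_example_arp_lookup(struct irdma_pci_f *rf)
{
	u32 ip = 0x0a000001;	/* 10.0.0.1, host byte order */
	int arpidx;

	arpidx = irdma_arp_table(rf, &ip, true, NULL, IRDMA_ARP_RESOLVE);
	if (arpidx >= 0)
		pr_info("MAC %pM cached at arp index %d\n",
			rf->arp_table[arpidx].mac_addr, arpidx);
}
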
/**
 * wr32 - write 32 bits to hw register
 * @hw: hardware information including registers
 * @reg: register offset
 * @val: value to write to register
 */
inline void wr32(struct irdma_hw *hw, u32 reg, u32 val)
{
	writel(val, hw->hw_addr + reg);
}

/**
 * rd32 - read a 32 bit hw register
 * @hw: hardware information including registers
 * @reg: register offset
 *
 * Return value of register content
 */
inline u32 rd32(struct irdma_hw *hw, u32 reg)
{
	return readl(hw->hw_addr + reg);
}

/**
 * rd64 - read a 64 bit hw register
 * @hw: hardware information including registers
 * @reg: register offset
 *
 * Return value of register content
 */
inline u64 rd64(struct irdma_hw *hw, u32 reg)
{
	return readq(hw->hw_addr + reg);
}

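/*
 * Illustrative only: a read-modify-write of a device register using the MMIO
 * helpers above.  The register offset and bit position are hypothetical.
 */
static __always_unused void irdma_example_reg_rmw(struct irdma_hw *hw)
{
	u32 val = rd32(hw, 0x100);	/* hypothetical register offset */

	val |= BIT(0);			/* hypothetical enable bit */
	wr32(hw, 0x100, val);
}
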
static void irdma_gid_change_event(struct ib_device *ibdev)
{
	struct ib_event ib_event;

	ib_event.event = IB_EVENT_GID_CHANGE;
	ib_event.device = ibdev;
	ib_event.element.port_num = 1;
	ib_dispatch_event(&ib_event);
}

/**
 * irdma_inetaddr_event - system notifier for ipv4 addr events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: if address
 */
int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
			 void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *real_dev, *netdev = ifa->ifa_dev->dev;
	struct irdma_device *iwdev;
	struct ib_device *ibdev;
	u32 local_ipaddr;

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
	if (!ibdev)
		return NOTIFY_DONE;

	iwdev = to_iwdev(ibdev);
	local_ipaddr = ntohl(ifa->ifa_address);
	ibdev_dbg(&iwdev->ibdev,
		  "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", real_dev,
		  event, &local_ipaddr, real_dev->dev_addr);
	switch (event) {
	case NETDEV_DOWN:
		irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
				       &local_ipaddr, true, IRDMA_ARP_DELETE);
		irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, false);
		irdma_gid_change_event(&iwdev->ibdev);
		break;
	case NETDEV_UP:
	case NETDEV_CHANGEADDR:
		irdma_add_arp(iwdev->rf, &local_ipaddr, true, real_dev->dev_addr);
		irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, true);
		irdma_gid_change_event(&iwdev->ibdev);
		break;
	default:
		break;
	}

	ib_device_put(ibdev);

	return NOTIFY_DONE;
}

/**
 * irdma_inet6addr_event - system notifier for ipv6 addr events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: if address
 */
int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
			  void *ptr)
{
	struct inet6_ifaddr *ifa = ptr;
	struct net_device *real_dev, *netdev = ifa->idev->dev;
	struct irdma_device *iwdev;
	struct ib_device *ibdev;
	u32 local_ipaddr6[4];

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
	if (!ibdev)
		return NOTIFY_DONE;

	iwdev = to_iwdev(ibdev);
	irdma_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
	ibdev_dbg(&iwdev->ibdev,
		  "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", real_dev,
		  event, local_ipaddr6, real_dev->dev_addr);
	switch (event) {
	case NETDEV_DOWN:
		irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
				       local_ipaddr6, false, IRDMA_ARP_DELETE);
		irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, false);
		irdma_gid_change_event(&iwdev->ibdev);
		break;
	case NETDEV_UP:
	case NETDEV_CHANGEADDR:
		irdma_add_arp(iwdev->rf, local_ipaddr6, false,
			      real_dev->dev_addr);
		irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, true);
		irdma_gid_change_event(&iwdev->ibdev);
		break;
	default:
		break;
	}

	ib_device_put(ibdev);

	return NOTIFY_DONE;
}

/**
 * irdma_net_event - system notifier for net events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: neighbor
 */
int irdma_net_event(struct notifier_block *notifier, unsigned long event,
		    void *ptr)
{
	struct neighbour *neigh = ptr;
	struct net_device *real_dev, *netdev = (struct net_device *)neigh->dev;
	struct irdma_device *iwdev;
	struct ib_device *ibdev;
	__be32 *p;
	u32 local_ipaddr[4] = {};
	bool ipv4 = true;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		real_dev = rdma_vlan_dev_real_dev(netdev);
		if (!real_dev)
			real_dev = netdev;
		ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
		if (!ibdev)
			return NOTIFY_DONE;

		iwdev = to_iwdev(ibdev);
		p = (__be32 *)neigh->primary_key;
		if (neigh->tbl->family == AF_INET6) {
			ipv4 = false;
			irdma_copy_ip_ntohl(local_ipaddr, p);
		} else {
			local_ipaddr[0] = ntohl(*p);
		}

		ibdev_dbg(&iwdev->ibdev,
			  "DEV: netdev %p state %d local_ip=%pI4 MAC=%pM\n",
			  iwdev->netdev, neigh->nud_state, local_ipaddr,
			  neigh->ha);

		if (neigh->nud_state & NUD_VALID)
			irdma_add_arp(iwdev->rf, local_ipaddr, ipv4, neigh->ha);
		else
			irdma_manage_arp_cache(iwdev->rf, neigh->ha,
					       local_ipaddr, ipv4,
					       IRDMA_ARP_DELETE);
		ib_device_put(ibdev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

/**
 * irdma_netdevice_event - system notifier for netdev events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: netdev
 */
int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
			  void *ptr)
{
	struct irdma_device *iwdev;
	struct ib_device *ibdev;
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA);
	if (!ibdev)
		return NOTIFY_DONE;

	iwdev = to_iwdev(ibdev);
	iwdev->iw_status = 1;
	switch (event) {
	case NETDEV_DOWN:
		iwdev->iw_status = 0;
		fallthrough;
	case NETDEV_UP:
		irdma_port_ibevent(iwdev);
		break;
	default:
		break;
	}
	ib_device_put(ibdev);

	return NOTIFY_DONE;
}

/**
 * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: irdma device
 */
static void irdma_add_ipv6_addr(struct irdma_device *iwdev)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp, *tmp;
	u32 local_ipaddr6[4];

	rcu_read_lock();
	for_each_netdev_rcu (&init_net, ip_dev) {
		if (((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF &&
		      rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev) ||
		      ip_dev == iwdev->netdev) &&
		      (READ_ONCE(ip_dev->flags) & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				ibdev_err(&iwdev->ibdev, "ipv6 inet device not found\n");
				break;
			}
			list_for_each_entry_safe (ifp, tmp, &idev->addr_list,
						  if_list) {
				ibdev_dbg(&iwdev->ibdev,
					  "INIT: IP=%pI6, vlan_id=%d, MAC=%pM\n",
					  &ifp->addr,
					  rdma_vlan_dev_vlan_id(ip_dev),
					  ip_dev->dev_addr);

				irdma_copy_ip_ntohl(local_ipaddr6,
						    ifp->addr.in6_u.u6_addr32);
				irdma_manage_arp_cache(iwdev->rf,
						       ip_dev->dev_addr,
						       local_ipaddr6, false,
						       IRDMA_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}

/**
 * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
 * @iwdev: irdma device
 */
static void irdma_add_ipv4_addr(struct irdma_device *iwdev)
{
	struct net_device *dev;
	struct in_device *idev;
	u32 ip_addr;

	rcu_read_lock();
	for_each_netdev_rcu (&init_net, dev) {
		if (((rdma_vlan_dev_vlan_id(dev) < 0xFFFF &&
		      rdma_vlan_dev_real_dev(dev) == iwdev->netdev) ||
		      dev == iwdev->netdev) && (READ_ONCE(dev->flags) & IFF_UP)) {
			const struct in_ifaddr *ifa;

			idev = __in_dev_get_rcu(dev);
			if (!idev)
				continue;

			in_dev_for_each_ifa_rcu(ifa, idev) {
				ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI4, vlan_id=%d, MAC=%pM\n",
					  &ifa->ifa_address, rdma_vlan_dev_vlan_id(dev),
					  dev->dev_addr);

				ip_addr = ntohl(ifa->ifa_address);
				irdma_manage_arp_cache(iwdev->rf, dev->dev_addr,
						       &ip_addr, true,
						       IRDMA_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}

/**
 * irdma_add_ip - add ip addresses
 * @iwdev: irdma device
 *
 * Add ipv4/ipv6 addresses to the arp cache
 */
void irdma_add_ip(struct irdma_device *iwdev)
{
	irdma_add_ipv4_addr(iwdev);
	irdma_add_ipv6_addr(iwdev);
}

/**
 * irdma_alloc_and_get_cqp_request - get cqp struct
 * @cqp: device cqp ptr
 * @wait: cqp to be used in wait mode
 */
struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
							  bool wait)
{
	struct irdma_cqp_request *cqp_request = NULL;
	unsigned long flags;

	spin_lock_irqsave(&cqp->req_lock, flags);
	if (!list_empty(&cqp->cqp_avail_reqs)) {
		cqp_request = list_first_entry(&cqp->cqp_avail_reqs,
					       struct irdma_cqp_request, list);
		list_del_init(&cqp_request->list);
	}
	spin_unlock_irqrestore(&cqp->req_lock, flags);
	if (!cqp_request) {
		cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
		if (cqp_request) {
			cqp_request->dynamic = true;
			if (wait)
				init_waitqueue_head(&cqp_request->waitq);
		}
	}
	if (!cqp_request) {
		ibdev_dbg(to_ibdev(cqp->sc_cqp.dev), "ERR: CQP Request Fail: No Memory");
		return NULL;
	}

	cqp_request->waiting = wait;
	refcount_set(&cqp_request->refcnt, 1);
	memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));

	return cqp_request;
}

/**
 * irdma_get_cqp_request - increase refcount for cqp_request
 * @cqp_request: pointer to cqp_request instance
 */
static inline void irdma_get_cqp_request(struct irdma_cqp_request *cqp_request)
{
	refcount_inc(&cqp_request->refcnt);
}

/**
 * irdma_free_cqp_request - free cqp request
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
void irdma_free_cqp_request(struct irdma_cqp *cqp,
			    struct irdma_cqp_request *cqp_request)
{
	unsigned long flags;

	if (cqp_request->dynamic) {
		kfree(cqp_request);
	} else {
		cqp_request->request_done = false;
		cqp_request->callback_fcn = NULL;
		cqp_request->waiting = false;

		spin_lock_irqsave(&cqp->req_lock, flags);
		list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
		spin_unlock_irqrestore(&cqp->req_lock, flags);
	}
	wake_up(&cqp->remove_wq);
}

/**
 * irdma_put_cqp_request - dec ref count and free if 0
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
void irdma_put_cqp_request(struct irdma_cqp *cqp,
			   struct irdma_cqp_request *cqp_request)
{
	if (refcount_dec_and_test(&cqp_request->refcnt))
		irdma_free_cqp_request(cqp, cqp_request);
}

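/*
 * Illustrative only: the alloc/get -> fill -> post -> put lifecycle used by
 * nearly every CQP command wrapper in this file.  This is a skeleton, not a
 * complete command: a real caller must fill cqp_info->in.u.<op> for the
 * chosen opcode before posting.
 */
static __always_unused int irdma_example_cqp_lifecycle(struct irdma_pci_f *rf)
{
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;

	/* wait == true: irdma_handle_cqp_op() blocks until completion */
	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	cqp_info->post_sq = 1;
	/* ...set cqp_info->cqp_cmd and fill cqp_info->in.u.<op> here... */

	status = irdma_handle_cqp_op(rf, cqp_request);
	/* drop the reference taken at allocation */
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}
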
/**
 * irdma_free_pending_cqp_request - free pending cqp request objs
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
static void
irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
			       struct irdma_cqp_request *cqp_request)
{
	if (cqp_request->waiting) {
		cqp_request->compl_info.error = true;
		cqp_request->request_done = true;
		wake_up(&cqp_request->waitq);
	}
	wait_event_timeout(cqp->remove_wq,
			   refcount_read(&cqp_request->refcnt) == 1, 1000);
	irdma_put_cqp_request(cqp, cqp_request);
}

/**
 * irdma_cleanup_pending_cqp_op - clean-up cqp with no
 * completions
 * @rf: RDMA PCI function
 */
void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_cqp *cqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request = NULL;
	struct cqp_cmds_info *pcmdinfo = NULL;
	u32 i, pending_work, wqe_idx;

	pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
	wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
	for (i = 0; i < pending_work; i++) {
		cqp_request = (struct irdma_cqp_request *)(unsigned long)
				      cqp->scratch_array[wqe_idx];
		if (cqp_request)
			irdma_free_pending_cqp_request(cqp, cqp_request);
		wqe_idx = (wqe_idx + 1) % IRDMA_RING_SIZE(cqp->sc_cqp.sq_ring);
	}

	while (!list_empty(&dev->cqp_cmd_head)) {
		pcmdinfo = irdma_remove_cqp_head(dev);
		cqp_request =
			container_of(pcmdinfo, struct irdma_cqp_request, info);
		if (cqp_request)
			irdma_free_pending_cqp_request(cqp, cqp_request);
	}
}

/**
 * irdma_wait_event - wait for completion
 * @rf: RDMA PCI function
 * @cqp_request: cqp request to wait
 */
static int irdma_wait_event(struct irdma_pci_f *rf,
			    struct irdma_cqp_request *cqp_request)
{
	struct irdma_cqp_timeout cqp_timeout = {};
	bool cqp_error = false;
	int err_code = 0;

	cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
	do {
		irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
		if (wait_event_timeout(cqp_request->waitq,
				       cqp_request->request_done,
				       msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
			break;

		irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);

		if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
			continue;

		if (!rf->reset) {
			rf->reset = true;
			rf->gen_ops.request_reset(rf);
		}
		return -ETIMEDOUT;
	} while (1);

	cqp_error = cqp_request->compl_info.error;
	if (cqp_error) {
		err_code = -EIO;
		if (cqp_request->compl_info.maj_err_code == 0xFFFF) {
			if (cqp_request->compl_info.min_err_code == 0x8002)
				err_code = -EBUSY;
			else if (cqp_request->compl_info.min_err_code == 0x8029) {
				if (!rf->reset) {
					rf->reset = true;
					rf->gen_ops.request_reset(rf);
				}
			}
		}
	}

	return err_code;
}

static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
	[IRDMA_OP_CEQ_DESTROY] = "Destroy CEQ Cmd",
	[IRDMA_OP_AEQ_DESTROY] = "Destroy AEQ Cmd",
	[IRDMA_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd",
	[IRDMA_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd",
	[IRDMA_OP_CEQ_CREATE] = "CEQ Create Cmd",
	[IRDMA_OP_AEQ_CREATE] = "AEQ Create Cmd",
	[IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd",
	[IRDMA_OP_QP_MODIFY] = "Modify QP Cmd",
	[IRDMA_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd",
	[IRDMA_OP_CQ_CREATE] = "Create CQ Cmd",
	[IRDMA_OP_CQ_DESTROY] = "Destroy CQ Cmd",
	[IRDMA_OP_QP_CREATE] = "Create QP Cmd",
	[IRDMA_OP_QP_DESTROY] = "Destroy QP Cmd",
	[IRDMA_OP_ALLOC_STAG] = "Allocate STag Cmd",
	[IRDMA_OP_MR_REG_NON_SHARED] = "Register Non-Shared MR Cmd",
	[IRDMA_OP_DEALLOC_STAG] = "Deallocate STag Cmd",
	[IRDMA_OP_MW_ALLOC] = "Allocate Memory Window Cmd",
	[IRDMA_OP_QP_FLUSH_WQES] = "Flush QP Cmd",
	[IRDMA_OP_ADD_ARP_CACHE_ENTRY] = "Add ARP Cache Cmd",
	[IRDMA_OP_MANAGE_PUSH_PAGE] = "Manage Push Page Cmd",
	[IRDMA_OP_UPDATE_PE_SDS] = "Update PE SDs Cmd",
	[IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd",
	[IRDMA_OP_SUSPEND] = "Suspend QP Cmd",
	[IRDMA_OP_RESUME] = "Resume QP Cmd",
	[IRDMA_OP_MANAGE_VF_PBLE_BP] = "Manage VF PBLE Backing Pages Cmd",
	[IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd",
	[IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd",
	[IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd",
	[IRDMA_OP_AH_MODIFY] = "Modify Address Handle Cmd",
	[IRDMA_OP_AH_DESTROY] = "Destroy Address Handle Cmd",
	[IRDMA_OP_MC_CREATE] = "Create Multicast Group Cmd",
	[IRDMA_OP_MC_DESTROY] = "Destroy Multicast Group Cmd",
	[IRDMA_OP_MC_MODIFY] = "Modify Multicast Group Cmd",
	[IRDMA_OP_STATS_ALLOCATE] = "Add Statistics Instance Cmd",
	[IRDMA_OP_STATS_FREE] = "Free Statistics Instance Cmd",
	[IRDMA_OP_STATS_GATHER] = "Gather Statistics Cmd",
	[IRDMA_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd",
	[IRDMA_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd",
	[IRDMA_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd",
	[IRDMA_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd",
	[IRDMA_OP_GEN_AE] = "Generate AE Cmd",
	[IRDMA_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd",
	[IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY] = "Allocate Local MAC Entry Cmd",
	[IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
	[IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
	[IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
};

static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
	{0xffff, 0x8002, "Invalid State"},
	{0xffff, 0x8006, "Flush No Wqe Pending"},
	{0xffff, 0x8007, "Modify QP Bad Close"},
	{0xffff, 0x8009, "LLP Closed"},
	{0xffff, 0x800a, "Reset Not Sent"}
};

/**
 * irdma_cqp_crit_err - check if CQP error is critical
 * @dev: pointer to dev structure
 * @cqp_cmd: code for last CQP operation
 * @maj_err_code: major error code
 * @min_err_code: minor error code
 */
bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
			u16 maj_err_code, u16 min_err_code)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(irdma_noncrit_err_list); ++i) {
		if (maj_err_code == irdma_noncrit_err_list[i].maj &&
		    min_err_code == irdma_noncrit_err_list[i].min) {
			ibdev_dbg(to_ibdev(dev),
				  "CQP: [%s Error][%s] maj=0x%x min=0x%x\n",
				  irdma_noncrit_err_list[i].desc,
				  irdma_cqp_cmd_names[cqp_cmd], maj_err_code,
				  min_err_code);
			return false;
		}
	}

	return true;
}

/**
 * irdma_handle_cqp_op - process cqp command
 * @rf: RDMA PCI function
 * @cqp_request: cqp request to process
 */
int irdma_handle_cqp_op(struct irdma_pci_f *rf,
			struct irdma_cqp_request *cqp_request)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct cqp_cmds_info *info = &cqp_request->info;
	int status;
	bool put_cqp_request = true;

	if (rf->reset)
		return -EBUSY;

	irdma_get_cqp_request(cqp_request);
	status = irdma_process_cqp_cmd(dev, info);
	if (status)
		goto err;

	if (cqp_request->waiting) {
		put_cqp_request = false;
		status = irdma_wait_event(rf, cqp_request);
		if (status)
			goto err;
	}

	return 0;

err:
	if (irdma_cqp_crit_err(dev, info->cqp_cmd,
			       cqp_request->compl_info.maj_err_code,
			       cqp_request->compl_info.min_err_code))
		ibdev_err(&rf->iwdev->ibdev,
			  "[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n",
			  irdma_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, status, cqp_request->waiting,
			  cqp_request->compl_info.error, cqp_request->compl_info.maj_err_code,
			  cqp_request->compl_info.min_err_code);

	if (put_cqp_request)
		irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

void irdma_qp_add_ref(struct ib_qp *ibqp)
{
	struct irdma_qp *iwqp = (struct irdma_qp *)ibqp;

	refcount_inc(&iwqp->refcnt);
}

void irdma_qp_rem_ref(struct ib_qp *ibqp)
{
	struct irdma_qp *iwqp = to_iwqp(ibqp);
	struct irdma_device *iwdev = iwqp->iwdev;
	u32 qp_num;
	unsigned long flags;

	spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
	if (!refcount_dec_and_test(&iwqp->refcnt)) {
		spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
		return;
	}

	qp_num = iwqp->ibqp.qp_num;
	iwdev->rf->qp_table[qp_num] = NULL;
	spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
	complete(&iwqp->free_qp);
}

struct ib_device *to_ibdev(struct irdma_sc_dev *dev)
{
	return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev;
}

/**
 * irdma_get_qp - get qp address
 * @device: iwarp device
 * @qpn: qp number
 */
struct ib_qp *irdma_get_qp(struct ib_device *device, int qpn)
{
	struct irdma_device *iwdev = to_iwdev(device);

	if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp)
		return NULL;

	return &iwdev->rf->qp_table[qpn]->ibqp;
}

/**
 * irdma_remove_cqp_head - return head entry and remove
 * @dev: device
 */
void *irdma_remove_cqp_head(struct irdma_sc_dev *dev)
{
	struct list_head *entry;
	struct list_head *list = &dev->cqp_cmd_head;

	if (list_empty(list))
		return NULL;

	entry = list->next;
	list_del(entry);

	return entry;
}

/**
 * irdma_cqp_sds_cmd - create cqp command for sd
 * @dev: hardware control device structure
 * @sdinfo: information for sd cqp
 */
int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
		      struct irdma_update_sds_info *sdinfo)
{
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_pci_f *rf = dev_to_rf(dev);
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
	       sizeof(cqp_info->in.u.update_pe_sds.info));
	cqp_info->cqp_cmd = IRDMA_OP_UPDATE_PE_SDS;
	cqp_info->post_sq = 1;
	cqp_info->in.u.update_pe_sds.dev = dev;
	cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;

	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

/**
 * irdma_cqp_qp_suspend_resume - cqp command for suspend/resume
 * @qp: hardware control qp
 * @op: suspend or resume
 */
int irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 op)
{
	struct irdma_sc_dev *dev = qp->dev;
	struct irdma_cqp_request *cqp_request;
	struct irdma_sc_cqp *cqp = dev->cqp;
	struct cqp_cmds_info *cqp_info;
	struct irdma_pci_f *rf = dev_to_rf(dev);
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = op;
	cqp_info->in.u.suspend_resume.cqp = cqp;
	cqp_info->in.u.suspend_resume.qp = qp;
	cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;

	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

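/*
 * Illustrative only: suspending and then resuming a QP with the wrapper
 * above.  The opcodes come from the command-name table earlier in this file;
 * error handling is elided for brevity.
 */
static __always_unused void irdma_example_suspend_resume(struct irdma_sc_qp *qp)
{
	irdma_cqp_qp_suspend_resume(qp, IRDMA_OP_SUSPEND);
	/* ...do work that requires a quiesced QP here... */
	irdma_cqp_qp_suspend_resume(qp, IRDMA_OP_RESUME);
}
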
/**
 * irdma_term_modify_qp - modify qp for term message
 * @qp: hardware control qp
 * @next_state: qp's next state
 * @term: terminate code
 * @term_len: length
 */
void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
			  u8 term_len)
{
	struct irdma_qp *iwqp;

	iwqp = qp->qp_uk.back_qp;
	irdma_next_iw_state(iwqp, next_state, 0, term, term_len);
}

/**
 * irdma_terminate_done - after terminate is completed
 * @qp: hardware control qp
 * @timeout_occurred: indicates if terminate timer expired
 */
void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
{
	struct irdma_qp *iwqp;
	u8 hte = 0;
	bool first_time;
	unsigned long flags;

	iwqp = qp->qp_uk.back_qp;
	spin_lock_irqsave(&iwqp->lock, flags);
	if (iwqp->hte_added) {
		iwqp->hte_added = 0;
		hte = 1;
	}
	first_time = !(qp->term_flags & IRDMA_TERM_DONE);
	qp->term_flags |= IRDMA_TERM_DONE;
	spin_unlock_irqrestore(&iwqp->lock, flags);
	if (first_time) {
		if (!timeout_occurred)
			irdma_terminate_del_timer(qp);

		irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, hte, 0, 0);
		irdma_cm_disconn(iwqp);
	}
}

static void irdma_terminate_timeout(struct timer_list *t)
{
	struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer);
	struct irdma_sc_qp *qp = &iwqp->sc_qp;

	irdma_terminate_done(qp, 1);
	irdma_qp_rem_ref(&iwqp->ibqp);
}

/**
 * irdma_terminate_start_timer - start terminate timeout
 * @qp: hardware control qp
 */
void irdma_terminate_start_timer(struct irdma_sc_qp *qp)
{
	struct irdma_qp *iwqp;

	iwqp = qp->qp_uk.back_qp;
	irdma_qp_add_ref(&iwqp->ibqp);
	timer_setup(&iwqp->terminate_timer, irdma_terminate_timeout, 0);
	iwqp->terminate_timer.expires = jiffies + HZ;

	add_timer(&iwqp->terminate_timer);
}

/**
 * irdma_terminate_del_timer - delete terminate timeout
 * @qp: hardware control qp
 */
void irdma_terminate_del_timer(struct irdma_sc_qp *qp)
{
	struct irdma_qp *iwqp;
	int ret;

	iwqp = qp->qp_uk.back_qp;
	ret = del_timer(&iwqp->terminate_timer);
	if (ret)
		irdma_qp_rem_ref(&iwqp->ibqp);
}

/**
 * irdma_cqp_query_fpm_val_cmd - send cqp command for fpm
 * @dev: function device struct
 * @val_mem: buffer for fpm
 * @hmc_fn_id: function id for fpm
 */
int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
				struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
{
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_pci_f *rf = dev_to_rf(dev);
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	cqp_request->param = NULL;
	cqp_info->in.u.query_fpm_val.cqp = dev->cqp;
	cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa;
	cqp_info->in.u.query_fpm_val.fpm_val_va = val_mem->va;
	cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id;
	cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL;
	cqp_info->post_sq = 1;
	cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request;

	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

/**
 * irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw
 * @dev: hardware control device structure
 * @val_mem: buffer with fpm values
 * @hmc_fn_id: function id for fpm
 */
int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
				 struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
{
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_pci_f *rf = dev_to_rf(dev);
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	cqp_request->param = NULL;
	cqp_info->in.u.commit_fpm_val.cqp = dev->cqp;
	cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa;
	cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va;
	cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id;
	cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL;
	cqp_info->post_sq = 1;
	cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request;

	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

/**
 * irdma_cqp_cq_create_cmd - create a cq for the cqp
 * @dev: device pointer
 * @cq: pointer to created cq
 */
int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);
	struct irdma_cqp *iwcqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_create.cq = cq;
	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;

	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(iwcqp, cqp_request);

	return status;
}

/**
 * irdma_cqp_qp_create_cmd - create a qp for the cqp
 * @dev: device pointer
 * @qp: pointer to created qp
 */
int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);
	struct irdma_cqp *iwcqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_create_qp_info *qp_info;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;
	memset(qp_info, 0, sizeof(*qp_info));
	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
	cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;

	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(iwcqp, cqp_request);

	return status;
}

/**
 * irdma_dealloc_push_page - free a push page for qp
 * @rf: RDMA PCI function
 * @qp: hardware control qp
 */
static void irdma_dealloc_push_page(struct irdma_pci_f *rf,
				    struct irdma_sc_qp *qp)
{
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;

	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX)
		return;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 1;
	cqp_info->in.u.manage_push_page.info.push_page_type = 0;
	cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
	status = irdma_handle_cqp_op(rf, cqp_request);
	if (!status)
		qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
	irdma_put_cqp_request(&rf->cqp, cqp_request);
}

/**
 * irdma_free_qp_rsrc - free up memory resources for qp
 * @iwqp: qp ptr (user or kernel)
 */
void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
{
	struct irdma_device *iwdev = iwqp->iwdev;
	struct irdma_pci_f *rf = iwdev->rf;
	u32 qp_num = iwqp->ibqp.qp_num;

	irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
	irdma_dealloc_push_page(rf, &iwqp->sc_qp);
	if (iwqp->sc_qp.vsi) {
		irdma_qp_rem_qos(&iwqp->sc_qp);
		iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
					   iwqp->sc_qp.user_pri);
	}

	if (qp_num > 2)
		irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
	dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size,
			  iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa);
	iwqp->q2_ctx_mem.va = NULL;
	dma_free_coherent(rf->sc_dev.hw->device, iwqp->kqp.dma_mem.size,
			  iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa);
	iwqp->kqp.dma_mem.va = NULL;
	kfree(iwqp->kqp.sq_wrid_mem);
	kfree(iwqp->kqp.rq_wrid_mem);
}

/**
 * irdma_cq_wq_destroy - send cq destroy cqp
 * @rf: RDMA PCI function
 * @cq: hardware control cq
 */
void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
{
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = IRDMA_OP_CQ_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_destroy.cq = cq;
	cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;

	irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);
}

/**
 * irdma_hw_modify_qp_callback - handle state for modifyQPs that don't wait
 * @cqp_request: modify QP completion
 */
static void irdma_hw_modify_qp_callback(struct irdma_cqp_request *cqp_request)
{
	struct cqp_cmds_info *cqp_info;
	struct irdma_qp *iwqp;

	cqp_info = &cqp_request->info;
	iwqp = cqp_info->in.u.qp_modify.qp->qp_uk.back_qp;
	atomic_dec(&iwqp->hw_mod_qp_pend);
	wake_up(&iwqp->mod_qp_waitq);
}

/**
 * irdma_hw_modify_qp - setup cqp for modify qp
 * @iwdev: RDMA device
 * @iwqp: qp ptr (user or kernel)
 * @info: info for modify qp
 * @wait: flag to wait or not for modify qp completion
 */
int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
		       struct irdma_modify_qp_info *info, bool wait)
{
	int status;
	struct irdma_pci_f *rf = iwdev->rf;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_modify_qp_info *m_info;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
	if (!cqp_request)
		return -ENOMEM;

	if (!wait) {
		cqp_request->callback_fcn = irdma_hw_modify_qp_callback;
		atomic_inc(&iwqp->hw_mod_qp_pend);
	}
	cqp_info = &cqp_request->info;
	m_info = &cqp_info->in.u.qp_modify.info;
	memcpy(m_info, info, sizeof(*m_info));
	cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);
	if (status) {
		if (rdma_protocol_roce(&iwdev->ibdev, 1))
			return status;

		switch (m_info->next_iwarp_state) {
			struct irdma_gen_ae_info ae_info;

		case IRDMA_QP_STATE_RTS:
		case IRDMA_QP_STATE_IDLE:
		case IRDMA_QP_STATE_TERMINATE:
		case IRDMA_QP_STATE_CLOSING:
			if (info->curr_iwarp_state == IRDMA_QP_STATE_IDLE)
				irdma_send_reset(iwqp->cm_node);
			else
				iwqp->sc_qp.term_flags = IRDMA_TERM_DONE;
			if (!wait) {
				ae_info.ae_code = IRDMA_AE_BAD_CLOSE;
				ae_info.ae_src = 0;
				irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false);
			} else {
				cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp,
									      wait);
				if (!cqp_request)
					return status;

				cqp_info = &cqp_request->info;
				m_info = &cqp_info->in.u.qp_modify.info;
				memcpy(m_info, info, sizeof(*m_info));
				cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
				cqp_info->post_sq = 1;
				cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
				cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
				m_info->next_iwarp_state = IRDMA_QP_STATE_ERROR;
				m_info->reset_tcp_conn = true;
				irdma_handle_cqp_op(rf, cqp_request);
				irdma_put_cqp_request(&rf->cqp, cqp_request);
			}
			break;
		case IRDMA_QP_STATE_ERROR:
		default:
			break;
		}
	}

	return status;
}

/**
 * irdma_cqp_cq_destroy_cmd - destroy the cqp cq
 * @dev: device pointer
 * @cq: pointer to cq
 */
void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);

	irdma_cq_wq_destroy(rf, cq);
}

/**
 * irdma_cqp_qp_destroy_cmd - destroy the cqp
 * @dev: device pointer
 * @qp: pointer to qp
 */
int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);
	struct irdma_cqp *iwcqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	memset(cqp_info, 0, sizeof(*cqp_info));
	cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_destroy.qp = qp;
	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.qp_destroy.remove_hash_idx = true;

	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

/**
 * irdma_ieq_mpa_crc_ae - generate AE for crc error
 * @dev: hardware control device structure
 * @qp: hardware control qp
 */
void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
{
	struct irdma_gen_ae_info info = {};
	struct irdma_pci_f *rf = dev_to_rf(dev);

	ibdev_dbg(&rf->iwdev->ibdev, "AEQ: Generate MPA CRC AE\n");
	info.ae_code = IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR;
	info.ae_src = IRDMA_AE_SOURCE_RQ;
	irdma_gen_ae(rf, qp, &info, false);
}

/**
 * irdma_init_hash_desc - initialize hash for crc calculation
 * @desc: double pointer filled with the allocated crc32c shash descriptor
 */
int irdma_init_hash_desc(struct shash_desc **desc)
{
	struct crypto_shash *tfm;
	struct shash_desc *tdesc;

	tfm = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return -EINVAL;

	tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!tdesc) {
		crypto_free_shash(tfm);
		return -EINVAL;
	}
	tdesc->tfm = tfm;
	*desc = tdesc;

	return 0;
}

/**
 * irdma_free_hash_desc - free hash desc
 * @desc: to be freed
 */
void irdma_free_hash_desc(struct shash_desc *desc)
{
	if (desc) {
		crypto_free_shash(desc->tfm);
		kfree(desc);
	}
}

/**
 * irdma_ieq_check_mpacrc - check if mpa crc is OK
 * @desc: desc for hash
 * @addr: address of buffer for crc
 * @len: length of buffer
 * @val: value to be compared
 */
int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len,
			   u32 val)
{
	u32 crc = 0;
	int ret;
	int ret_code = 0;

	crypto_shash_init(desc);
	ret = crypto_shash_update(desc, addr, len);
	if (!ret)
		crypto_shash_final(desc, (u8 *)&crc);
	if (crc != val)
		ret_code = -EINVAL;

	return ret_code;
}

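/*
 * Illustrative only: allocating a CRC32C descriptor and validating a received
 * MPA frame whose trailing 4 bytes are assumed to carry the CRC.  The frame
 * layout is a simplification for the sketch.
 */
static __always_unused int irdma_example_check_crc(void *frame, u32 frame_len)
{
	struct shash_desc *desc;
	u32 rcv_crc = *(u32 *)((u8 *)frame + frame_len - 4);
	int err;

	err = irdma_init_hash_desc(&desc);
	if (err)
		return err;

	/* CRC covers everything before the trailing CRC field */
	err = irdma_ieq_check_mpacrc(desc, frame, frame_len - 4, rcv_crc);
	irdma_free_hash_desc(desc);

	return err;
}
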
/**
 * irdma_ieq_get_qp - get qp based on quad in puda buffer
 * @dev: hardware control device structure
 * @buf: receive puda buffer on exception q
 */
struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
				     struct irdma_puda_buf *buf)
{
	struct irdma_qp *iwqp;
	struct irdma_cm_node *cm_node;
	struct irdma_device *iwdev = buf->vsi->back_vsi;
	u32 loc_addr[4] = {};
	u32 rem_addr[4] = {};
	u16 loc_port, rem_port;
	struct ipv6hdr *ip6h;
	struct iphdr *iph = (struct iphdr *)buf->iph;
	struct tcphdr *tcph = (struct tcphdr *)buf->tcph;

	if (iph->version == 4) {
		loc_addr[0] = ntohl(iph->daddr);
		rem_addr[0] = ntohl(iph->saddr);
	} else {
		ip6h = (struct ipv6hdr *)buf->iph;
		irdma_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
		irdma_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
	}
	loc_port = ntohs(tcph->dest);
	rem_port = ntohs(tcph->source);
	cm_node = irdma_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
				  loc_addr, buf->vlan_valid ? buf->vlan_id : 0xFFFF);
	if (!cm_node)
		return NULL;

	iwqp = cm_node->iwqp;
	irdma_rem_ref_cm_node(cm_node);

	return &iwqp->sc_qp;
}

/**
 * irdma_send_ieq_ack - ACKs for duplicate or OOO partial FPDUs
 * @qp: hardware control qp
 */
void irdma_send_ieq_ack(struct irdma_sc_qp *qp)
{
	struct irdma_cm_node *cm_node = ((struct irdma_qp *)qp->qp_uk.back_qp)->cm_node;
	struct irdma_puda_buf *buf = qp->pfpdu.lastrcv_buf;
	struct tcphdr *tcph = (struct tcphdr *)buf->tcph;

	cm_node->tcp_cntxt.rcv_nxt = qp->pfpdu.nextseqnum;
	cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);

	irdma_send_ack(cm_node);
}

/**
 * irdma_puda_ieq_get_ah_info - get AH info from IEQ buffer
 * @qp: qp pointer
 * @ah_info: AH info pointer
 */
void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
				struct irdma_ah_info *ah_info)
{
	struct irdma_puda_buf *buf = qp->pfpdu.ah_buf;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;

	memset(ah_info, 0, sizeof(*ah_info));
	ah_info->do_lpbk = true;
	ah_info->vlan_tag = buf->vlan_id;
	ah_info->insert_vlan_tag = buf->vlan_valid;
	ah_info->ipv4_valid = buf->ipv4;
	ah_info->vsi = qp->vsi;

	if (buf->smac_valid)
		ether_addr_copy(ah_info->mac_addr, buf->smac);

	if (buf->ipv4) {
		ah_info->ipv4_valid = true;
		iph = (struct iphdr *)buf->iph;
		ah_info->hop_ttl = iph->ttl;
		ah_info->tc_tos = iph->tos;
		ah_info->dest_ip_addr[0] = ntohl(iph->daddr);
		ah_info->src_ip_addr[0] = ntohl(iph->saddr);
	} else {
		ip6h = (struct ipv6hdr *)buf->iph;
		ah_info->hop_ttl = ip6h->hop_limit;
		ah_info->tc_tos = ip6h->priority;
		irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
				    ip6h->daddr.in6_u.u6_addr32);
		irdma_copy_ip_ntohl(ah_info->src_ip_addr,
				    ip6h->saddr.in6_u.u6_addr32);
	}

	ah_info->dst_arpindex = irdma_arp_table(dev_to_rf(qp->dev),
						ah_info->dest_ip_addr,
						ah_info->ipv4_valid,
						NULL, IRDMA_ARP_RESOLVE);
}

/**
 * irdma_gen1_ieq_update_tcpip_info - update tcpip in the buffer
 * @buf: puda to update
 * @len: length of buffer
 * @seqnum: seq number for tcp
 */
static void irdma_gen1_ieq_update_tcpip_info(struct irdma_puda_buf *buf,
					     u16 len, u32 seqnum)
{
	struct tcphdr *tcph;
	struct iphdr *iph;
	u16 iphlen;
	u16 pktsize;
	u8 *addr = buf->mem.va;

	iphlen = (buf->ipv4) ? 20 : 40;
	iph = (struct iphdr *)(addr + buf->maclen);
	tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
	pktsize = len + buf->tcphlen + iphlen;
	iph->tot_len = htons(pktsize);
	tcph->seq = htonl(seqnum);
}

/**
 * irdma_ieq_update_tcpip_info - update tcpip in the buffer
 * @buf: puda to update
 * @len: length of buffer
 * @seqnum: seq number for tcp
 */
void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
				 u32 seqnum)
{
	struct tcphdr *tcph;
	u8 *addr;

	if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
		return irdma_gen1_ieq_update_tcpip_info(buf, len, seqnum);

	addr = buf->mem.va;
	tcph = (struct tcphdr *)addr;
	tcph->seq = htonl(seqnum);
}

/**
 * irdma_gen1_puda_get_tcpip_info - get tcpip info from puda buffer
 * @info: to get information
 * @buf: puda buffer
 */
static int irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
					  struct irdma_puda_buf *buf)
{
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcphdr *tcph;
	u16 iphlen;
	u16 pkt_len;
	u8 *mem = buf->mem.va;
	struct ethhdr *ethh = buf->mem.va;

	if (ethh->h_proto == htons(0x8100)) {
		info->vlan_valid = true;
		buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) &
			       VLAN_VID_MASK;
	}

	buf->maclen = (info->vlan_valid) ? 18 : 14;
	iphlen = (info->l3proto) ? 40 : 20;
	buf->ipv4 = (info->l3proto) ? false : true;
	buf->iph = mem + buf->maclen;
	iph = (struct iphdr *)buf->iph;
	buf->tcph = buf->iph + iphlen;
	tcph = (struct tcphdr *)buf->tcph;

	if (buf->ipv4) {
		pkt_len = ntohs(iph->tot_len);
	} else {
		ip6h = (struct ipv6hdr *)buf->iph;
		pkt_len = ntohs(ip6h->payload_len) + iphlen;
	}

	buf->totallen = pkt_len + buf->maclen;

	if (info->payload_len < buf->totallen) {
		ibdev_dbg(to_ibdev(buf->vsi->dev),
			  "ERR: payload_len = 0x%x totallen expected 0x%x\n",
			  info->payload_len, buf->totallen);
		return -EINVAL;
	}

	buf->tcphlen = tcph->doff << 2;
	buf->datalen = pkt_len - iphlen - buf->tcphlen;
	buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
	buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
	buf->seqnum = ntohl(tcph->seq);

	return 0;
}

/**
 * irdma_puda_get_tcpip_info - get tcpip info from puda buffer
 * @info: to get information
 * @buf: puda buffer
 */
int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
			      struct irdma_puda_buf *buf)
{
	struct tcphdr *tcph;
	u32 pkt_len;
	u8 *mem;

	if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
		return irdma_gen1_puda_get_tcpip_info(info, buf);

	mem = buf->mem.va;
	buf->vlan_valid = info->vlan_valid;
	if (info->vlan_valid)
		buf->vlan_id = info->vlan;

	buf->ipv4 = info->ipv4;
	if (buf->ipv4)
		buf->iph = mem + IRDMA_IPV4_PAD;
	else
		buf->iph = mem;

	buf->tcph = mem + IRDMA_TCP_OFFSET;
	tcph = (struct tcphdr *)buf->tcph;
	pkt_len = info->payload_len;
	buf->totallen = pkt_len;
	buf->tcphlen = tcph->doff << 2;
	buf->datalen = pkt_len - IRDMA_TCP_OFFSET - buf->tcphlen;
	buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
	buf->hdrlen = IRDMA_TCP_OFFSET + buf->tcphlen;
	buf->seqnum = ntohl(tcph->seq);

	if (info->smac_valid) {
		ether_addr_copy(buf->smac, info->smac);
		buf->smac_valid = true;
	}

	return 0;
}

/**
 * irdma_hw_stats_timeout - Stats timer-handler which updates all HW stats
 * @t: timer_list pointer
 */
static void irdma_hw_stats_timeout(struct timer_list *t)
{
	struct irdma_vsi_pestat *pf_devstat =
		from_timer(pf_devstat, t, stats_timer);
	struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;

	if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
		irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false);
	else
		irdma_cqp_gather_stats_gen1(sc_vsi->dev, sc_vsi->pestat);

	mod_timer(&pf_devstat->stats_timer,
		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}

/**
 * irdma_hw_stats_start_timer - Start periodic stats timer
 * @vsi: vsi structure pointer
 */
void irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi)
{
	struct irdma_vsi_pestat *devstat = vsi->pestat;

	timer_setup(&devstat->stats_timer, irdma_hw_stats_timeout, 0);
	mod_timer(&devstat->stats_timer,
		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}

/**
 * irdma_hw_stats_stop_timer - Delete periodic stats timer
 * @vsi: pointer to vsi structure
 */
void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi)
{
	struct irdma_vsi_pestat *devstat = vsi->pestat;

	del_timer_sync(&devstat->stats_timer);
}

/**
 * irdma_process_stats - Checking for wrap and update stats
 * @pestat: stats structure pointer
 */
static inline void irdma_process_stats(struct irdma_vsi_pestat *pestat)
{
	sc_vsi_update_stats(pestat->vsi);
}

/**
 * irdma_cqp_gather_stats_gen1 - Gather stats
 * @dev: pointer to device structure
 * @pestat: statistics structure
 */
void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
				 struct irdma_vsi_pestat *pestat)
{
	struct irdma_gather_stats *gather_stats =
		pestat->gather_info.gather_stats_va;
	const struct irdma_hw_stat_map *map = dev->hw_stats_map;
	u16 max_stats_idx = dev->hw_attrs.max_stat_idx;
	u32 stats_inst_offset_32;
	u32 stats_inst_offset_64;
	u64 new_val;
	u16 i;

	stats_inst_offset_32 = (pestat->gather_info.use_stats_inst) ?
				pestat->gather_info.stats_inst_index :
				pestat->hw->hmc.hmc_fn_id;
	stats_inst_offset_32 *= 4;
	stats_inst_offset_64 = stats_inst_offset_32 * 2;

	for (i = 0; i < max_stats_idx; i++) {
		if (map[i].bitmask <= IRDMA_MAX_STATS_32)
			new_val = rd32(dev->hw,
				       dev->hw_stats_regs[i] + stats_inst_offset_32);
		else
			new_val = rd64(dev->hw,
				       dev->hw_stats_regs[i] + stats_inst_offset_64);
		gather_stats->val[map[i].byteoff / sizeof(u64)] = new_val;
	}

	irdma_process_stats(pestat);
}

/**
 * irdma_process_cqp_stats - Checking for wrap and update stats
 * @cqp_request: cqp_request structure pointer
 */
static void irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request)
{
	struct irdma_vsi_pestat *pestat = cqp_request->param;

	irdma_process_stats(pestat);
}

/**
 * irdma_cqp_gather_stats_cmd - Gather stats
 * @dev: pointer to device structure
 * @pestat: pointer to stats info
 * @wait: flag to wait or not wait for stats
 */
int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
			       struct irdma_vsi_pestat *pestat, bool wait)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);
	struct irdma_cqp *iwcqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	memset(cqp_info, 0, sizeof(*cqp_info));
	cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
	cqp_info->post_sq = 1;
	cqp_info->in.u.stats_gather.info = pestat->gather_info;
	cqp_info->in.u.stats_gather.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.stats_gather.cqp = &rf->cqp.sc_cqp;
	cqp_request->param = pestat;
	if (!wait)
		cqp_request->callback_fcn = irdma_process_cqp_stats;
	status = irdma_handle_cqp_op(rf, cqp_request);
	if (wait)
		irdma_process_stats(pestat);
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

/**
 * irdma_cqp_stats_inst_cmd - Allocate/free stats instance
 * @vsi: pointer to vsi structure
 * @cmd: command to allocate or free
 * @stats_info: pointer to allocate stats info
 */
int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
			     struct irdma_stats_inst_info *stats_info)
{
	struct irdma_pci_f *rf = dev_to_rf(vsi->dev);
	struct irdma_cqp *iwcqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;
	bool wait = false;

	if (cmd == IRDMA_OP_STATS_ALLOCATE)
		wait = true;
	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	memset(cqp_info, 0, sizeof(*cqp_info));
	cqp_info->cqp_cmd = cmd;
	cqp_info->post_sq = 1;
	cqp_info->in.u.stats_manage.info = *stats_info;
	cqp_info->in.u.stats_manage.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.stats_manage.cqp = &rf->cqp.sc_cqp;
	status = irdma_handle_cqp_op(rf, cqp_request);
	if (wait)
		stats_info->stats_idx = cqp_request->compl_info.op_ret_val;
	irdma_put_cqp_request(iwcqp, cqp_request);

	return status;
}

/**
 * irdma_cqp_ceq_cmd - Create/Destroy CEQ's after CEQ 0
 * @dev: pointer to device info
 * @sc_ceq: pointer to ceq structure
 * @op: Create or Destroy
 */
int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
		      u8 op)
{
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_pci_f *rf = dev_to_rf(dev);
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	cqp_info->post_sq = 1;
	cqp_info->cqp_cmd = op;
	cqp_info->in.u.ceq_create.ceq = sc_ceq;
	cqp_info->in.u.ceq_create.scratch = (uintptr_t)cqp_request;

	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

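/*
 * Illustrative only: the same wrapper serves both create and destroy, with
 * the operation selected by the opcode argument.
 */
static __always_unused int irdma_example_ceq_create(struct irdma_sc_dev *dev,
						    struct irdma_sc_ceq *ceq)
{
	return irdma_cqp_ceq_cmd(dev, ceq, IRDMA_OP_CEQ_CREATE);
}
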
/**
 * irdma_cqp_aeq_cmd - Create/Destroy AEQ
 * @dev: pointer to device info
 * @sc_aeq: pointer to aeq structure
 * @op: Create or Destroy
 */
int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
		      u8 op)
{
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_pci_f *rf = dev_to_rf(dev);
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	cqp_info->post_sq = 1;
	cqp_info->cqp_cmd = op;
	cqp_info->in.u.aeq_create.aeq = sc_aeq;
	cqp_info->in.u.aeq_create.scratch = (uintptr_t)cqp_request;

	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

/**
 * irdma_cqp_ws_node_cmd - Add/modify/delete ws node
 * @dev: pointer to device structure
 * @cmd: Add, modify or delete
 * @node_info: pointer to ws node info
 */
int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
			  struct irdma_ws_node_info *node_info)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);
	struct irdma_cqp *iwcqp = &rf->cqp;
	struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;
	bool poll;

	if (!rf->sc_dev.ceq_valid)
		poll = true;
	else
		poll = false;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, !poll);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	memset(cqp_info, 0, sizeof(*cqp_info));
	cqp_info->cqp_cmd = cmd;
	cqp_info->post_sq = 1;
	cqp_info->in.u.ws_node.info = *node_info;
	cqp_info->in.u.ws_node.cqp = cqp;
	cqp_info->in.u.ws_node.scratch = (uintptr_t)cqp_request;
	status = irdma_handle_cqp_op(rf, cqp_request);
	if (status)
		goto exit;

	if (poll) {
		struct irdma_ccq_cqe_info compl_info;

		status = irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_WORK_SCHED_NODE,
						       &compl_info);
		node_info->qs_handle = compl_info.op_ret_val;
		ibdev_dbg(&rf->iwdev->ibdev, "DCB: opcode=%d, compl_info.retval=%d\n",
			  compl_info.op_code, compl_info.op_ret_val);
	} else {
		node_info->qs_handle = cqp_request->compl_info.op_ret_val;
	}

exit:
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

/**
 * irdma_ah_cqp_op - perform an AH cqp operation
 * @rf: RDMA PCI function
 * @sc_ah: address handle
 * @cmd: AH operation
 * @wait: wait if true
 * @callback_fcn: Callback function on CQP op completion
 * @cb_param: parameter for callback function
 *
 * returns errno
 */
int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
		    bool wait,
		    void (*callback_fcn)(struct irdma_cqp_request *),
		    void *cb_param)
{
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;

	if (cmd != IRDMA_OP_AH_CREATE && cmd != IRDMA_OP_AH_DESTROY)
		return -EINVAL;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = cmd;
	cqp_info->post_sq = 1;
	if (cmd == IRDMA_OP_AH_CREATE) {
		cqp_info->in.u.ah_create.info = sc_ah->ah_info;
		cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request;
		cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp;
	} else if (cmd == IRDMA_OP_AH_DESTROY) {
		cqp_info->in.u.ah_destroy.info = sc_ah->ah_info;
		cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request;
		cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp;
	}

	if (!wait) {
		cqp_request->callback_fcn = callback_fcn;
		cqp_request->param = cb_param;
	}
	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	if (status)
		return -ENOMEM;

	if (wait)
		sc_ah->ah_info.ah_valid = (cmd == IRDMA_OP_AH_CREATE);

	return 0;
}

/**
 * irdma_ieq_ah_cb - callback after creation of AH for IEQ
 * @cqp_request: pointer to cqp_request of create AH
 */
static void irdma_ieq_ah_cb(struct irdma_cqp_request *cqp_request)
{
	struct irdma_sc_qp *qp = cqp_request->param;
	struct irdma_sc_ah *sc_ah = qp->pfpdu.ah;
	unsigned long flags;

	spin_lock_irqsave(&qp->pfpdu.lock, flags);
	if (!cqp_request->compl_info.op_ret_val) {
		sc_ah->ah_info.ah_valid = true;
		irdma_ieq_process_fpdus(qp, qp->vsi->ieq);
	} else {
		sc_ah->ah_info.ah_valid = false;
		irdma_ieq_cleanup_qp(qp->vsi->ieq, qp);
	}
	spin_unlock_irqrestore(&qp->pfpdu.lock, flags);
}

/**
 * irdma_ilq_ah_cb - callback after creation of AH for ILQ
 * @cqp_request: pointer to cqp_request of create AH
 */
static void irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request)
{
	struct irdma_cm_node *cm_node = cqp_request->param;
	struct irdma_sc_ah *sc_ah = cm_node->ah;

	sc_ah->ah_info.ah_valid = !cqp_request->compl_info.op_ret_val;
	irdma_add_conn_est_qh(cm_node);
}

/**
 * irdma_puda_create_ah - create AH for ILQ/IEQ qp's
 * @dev: device pointer
 * @ah_info: Address handle info
 * @wait: When true will wait for operation to complete
 * @type: ILQ/IEQ
 * @cb_param: Callback param when not waiting
 * @ah_ret: Returned pointer to address handle if created
 */
int irdma_puda_create_ah(struct irdma_sc_dev *dev,
			 struct irdma_ah_info *ah_info, bool wait,
			 enum puda_rsrc_type type, void *cb_param,
			 struct irdma_sc_ah **ah_ret)
{
	struct irdma_sc_ah *ah;
	struct irdma_pci_f *rf = dev_to_rf(dev);
	int err;

	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	*ah_ret = ah;
	if (!ah)
		return -ENOMEM;

	err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah,
			       &ah_info->ah_idx, &rf->next_ah);
	if (err)
		goto err_free;

	ah->dev = dev;
	ah->ah_info = *ah_info;

	if (type == IRDMA_PUDA_RSRC_TYPE_ILQ)
		err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
				      irdma_ilq_ah_cb, cb_param);
	else
		err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
				      irdma_ieq_ah_cb, cb_param);

	if (err)
		goto error;
	return 0;

error:
	irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
err_free:
	kfree(ah);
	*ah_ret = NULL;
	return -ENOMEM;
}

/**
 * irdma_puda_free_ah - free a puda address handle
 * @dev: device pointer
 * @ah: The address handle to free
 */
void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);

	if (!ah)
		return;

	if (ah->ah_info.ah_valid) {
		irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_DESTROY, false, NULL, NULL);
		irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
	}

	kfree(ah);
}

/**
 * irdma_gsi_ud_qp_ah_cb - callback after creation of AH for GSI/UD QP
 * @cqp_request: pointer to cqp_request of create AH
 */
void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request)
{
	struct irdma_sc_ah *sc_ah = cqp_request->param;

	if (!cqp_request->compl_info.op_ret_val)
		sc_ah->ah_info.ah_valid = true;
	else
		sc_ah->ah_info.ah_valid = false;
}

/**
 * irdma_prm_add_pble_mem - add memory to pble resources
 * @pprm: pble resource manager
 * @pchunk: chunk of memory to add
 */
int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
			   struct irdma_chunk *pchunk)
{
	u64 sizeofbitmap;

	if (pchunk->size & 0xfff)
		return -EINVAL;

	sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;

	pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
	if (!pchunk->bitmapbuf)
		return -ENOMEM;

	pchunk->sizeofbitmap = sizeofbitmap;
	/* each pble is 8 bytes hence shift by 3 */
	pprm->total_pble_alloc += pchunk->size >> 3;
	pprm->free_pble_cnt += pchunk->size >> 3;

	return 0;
}

/**
 * irdma_prm_get_pbles - get pble's from prm
 * @pprm: pble resource manager
 * @chunkinfo: information about chunk where pble's were acquired
 * @mem_size: size of pble memory needed
 * @vaddr: returns virtual address of pble memory
 * @fpm_addr: returns fpm address of pble memory
 */
int irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
			struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
			u64 **vaddr, u64 *fpm_addr)
{
	u64 bits_needed;
	u64 bit_idx = PBLE_INVALID_IDX;
	struct irdma_chunk *pchunk = NULL;
	struct list_head *chunk_entry = pprm->clist.next;
	u32 offset;
	unsigned long flags;
	*vaddr = NULL;
	*fpm_addr = 0;

	bits_needed = DIV_ROUND_UP_ULL(mem_size, BIT_ULL(pprm->pble_shift));

	spin_lock_irqsave(&pprm->prm_lock, flags);
	while (chunk_entry != &pprm->clist) {
		pchunk = (struct irdma_chunk *)chunk_entry;
		bit_idx = bitmap_find_next_zero_area(pchunk->bitmapbuf,
						     pchunk->sizeofbitmap, 0,
						     bits_needed, 0);
		if (bit_idx < pchunk->sizeofbitmap)
			break;

		/* list.next used macro */
		chunk_entry = pchunk->list.next;
	}

	if (!pchunk || bit_idx >= pchunk->sizeofbitmap) {
		spin_unlock_irqrestore(&pprm->prm_lock, flags);
		return -ENOMEM;
	}

	bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed);
	offset = bit_idx << pprm->pble_shift;
	*vaddr = pchunk->vaddr + offset;
	*fpm_addr = pchunk->fpm_addr + offset;

	chunkinfo->pchunk = pchunk;
	chunkinfo->bit_idx = bit_idx;
	chunkinfo->bits_used = bits_needed;
	/* 3 is sizeof pble divide */
	pprm->free_pble_cnt -= chunkinfo->bits_used << (pprm->pble_shift - 3);
	spin_unlock_irqrestore(&pprm->prm_lock, flags);

	return 0;
}

/**
 * irdma_prm_return_pbles - return pbles back to prm
 * @pprm: pble resource manager
 * @chunkinfo: chunk where pble's were acquired and to be freed
 */
void irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
			    struct irdma_pble_chunkinfo *chunkinfo)
{
	unsigned long flags;

	spin_lock_irqsave(&pprm->prm_lock, flags);
	pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3);
	bitmap_clear(chunkinfo->pchunk->bitmapbuf, chunkinfo->bit_idx,
		     chunkinfo->bits_used);
	spin_unlock_irqrestore(&pprm->prm_lock, flags);
}

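/*
 * Illustrative only: acquiring and releasing a PBLE range from the pool.
 * "pprm" is assumed to be an initialized resource manager; the requested size
 * is a placeholder.
 */
static __always_unused int irdma_example_pble_cycle(struct irdma_pble_prm *pprm)
{
	struct irdma_pble_chunkinfo chunkinfo;
	u64 *va;
	u64 fpm_addr;
	int err;

	/* request one 4K page worth of pbles */
	err = irdma_prm_get_pbles(pprm, &chunkinfo, SZ_4K, &va, &fpm_addr);
	if (err)
		return err;

	/* ...program va/fpm_addr into HMC objects here... */

	irdma_prm_return_pbles(pprm, &chunkinfo);
	return 0;
}
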
int irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma,
			   u32 pg_cnt)
{
	struct page *vm_page;
	u32 i;
	u8 *addr;

	addr = (u8 *)(uintptr_t)va;
	for (i = 0; i < pg_cnt; i++) {
		vm_page = vmalloc_to_page(addr);
		if (!vm_page)
			goto err;

		pg_dma[i] = dma_map_page(hw->device, vm_page, 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(hw->device, pg_dma[i]))
			goto err;

		addr += PAGE_SIZE;
	}

	return 0;

err:
	irdma_unmap_vm_page_list(hw, pg_dma, i);
	return -ENOMEM;
}

void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt)
{
	u32 i;

	for (i = 0; i < pg_cnt; i++)
		dma_unmap_page(hw->device, pg_dma[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
}

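/*
 * Illustrative only: DMA-mapping a two-page vmalloc buffer with the helpers
 * above, then unmapping it.  The buffer size is arbitrary for the sketch.
 */
static __always_unused int irdma_example_map_vmalloc(struct irdma_hw *hw)
{
	dma_addr_t pg_dma[2];
	void *va = vmalloc(2 * PAGE_SIZE);
	int err;

	if (!va)
		return -ENOMEM;

	err = irdma_map_vm_page_list(hw, va, pg_dma, 2);
	if (!err) {
		/* ...hand pg_dma[] to hardware here... */
		irdma_unmap_vm_page_list(hw, pg_dma, 2);
	}
	vfree(va);
	return err;
}
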
/**
 * irdma_pble_free_paged_mem - free virtual paged memory
 * @chunk: chunk to free with paged memory
 */
void irdma_pble_free_paged_mem(struct irdma_chunk *chunk)
{
	if (!chunk->pg_cnt)
		goto done;

	irdma_unmap_vm_page_list(chunk->dev->hw, chunk->dmainfo.dmaaddrs,
				 chunk->pg_cnt);

done:
	kfree(chunk->dmainfo.dmaaddrs);
	chunk->dmainfo.dmaaddrs = NULL;
	vfree(chunk->vaddr);
	chunk->vaddr = NULL;
	chunk->type = 0;
}

/**
 * irdma_pble_get_paged_mem - allocate paged memory for pbles
 * @chunk: chunk to add for paged memory
 * @pg_cnt: number of pages needed
 */
int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt)
{
	u32 size;
	void *va;

	chunk->dmainfo.dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
	if (!chunk->dmainfo.dmaaddrs)
		return -ENOMEM;

	size = PAGE_SIZE * pg_cnt;
	va = vmalloc(size);
	if (!va)
		goto err;

	if (irdma_map_vm_page_list(chunk->dev->hw, va, chunk->dmainfo.dmaaddrs,
				   pg_cnt)) {
		vfree(va);
		goto err;
	}
	chunk->vaddr = va;
	chunk->size = size;
	chunk->pg_cnt = pg_cnt;
	chunk->type = PBLE_SD_PAGED;

	return 0;
err:
	kfree(chunk->dmainfo.dmaaddrs);
	chunk->dmainfo.dmaaddrs = NULL;

	return -ENOMEM;
}

/**
 * irdma_alloc_ws_node_id - Allocate a tx scheduler node ID
 * @dev: device pointer
 */
u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);
	u32 next = 1;
	u32 node_id;

	if (irdma_alloc_rsrc(rf, rf->allocated_ws_nodes, rf->max_ws_node_id,
			     &node_id, &next))
		return IRDMA_WS_NODE_INVALID;

	return (u16)node_id;
}

/**
 * irdma_free_ws_node_id - Free a tx scheduler node ID
 * @dev: device pointer
 * @node_id: Work scheduler node ID
 */
void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id)
{
	struct irdma_pci_f *rf = dev_to_rf(dev);

	irdma_free_rsrc(rf, rf->allocated_ws_nodes, (u32)node_id);
}

/**
 * irdma_modify_qp_to_err - Modify a QP to error
 * @sc_qp: qp structure
 */
void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
{
	struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
	struct ib_qp_attr attr;

	if (qp->iwdev->rf->reset)
		return;
	attr.qp_state = IB_QPS_ERR;

	if (rdma_protocol_roce(qp->ibqp.device, 1))
		irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
	else
		irdma_modify_qp(&qp->ibqp, &attr, IB_QP_STATE, NULL);
}

void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
{
	struct ib_event ibevent;

	if (!iwqp->ibqp.event_handler)
		return;

	switch (event) {
	case IRDMA_QP_EVENT_CATASTROPHIC:
		ibevent.event = IB_EVENT_QP_FATAL;
		break;
	case IRDMA_QP_EVENT_ACCESS_ERR:
		ibevent.event = IB_EVENT_QP_ACCESS_ERR;
		break;
	case IRDMA_QP_EVENT_REQ_ERR:
		ibevent.event = IB_EVENT_QP_REQ_ERR;
		break;
	}
	ibevent.device = iwqp->ibqp.device;
	ibevent.element.qp = &iwqp->ibqp;
	iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
}

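/*
 * Note on the check below: a CQ is considered "empty" when the valid bit of
 * the current CQE does not match the consumer's expected polarity.  The
 * polarity flips on each ring wrap, which lets the consumer detect new CQEs
 * without having to zero out entries it has already consumed.
 */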
bool irdma_cq_empty(struct irdma_cq *iwcq)
{
	struct irdma_cq_uk *ukcq;
	u64 qword3;
	__le64 *cqe;
	u8 polarity;

	ukcq = &iwcq->sc_cq.cq_uk;
	cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

	return polarity != ukcq->polarity;
}

void irdma_remove_cmpls_list(struct irdma_cq *iwcq)
{
	struct irdma_cmpl_gen *cmpl_node;
	struct list_head *tmp_node, *list_node;

	list_for_each_safe (list_node, tmp_node, &iwcq->cmpl_generated) {
		cmpl_node = list_entry(list_node, struct irdma_cmpl_gen, list);
		list_del(&cmpl_node->list);
		kfree(cmpl_node);
	}
}

int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info)
{
	struct irdma_cmpl_gen *cmpl;

	if (list_empty(&iwcq->cmpl_generated))
		return -ENOENT;
	cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, struct irdma_cmpl_gen, list);
	list_del(&cmpl->list);
	memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info));
	kfree(cmpl);

	ibdev_dbg(iwcq->ibcq.device,
		  "VERBS: %s: Poll artificially generated completion for QP 0x%X, op %u, wr_id=0x%llx\n",
		  __func__, cq_poll_info->qp_id, cq_poll_info->op_type,
		  cq_poll_info->wr_id);

	return 0;
}

/**
 * irdma_set_cpi_common_values - fill in values for polling info struct
 * @cpi: resulting structure of cq_poll_info type
 * @qp: QPair
 * @qp_num: id of the QP
 */
static void irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi,
					struct irdma_qp_uk *qp, u32 qp_num)
{
	cpi->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
	cpi->error = true;
	cpi->major_err = IRDMA_FLUSH_MAJOR_ERR;
	cpi->minor_err = FLUSH_GENERAL_ERR;
	cpi->qp_handle = (irdma_qp_handle)(uintptr_t)qp;
	cpi->qp_id = qp_num;
}

static inline void irdma_comp_handler(struct irdma_cq *cq)
{
	if (!cq->ibcq.comp_handler)
		return;
	if (atomic_cmpxchg(&cq->armed, 1, 0))
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

void irdma_generate_flush_completions(struct irdma_qp *iwqp)
{
	struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
	struct irdma_ring *sq_ring = &qp->sq_ring;
	struct irdma_ring *rq_ring = &qp->rq_ring;
	struct irdma_cmpl_gen *cmpl;
	__le64 *sw_wqe;
	u64 wqe_qword;
	u32 wqe_idx;
	bool compl_generated = false;
	unsigned long flags1;

	spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
	if (irdma_cq_empty(iwqp->iwscq)) {
		unsigned long flags2;

		spin_lock_irqsave(&iwqp->lock, flags2);
		while (IRDMA_RING_MORE_WORK(*sq_ring)) {
			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
			if (!cmpl) {
				spin_unlock_irqrestore(&iwqp->lock, flags2);
				spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
				return;
			}

			wqe_idx = sq_ring->tail;
			irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);

			cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			sw_wqe = qp->sq_base[wqe_idx].elem;
			get_64bit_val(sw_wqe, 24, &wqe_qword);
			cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
			cmpl->cpi.q_type = IRDMA_CQE_QTYPE_SQ;
			/* remove the SQ WR by moving SQ tail*/
			IRDMA_RING_SET_TAIL(*sq_ring,
				sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
			if (cmpl->cpi.op_type == IRDMAQP_OP_NOP) {
				kfree(cmpl);
				continue;
			}
			ibdev_dbg(iwqp->iwscq->ibcq.device,
				  "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
				  __func__, cmpl->cpi.wr_id, qp->qp_id);
			list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
			compl_generated = true;
		}
		spin_unlock_irqrestore(&iwqp->lock, flags2);
		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
		if (compl_generated)
			irdma_comp_handler(iwqp->iwscq);
	} else {
		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
	}

	spin_lock_irqsave(&iwqp->iwrcq->lock, flags1);
	if (irdma_cq_empty(iwqp->iwrcq)) {
		unsigned long flags2;

		spin_lock_irqsave(&iwqp->lock, flags2);
		while (IRDMA_RING_MORE_WORK(*rq_ring)) {
			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
			if (!cmpl) {
				spin_unlock_irqrestore(&iwqp->lock, flags2);
				spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
				return;
			}

			wqe_idx = rq_ring->tail;
			irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);

			cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx];
			cmpl->cpi.op_type = IRDMA_OP_TYPE_REC;
			cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
			/* remove the RQ WR by moving RQ tail */
			IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
			ibdev_dbg(iwqp->iwrcq->ibcq.device,
				  "DEV: %s: adding wr_id = 0x%llx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
				  __func__, cmpl->cpi.wr_id, qp->qp_id,
				  wqe_idx);
			list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);

			compl_generated = true;
		}
		spin_unlock_irqrestore(&iwqp->lock, flags2);
		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
		if (compl_generated)
			irdma_comp_handler(iwqp->iwrcq);
	} else {
		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
	}
}