/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
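
/*
 * Set the MAC address of a RoCE port. On HIP09 and later revisions this
 * is a no-op (on those engines the MAC appears to be programmed together
 * with the GID entries); on older engines the address is cached in
 * dev_addr[] and pushed to hardware through the hw->set_mac hook.
 */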
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port,
			    const u8 *addr)
{
	u8 phy_port;
	u32 i;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return 0;

	if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
		return 0;

	for (i = 0; i < ETH_ALEN; i++)
		hr_dev->dev_addr[port][i] = addr[i];

	phy_port = hr_dev->iboe.phy_port[port];
	return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}
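
/*
 * GID table hooks invoked by the RDMA core. Both validate the port
 * index and forward to the engine-specific set_gid implementation;
 * del_gid passes NULL to clear the entry.
 */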
static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	u32 port = attr->port_num - 1;
	int ret;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr);

	return ret;
}
static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	u32 port = attr->port_num - 1;
	int ret;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	ret = hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL);

	return ret;
}
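
/*
 * Handle a netdevice event on one of the ports bound to this device.
 * Only events that can change the port MAC need action; NETDEV_DOWN is
 * deliberately left alone and everything else is logged at debug level.
 */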
static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port,
			   unsigned long event)
{
	struct device *dev = hr_dev->dev;
	struct net_device *netdev;
	int ret = 0;

	netdev = hr_dev->iboe.netdevs[port];
	if (!netdev) {
		dev_err(dev, "can't find netdev on port(%u)!\n", port);
		return -ENODEV;
	}

	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_CHANGEADDR:
		ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
		break;
	case NETDEV_DOWN:
		/*
		 * In v1 engine, only support all ports closed together.
		 */
		break;
	default:
		dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
		break;
	}

	return ret;
}
static int hns_roce_netdev_event(struct notifier_block *self,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct hns_roce_ib_iboe *iboe = NULL;
	struct hns_roce_dev *hr_dev = NULL;
	int ret;
	u32 port;

	hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
	iboe = &hr_dev->iboe;

	for (port = 0; port < hr_dev->caps.num_ports; port++) {
		if (dev == iboe->netdevs[port]) {
			ret = handle_en_event(hr_dev, port, event);
			if (ret)
				return NOTIFY_DONE;
			break;
		}
	}

	return NOTIFY_DONE;
}
static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
	int ret;
	u8 i;

	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		ret = hns_roce_set_mac(hr_dev, i,
				       hr_dev->iboe.netdevs[i]->dev_addr);
		if (ret)
			return ret;
	}

	return 0;
}
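
/*
 * Report device-wide limits and capability flags to the RDMA core, all
 * derived from the capabilities probed during hw_profile().
 */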
static int hns_roce_query_device(struct ib_device *ib_dev,
				 struct ib_device_attr *props,
				 struct ib_udata *uhw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	memset(props, 0, sizeof(*props));

	props->fw_ver = hr_dev->caps.fw_ver;
	props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
	props->max_mr_size = (u64)(~(0ULL));
	props->page_size_cap = hr_dev->caps.page_size_cap;
	props->vendor_id = hr_dev->vendor_id;
	props->vendor_part_id = hr_dev->vendor_part_id;
	props->hw_ver = hr_dev->hw_rev;
	props->max_qp = hr_dev->caps.num_qps;
	props->max_qp_wr = hr_dev->caps.max_wqes;
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
				  IB_DEVICE_RC_RNR_NAK_GEN;
	props->max_send_sge = hr_dev->caps.max_sq_sg;
	props->max_recv_sge = hr_dev->caps.max_rq_sg;
	props->max_sge_rd = 1;
	props->max_cq = hr_dev->caps.num_cqs;
	props->max_cqe = hr_dev->caps.max_cqes;
	props->max_mr = hr_dev->caps.num_mtpts;
	props->max_pd = hr_dev->caps.num_pds;
	props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
	props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
			    IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys = 1;
	props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		props->max_srq = hr_dev->caps.num_srqs;
		props->max_srq_wr = hr_dev->caps.max_srq_wrs;
		props->max_srq_sge = hr_dev->caps.max_srq_sges;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR &&
	    hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
		props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;

	return 0;
}
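
/*
 * Port attributes are derived from the bound netdev under iboe.lock:
 * the active MTU is the netdev MTU clamped to the engine maximum, and
 * the port/phys state follow the netdev running and carrier state.
 */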
static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
			       struct ib_port_attr *props)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = hr_dev->dev;
	struct net_device *net_dev;
	unsigned long flags;
	enum ib_mtu mtu;
	u32 port;

	port = port_num - 1;

	/* props being zeroed by the caller, avoid zeroing it here */

	props->max_mtu = hr_dev->caps.max_mtu;
	props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				IB_PORT_VENDOR_CLASS_SUP |
				IB_PORT_BOOT_MGMT_SUP;
	props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
	props->pkey_tbl_len = 1;
	props->active_width = IB_WIDTH_4X;
	props->active_speed = 1;

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	net_dev = hr_dev->iboe.netdevs[port];
	if (!net_dev) {
		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
		dev_err(dev, "find netdev %u failed!\n", port);
		return -EINVAL;
	}

	mtu = iboe_get_mtu(net_dev->mtu);
	props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
	props->state = netif_running(net_dev) && netif_carrier_ok(net_dev) ?
			       IB_PORT_ACTIVE :
			       IB_PORT_DOWN;
	props->phys_state = props->state == IB_PORT_ACTIVE ?
				    IB_PORT_PHYS_STATE_LINK_UP :
				    IB_PORT_PHYS_STATE_DISABLED;

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return 0;
}
static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
						    u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index,
			       u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = PKEY_ID;

	return 0;
}
static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
				  struct ib_device_modify *props)
{
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
		memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
		spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
	}

	return 0;
}
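
/*
 * Allocate and register an rdma_user_mmap_entry describing a region
 * that userspace may mmap(). Page offset 0 is reserved for the
 * doorbell page so that existing userspace, which maps pgoff 0
 * unconditionally, keeps working; DWQE entries may land anywhere in
 * the remaining offset range.
 */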
struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
				size_t length,
				enum hns_roce_mmap_type mmap_type)
{
	struct hns_user_mmap_entry *entry;
	int ret;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->address = address;
	entry->mmap_type = mmap_type;

	switch (mmap_type) {
	/* pgoff 0 must be used by DB for compatibility */
	case HNS_ROCE_MMAP_TYPE_DB:
		ret = rdma_user_mmap_entry_insert_exact(
				ucontext, &entry->rdma_entry, length, 0);
		break;
	case HNS_ROCE_MMAP_TYPE_DWQE:
		ret = rdma_user_mmap_entry_insert_range(
				ucontext, &entry->rdma_entry, length, 1,
				U32_MAX);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret) {
		kfree(entry);
		return NULL;
	}

	return entry;
}
static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
{
	if (context->db_mmap_entry)
		rdma_user_mmap_entry_remove(
			&context->db_mmap_entry->rdma_entry);
}
static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
	u64 address;

	address = context->uar.pfn << PAGE_SHIFT;
	context->db_mmap_entry = hns_roce_user_mmap_entry_insert(
		uctx, address, PAGE_SIZE, HNS_ROCE_MMAP_TYPE_DB);
	if (!context->db_mmap_entry)
		return -ENOMEM;

	return 0;
}
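
/*
 * Per-process context setup: negotiate optional features with the
 * provider library through ucmd/resp, allocate a UAR plus its doorbell
 * mmap entry, and report resource sizes (QP/SRQ table sizes, CQE size)
 * back to userspace.
 */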
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
				   struct ib_udata *udata)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
	struct hns_roce_ib_alloc_ucontext_resp resp = {};
	struct hns_roce_ib_alloc_ucontext ucmd = {};
	int ret;

	if (!hr_dev->active)
		return -EAGAIN;

	resp.qp_tab_size = hr_dev->caps.num_qps;
	resp.srq_tab_size = hr_dev->caps.num_srqs;

	ret = ib_copy_from_udata(&ucmd, udata,
				 min(udata->inlen, sizeof(ucmd)));
	if (ret)
		return ret;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		context->config = ucmd.config & HNS_ROCE_EXSGE_FLAGS;

	if (context->config & HNS_ROCE_EXSGE_FLAGS) {
		resp.config |= HNS_ROCE_RSP_EXSGE_FLAGS;
		resp.max_inline_data = hr_dev->caps.max_sq_inline;
	}

	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
	if (ret)
		goto error_fail_uar_alloc;

	ret = hns_roce_alloc_uar_entry(uctx);
	if (ret)
		goto error_fail_uar_entry;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
		INIT_LIST_HEAD(&context->page_list);
		mutex_init(&context->page_mutex);
	}

	resp.cqe_size = hr_dev->caps.cqe_sz;

	ret = ib_copy_to_udata(udata, &resp,
			       min(udata->outlen, sizeof(resp)));
	if (ret)
		goto error_fail_copy_to_udata;

	return 0;

error_fail_copy_to_udata:
	hns_roce_dealloc_uar_entry(context);

error_fail_uar_entry:
	ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);

error_fail_uar_alloc:
	return ret;
}
static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);

	hns_roce_dealloc_uar_entry(context);

	ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
}
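
/*
 * Back a userspace mmap() request with the MMIO region recorded in the
 * entry found at vm_pgoff. Both entry types map device (uncached)
 * memory, hence pgprot_device().
 */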
static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct hns_user_mmap_entry *entry;
	phys_addr_t pfn;
	pgprot_t prot;
	int ret;

	rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
	if (!rdma_entry)
		return -EINVAL;

	entry = to_hns_mmap(rdma_entry);
	pfn = entry->address >> PAGE_SHIFT;

	switch (entry->mmap_type) {
	case HNS_ROCE_MMAP_TYPE_DB:
	case HNS_ROCE_MMAP_TYPE_DWQE:
		prot = pgprot_device(vma->vm_page_prot);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
				prot, rdma_entry);

out:
	rdma_user_mmap_entry_put(rdma_entry);

	return ret;
}
static void hns_roce_free_mmap(struct rdma_user_mmap_entry *rdma_entry)
{
	struct hns_user_mmap_entry *entry = to_hns_mmap(rdma_entry);

	kfree(entry);
}
static int hns_roce_port_immutable(struct ib_device *ib_dev, u32 port_num,
				   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int ret;

	ret = ib_query_port(ib_dev, port_num, &attr);
	if (ret)
		return ret;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return 0;
}
static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}
static void hns_roce_get_fw_ver(struct ib_device *device, char *str)
{
	u64 fw_ver = to_hr_dev(device)->caps.fw_ver;
	unsigned int major, minor, sub_minor;

	major = upper_32_bits(fw_ver);
	minor = high_16_bits(lower_32_bits(fw_ver));
	sub_minor = low_16_bits(fw_ver);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%04u", major, minor,
		 sub_minor);
}
static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;

	hr_dev->active = false;
	unregister_netdevice_notifier(&iboe->nb);
	ib_unregister_device(&hr_dev->ib_dev);
}
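
/*
 * Core verbs ops common to all engine revisions. The optional ops
 * tables that follow are layered on at registration time according to
 * the capability flags reported by the engine.
 */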
static const struct ib_device_ops hns_roce_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_HNS,
	.uverbs_abi_ver = 1,
	.uverbs_no_driver_id_binding = 1,

	.get_dev_fw_str = hns_roce_get_fw_ver,
	.add_gid = hns_roce_add_gid,
	.alloc_pd = hns_roce_alloc_pd,
	.alloc_ucontext = hns_roce_alloc_ucontext,
	.create_ah = hns_roce_create_ah,
	.create_user_ah = hns_roce_create_ah,
	.create_cq = hns_roce_create_cq,
	.create_qp = hns_roce_create_qp,
	.dealloc_pd = hns_roce_dealloc_pd,
	.dealloc_ucontext = hns_roce_dealloc_ucontext,
	.del_gid = hns_roce_del_gid,
	.dereg_mr = hns_roce_dereg_mr,
	.destroy_ah = hns_roce_destroy_ah,
	.destroy_cq = hns_roce_destroy_cq,
	.disassociate_ucontext = hns_roce_disassociate_ucontext,
	.get_dma_mr = hns_roce_get_dma_mr,
	.get_link_layer = hns_roce_get_link_layer,
	.get_port_immutable = hns_roce_port_immutable,
	.mmap = hns_roce_mmap,
	.mmap_free = hns_roce_free_mmap,
	.modify_device = hns_roce_modify_device,
	.modify_qp = hns_roce_modify_qp,
	.query_ah = hns_roce_query_ah,
	.query_device = hns_roce_query_device,
	.query_pkey = hns_roce_query_pkey,
	.query_port = hns_roce_query_port,
	.reg_user_mr = hns_roce_reg_user_mr,

	INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, hns_roce_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
};

static const struct ib_device_ops hns_roce_dev_mr_ops = {
	.rereg_user_mr = hns_roce_rereg_user_mr,
};

static const struct ib_device_ops hns_roce_dev_mw_ops = {
	.alloc_mw = hns_roce_alloc_mw,
	.dealloc_mw = hns_roce_dealloc_mw,

	INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
};

static const struct ib_device_ops hns_roce_dev_frmr_ops = {
	.alloc_mr = hns_roce_alloc_mr,
	.map_mr_sg = hns_roce_map_mr_sg,
};

static const struct ib_device_ops hns_roce_dev_srq_ops = {
	.create_srq = hns_roce_create_srq,
	.destroy_srq = hns_roce_destroy_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
};

static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
	.alloc_xrcd = hns_roce_alloc_xrcd,
	.dealloc_xrcd = hns_roce_dealloc_xrcd,

	INIT_RDMA_OBJ_SIZE(ib_xrcd, hns_roce_xrcd, ibxrcd),
};

static const struct ib_device_ops hns_roce_dev_restrack_ops = {
	.fill_res_cq_entry = hns_roce_fill_res_cq_entry,
	.fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
	.fill_res_qp_entry = hns_roce_fill_res_qp_entry,
	.fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw,
	.fill_res_mr_entry = hns_roce_fill_res_mr_entry,
	.fill_res_mr_entry_raw = hns_roce_fill_res_mr_entry_raw,
};
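
/*
 * Register the device with the RDMA core. The ordering of the
 * ib_set_device_ops() calls below matters: the core keeps the first
 * non-NULL op it sees, so the engine-specific ops installed first take
 * precedence over the generic tables that follow.
 */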
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct hns_roce_ib_iboe *iboe = NULL;
	struct ib_device *ib_dev = NULL;
	struct device *dev = hr_dev->dev;
	unsigned int i;

	iboe = &hr_dev->iboe;
	spin_lock_init(&iboe->lock);

	ib_dev = &hr_dev->ib_dev;

	ib_dev->node_type = RDMA_NODE_IB_CA;
	ib_dev->dev.parent = dev;

	ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
	ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
	ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR)
		ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW)
		ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
		ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
		ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		ib_set_device_ops(ib_dev, &hns_roce_dev_xrcd_ops);

	ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
	ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
	ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		if (!hr_dev->iboe.netdevs[i])
			continue;

		ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
					   i + 1);
		if (ret)
			return ret;
	}
	dma_set_max_seg_size(dev, UINT_MAX);
	ret = ib_register_device(ib_dev, "hns_%d", dev);
	if (ret) {
		dev_err(dev, "ib_register_device failed!\n");
		return ret;
	}

	ret = hns_roce_setup_mtu_mac(hr_dev);
	if (ret) {
		dev_err(dev, "setup_mtu_mac failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	iboe->nb.notifier_call = hns_roce_netdev_event;
	ret = register_netdevice_notifier(&iboe->nb);
	if (ret) {
		dev_err(dev, "register_netdevice_notifier failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	hr_dev->active = true;

	return 0;

error_failed_setup_mtu_mac:
	ib_unregister_device(ib_dev);

	return ret;
}
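
/*
 * HEM (Hardware Entry Memory) keeps the hardware context tables (MTPT,
 * QPC, IRRL/TRRL, CQC, SRQC, SCC and the timer contexts) in host
 * memory. Each table is sized from the probed caps; optional tables
 * are created only when the matching capability flag or entry size is
 * nonzero, and the error path unwinds them in reverse order.
 */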
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
				      hr_dev->caps.num_mtpts);
	if (ret) {
		dev_err(dev, "failed to init MTPT context memory, aborting.\n");
		return ret;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
				      HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
				      hr_dev->caps.num_qps);
	if (ret) {
		dev_err(dev, "failed to init QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
				      HEM_TYPE_IRRL,
				      hr_dev->caps.irrl_entry_sz *
				      hr_dev->caps.max_qp_init_rdma,
				      hr_dev->caps.num_qps);
	if (ret) {
		dev_err(dev, "failed to init irrl_table memory, aborting.\n");
		goto err_unmap_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.trrl_table,
					      HEM_TYPE_TRRL,
					      hr_dev->caps.trrl_entry_sz *
					      hr_dev->caps.max_qp_dest_rdma,
					      hr_dev->caps.num_qps);
		if (ret) {
			dev_err(dev,
				"failed to init trrl_table memory, aborting.\n");
			goto err_unmap_irrl;
		}
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
				      HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
				      hr_dev->caps.num_cqs);
	if (ret) {
		dev_err(dev, "failed to init CQ context memory, aborting.\n");
		goto err_unmap_trrl;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
					      HEM_TYPE_SRQC,
					      hr_dev->caps.srqc_entry_sz,
					      hr_dev->caps.num_srqs);
		if (ret) {
			dev_err(dev,
				"failed to init SRQ context memory, aborting.\n");
			goto err_unmap_cq;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.sccc_table,
					      HEM_TYPE_SCCC,
					      hr_dev->caps.sccc_sz,
					      hr_dev->caps.num_qps);
		if (ret) {
			dev_err(dev,
				"failed to init SCC context memory, aborting.\n");
			goto err_unmap_srq;
		}
	}

	if (hr_dev->caps.qpc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
					      HEM_TYPE_QPC_TIMER,
					      hr_dev->caps.qpc_timer_entry_sz,
					      hr_dev->caps.qpc_timer_bt_num);
		if (ret) {
			dev_err(dev,
				"failed to init QPC timer memory, aborting.\n");
			goto err_unmap_ctx;
		}
	}

	if (hr_dev->caps.cqc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
					      HEM_TYPE_CQC_TIMER,
					      hr_dev->caps.cqc_timer_entry_sz,
					      hr_dev->caps.cqc_timer_bt_num);
		if (ret) {
			dev_err(dev,
				"failed to init CQC timer memory, aborting.\n");
			goto err_unmap_qpc_timer;
		}
	}

	if (hr_dev->caps.gmv_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->gmv_table,
					      HEM_TYPE_GMV,
					      hr_dev->caps.gmv_entry_sz,
					      hr_dev->caps.gmv_entry_num);
		if (ret) {
			dev_err(dev,
				"failed to init gmv table memory, ret = %d\n",
				ret);
			goto err_unmap_cqc_timer;
		}
	}

	return 0;

err_unmap_cqc_timer:
	if (hr_dev->caps.cqc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cqc_timer_table);
err_unmap_qpc_timer:
	if (hr_dev->caps.qpc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);
err_unmap_ctx:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.sccc_table);
err_unmap_srq:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);
err_unmap_cq:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
err_unmap_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);
err_unmap_irrl:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
err_unmap_qp:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
err_unmap_dmpt:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);

	return ret;
}
/**
 * hns_roce_setup_hca - setup host channel adapter
 * @hr_dev: pointer to hns roce device
 * Return : int
 */
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	spin_lock_init(&hr_dev->sm_lock);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
		INIT_LIST_HEAD(&hr_dev->pgdir_list);
		mutex_init(&hr_dev->pgdir_mutex);
	}

	hns_roce_init_uar_table(hr_dev);

	ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
	if (ret) {
		dev_err(dev, "failed to allocate priv_uar.\n");
		goto err_uar_table_free;
	}

	ret = hns_roce_init_qp_table(hr_dev);
	if (ret) {
		dev_err(dev, "failed to init qp_table.\n");
		goto err_uar_table_free;
	}

	hns_roce_init_pd_table(hr_dev);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		hns_roce_init_xrcd_table(hr_dev);

	hns_roce_init_mr_table(hr_dev);

	hns_roce_init_cq_table(hr_dev);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_init_srq_table(hr_dev);

	return 0;

err_uar_table_free:
	ida_destroy(&hr_dev->uar_ida.ida);
	return ret;
}
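
/*
 * Device-fatal error handling: collect each CQ (once, guarded by the
 * is_armed flag) that has a completion handler and is attached to a QP
 * with outstanding work requests, then kick its completion handler so
 * consumers can observe the failure.
 */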
static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
	unsigned long flags;

	spin_lock_irqsave(&hr_cq->lock, flags);
	if (cq->comp_handler) {
		if (!hr_cq->is_armed) {
			hr_cq->is_armed = 1;
			list_add_tail(&hr_cq->node, cq_list);
		}
	}
	spin_unlock_irqrestore(&hr_cq->lock, flags);
}
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp *hr_qp;
	struct hns_roce_cq *hr_cq;
	struct list_head cq_list;
	unsigned long flags_qp;
	unsigned long flags;

	INIT_LIST_HEAD(&cq_list);

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
		spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
		if (hr_qp->sq.tail != hr_qp->sq.head)
			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
		spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);

		spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
		if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
		spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
	}

	list_for_each_entry(hr_cq, &cq_list, node)
		hns_roce_cq_completion(hr_dev, hr_cq->cqn);

	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}
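
/*
 * Main bring-up sequence. Ordering matters: the command queue must
 * exist before the engine profile can be read, EQs must exist before
 * command completion can switch from polling to event mode, and the
 * HEM tables must be in place before the HCA resources that live in
 * them are set up. The error labels unwind in exact reverse order.
 */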
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	hr_dev->is_reset = false;

	if (hr_dev->hw->cmq_init) {
		ret = hr_dev->hw->cmq_init(hr_dev);
		if (ret) {
			dev_err(dev, "init RoCE Command Queue failed!\n");
			return ret;
		}
	}

	ret = hr_dev->hw->hw_profile(hr_dev);
	if (ret) {
		dev_err(dev, "get RoCE engine profile failed!\n");
		goto error_failed_cmd_init;
	}

	ret = hns_roce_cmd_init(hr_dev);
	if (ret) {
		dev_err(dev, "cmd init failed!\n");
		goto error_failed_cmd_init;
	}

	/* EQ depends on poll mode, event mode depends on EQ */
	ret = hr_dev->hw->init_eq(hr_dev);
	if (ret) {
		dev_err(dev, "eq init failed!\n");
		goto error_failed_eq_table;
	}

	if (hr_dev->cmd_mod) {
		ret = hns_roce_cmd_use_events(hr_dev);
		if (ret)
			dev_warn(dev,
				 "Cmd event mode failed, set back to poll!\n");
	}

	ret = hns_roce_init_hem(hr_dev);
	if (ret) {
		dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
		goto error_failed_init_hem;
	}

	ret = hns_roce_setup_hca(hr_dev);
	if (ret) {
		dev_err(dev, "setup hca failed!\n");
		goto error_failed_setup_hca;
	}

	if (hr_dev->hw->hw_init) {
		ret = hr_dev->hw->hw_init(hr_dev);
		if (ret) {
			dev_err(dev, "hw_init failed!\n");
			goto error_failed_engine_init;
		}
	}

	INIT_LIST_HEAD(&hr_dev->qp_list);
	spin_lock_init(&hr_dev->qp_list_lock);
	INIT_LIST_HEAD(&hr_dev->dip_list);
	spin_lock_init(&hr_dev->dip_list_lock);

	ret = hns_roce_register_device(hr_dev);
	if (ret)
		goto error_failed_register_device;

	return 0;

error_failed_register_device:
	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);

error_failed_engine_init:
	hns_roce_cleanup_bitmap(hr_dev);

error_failed_setup_hca:
	hns_roce_cleanup_hem(hr_dev);

error_failed_init_hem:
	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);
	hr_dev->hw->cleanup_eq(hr_dev);

error_failed_eq_table:
	hns_roce_cmd_cleanup(hr_dev);

error_failed_cmd_init:
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);

	return ret;
}
void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_unregister_device(hr_dev);

	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);
	hns_roce_cleanup_bitmap(hr_dev);
	hns_roce_cleanup_hem(hr_dev);

	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);

	hr_dev->hw->cleanup_eq(hr_dev);
	hns_roce_cmd_cleanup(hr_dev);
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);
}
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("HNS RoCE Driver");