2 * Copyright (c) 2016-2017 Hisilicon Limited.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/acpi.h>
34 #include <linux/etherdevice.h>
35 #include <linux/interrupt.h>
36 #include <linux/iopoll.h>
37 #include <linux/kernel.h>
38 #include <linux/types.h>
39 #include <net/addrconf.h>
40 #include <rdma/ib_addr.h>
41 #include <rdma/ib_cache.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/uverbs_ioctl.h>
46 #include "hns_roce_common.h"
47 #include "hns_roce_device.h"
48 #include "hns_roce_cmd.h"
49 #include "hns_roce_hem.h"
50 #include "hns_roce_hw_v2.h"
58 enum ecc_resource_type {
64 ECC_RESOURCE_QPC_TIMER,
65 ECC_RESOURCE_CQC_TIMER,
76 HNS_ROCE_CMD_READ_QPC_BT0, HNS_ROCE_CMD_WRITE_QPC_BT0 },
78 HNS_ROCE_CMD_READ_CQC_BT0, HNS_ROCE_CMD_WRITE_CQC_BT0 },
80 HNS_ROCE_CMD_READ_MPT_BT0, HNS_ROCE_CMD_WRITE_MPT_BT0 },
81 { "ECC_RESOURCE_SRQC",
82 HNS_ROCE_CMD_READ_SRQC_BT0, HNS_ROCE_CMD_WRITE_SRQC_BT0 },
83 /* ECC_RESOURCE_GMV is handled by cmdq, not mailbox */
86 { "ECC_RESOURCE_QPC_TIMER",
87 HNS_ROCE_CMD_READ_QPC_TIMER_BT0, HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 },
88 { "ECC_RESOURCE_CQC_TIMER",
89 HNS_ROCE_CMD_READ_CQC_TIMER_BT0, HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 },
90 { "ECC_RESOURCE_SCCC",
91 HNS_ROCE_CMD_READ_SCCC_BT0, HNS_ROCE_CMD_WRITE_SCCC_BT0 },
94 static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
97 dseg->lkey = cpu_to_le32(sg->lkey);
98 dseg->addr = cpu_to_le64(sg->addr);
99 dseg->len = cpu_to_le32(sg->length);
103 * mapped-value = 1 + real-value
104 * The real hns wr opcode values start from 0. In order to distinguish
105 * initialized from uninitialized map values, we add 1 to the real value
106 * when defining the mapping, so that validity can be identified by
107 * checking whether the mapped value is greater than 0.
109 #define HR_OPC_MAP(ib_key, hr_key) \
110 [IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key
112 static const u32 hns_roce_op_code[] = {
113 HR_OPC_MAP(RDMA_WRITE, RDMA_WRITE),
114 HR_OPC_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE_WITH_IMM),
115 HR_OPC_MAP(SEND, SEND),
116 HR_OPC_MAP(SEND_WITH_IMM, SEND_WITH_IMM),
117 HR_OPC_MAP(RDMA_READ, RDMA_READ),
118 HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP),
119 HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD),
120 HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV),
121 HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP),
122 HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
123 HR_OPC_MAP(REG_MR, FAST_REG_PMR),
126 static u32 to_hr_opcode(u32 ib_opcode)
128 if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
129 return HNS_ROCE_V2_WQE_OP_MASK;
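/* Undo the "1 + real value" offset applied by HR_OPC_MAP; a stored value
 * of 0 means the IB opcode has no hns mapping.
 */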
131 return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
132 HNS_ROCE_V2_WQE_OP_MASK;
135 static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
136 const struct ib_reg_wr *wr)
138 struct hns_roce_wqe_frmr_seg *fseg =
139 (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
140 struct hns_roce_mr *mr = to_hr_mr(wr->mr);
143 /* use ib_access_flags */
144 hr_reg_write_bool(fseg, FRMR_BIND_EN, wr->access & IB_ACCESS_MW_BIND);
145 hr_reg_write_bool(fseg, FRMR_ATOMIC,
146 wr->access & IB_ACCESS_REMOTE_ATOMIC);
147 hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ);
148 hr_reg_write_bool(fseg, FRMR_RW, wr->access & IB_ACCESS_REMOTE_WRITE);
149 hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE);
151 /* The msg_len and inv_key fields are reused to carry the PBL base address */
152 pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
153 rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
154 rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));
156 rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
157 rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
158 rc_sq_wqe->rkey = cpu_to_le32(wr->key);
159 rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
161 hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages);
162 hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
163 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
164 hr_reg_clear(fseg, FRMR_BLK_MODE);
167 static void set_atomic_seg(const struct ib_send_wr *wr,
168 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
169 unsigned int valid_num_sge)
171 struct hns_roce_v2_wqe_data_seg *dseg =
172 (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
173 struct hns_roce_wqe_atomic_seg *aseg =
174 (void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);
176 set_data_seg_v2(dseg, wr->sg_list);
178 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
179 aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
180 aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
182 aseg->fetchadd_swap_data =
183 cpu_to_le64(atomic_wr(wr)->compare_add);
187 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
190 static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
191 const struct ib_send_wr *wr,
192 unsigned int *sge_idx, u32 msg_len)
194 struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
195 unsigned int left_len_in_pg;
196 unsigned int idx = *sge_idx;
202 if (msg_len > qp->sq.ext_sge_cnt * HNS_ROCE_SGE_SIZE) {
204 "not enough extended sge space for inline data.\n");
208 dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
209 left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
210 len = wr->sg_list[0].length;
211 addr = (void *)(unsigned long)(wr->sg_list[0].addr);
213 /* When copying data to the extended sge space, the length left in the
214 * page may not be long enough for the current user's sge. So the data
215 * should be split into several parts: one in the first page, and the
216 * others in the subsequent pages.
219 if (len <= left_len_in_pg) {
220 memcpy(dseg, addr, len);
222 idx += len / HNS_ROCE_SGE_SIZE;
225 if (i >= wr->num_sge)
228 left_len_in_pg -= len;
229 len = wr->sg_list[i].length;
230 addr = (void *)(unsigned long)(wr->sg_list[i].addr);
233 memcpy(dseg, addr, left_len_in_pg);
235 len -= left_len_in_pg;
236 addr += left_len_in_pg;
237 idx += left_len_in_pg / HNS_ROCE_SGE_SIZE;
238 dseg = hns_roce_get_extend_sge(qp,
239 idx & (qp->sge.sge_cnt - 1));
240 left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
249 static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
250 unsigned int *sge_ind, unsigned int cnt)
252 struct hns_roce_v2_wqe_data_seg *dseg;
253 unsigned int idx = *sge_ind;
256 dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
257 if (likely(sge->length)) {
258 set_data_seg_v2(dseg, sge);
268 static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
270 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
271 int mtu = ib_mtu_enum_to_int(qp->path_mtu);
273 if (len > qp->max_inline_data || len > mtu) {
274 ibdev_err(&hr_dev->ib_dev,
275 "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
276 len, qp->max_inline_data, mtu);
283 static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
284 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
285 unsigned int *sge_idx)
287 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
288 u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
289 struct ib_device *ibdev = &hr_dev->ib_dev;
290 unsigned int curr_idx = *sge_idx;
291 void *dseg = rc_sq_wqe;
295 if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
296 ibdev_err(ibdev, "invalid inline parameters!\n");
300 if (!check_inl_data_len(qp, msg_len))
303 dseg += sizeof(struct hns_roce_v2_rc_send_wqe);
305 if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
306 hr_reg_clear(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);
308 for (i = 0; i < wr->num_sge; i++) {
309 memcpy(dseg, ((void *)wr->sg_list[i].addr),
310 wr->sg_list[i].length);
311 dseg += wr->sg_list[i].length;
314 hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);
316 ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
320 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, curr_idx - *sge_idx);
328 static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
329 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
330 unsigned int *sge_ind,
331 unsigned int valid_num_sge)
333 struct hns_roce_v2_wqe_data_seg *dseg =
334 (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
335 struct hns_roce_qp *qp = to_hr_qp(ibqp);
339 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
340 (*sge_ind) & (qp->sge.sge_cnt - 1));
342 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
343 !!(wr->send_flags & IB_SEND_INLINE));
344 if (wr->send_flags & IB_SEND_INLINE)
345 return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);
347 if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
348 for (i = 0; i < wr->num_sge; i++) {
349 if (likely(wr->sg_list[i].length)) {
350 set_data_seg_v2(dseg, wr->sg_list + i);
355 for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
356 if (likely(wr->sg_list[i].length)) {
357 set_data_seg_v2(dseg, wr->sg_list + i);
363 set_extend_sge(qp, wr->sg_list + i, sge_ind,
364 valid_num_sge - HNS_ROCE_SGE_IN_WQE);
367 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
372 static int check_send_valid(struct hns_roce_dev *hr_dev,
373 struct hns_roce_qp *hr_qp)
375 struct ib_device *ibdev = &hr_dev->ib_dev;
376 struct ib_qp *ibqp = &hr_qp->ibqp;
378 if (unlikely(ibqp->qp_type != IB_QPT_RC &&
379 ibqp->qp_type != IB_QPT_GSI &&
380 ibqp->qp_type != IB_QPT_UD)) {
381 ibdev_err(ibdev, "not supported QP(0x%x) type!\n",
384 } else if (unlikely(hr_qp->state == IB_QPS_RESET ||
385 hr_qp->state == IB_QPS_INIT ||
386 hr_qp->state == IB_QPS_RTR)) {
387 ibdev_err(ibdev, "failed to post WQE, QP state %u!\n",
390 } else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
391 ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
399 static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
400 unsigned int *sge_len)
402 unsigned int valid_num = 0;
403 unsigned int len = 0;
406 for (i = 0; i < wr->num_sge; i++) {
407 if (likely(wr->sg_list[i].length)) {
408 len += wr->sg_list[i].length;
417 static __le32 get_immtdata(const struct ib_send_wr *wr)
419 switch (wr->opcode) {
420 case IB_WR_SEND_WITH_IMM:
421 case IB_WR_RDMA_WRITE_WITH_IMM:
422 return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
428 static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
429 const struct ib_send_wr *wr)
431 u32 ib_op = wr->opcode;
433 if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
436 ud_sq_wqe->immtdata = get_immtdata(wr);
438 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OPCODE, to_hr_opcode(ib_op));
443 static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
444 struct hns_roce_ah *ah)
446 struct ib_device *ib_dev = ah->ibah.device;
447 struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
449 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_UDPSPN, ah->av.udp_sport);
450 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit);
451 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass);
452 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel);
454 if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL))
457 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl);
459 ud_sq_wqe->sgid_index = ah->av.gid_index;
461 memcpy(ud_sq_wqe->dmac, ah->av.mac, ETH_ALEN);
462 memcpy(ud_sq_wqe->dgid, ah->av.dgid, GID_LEN_V2);
464 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
467 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN_EN, ah->av.vlan_en);
468 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN, ah->av.vlan_id);
473 static inline int set_ud_wqe(struct hns_roce_qp *qp,
474 const struct ib_send_wr *wr,
475 void *wqe, unsigned int *sge_idx,
476 unsigned int owner_bit)
478 struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
479 struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
480 unsigned int curr_idx = *sge_idx;
481 unsigned int valid_num_sge;
485 valid_num_sge = calc_wr_sge_num(wr, &msg_len);
487 ret = set_ud_opcode(ud_sq_wqe, wr);
491 ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
493 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_CQE,
494 !!(wr->send_flags & IB_SEND_SIGNALED));
495 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SE,
496 !!(wr->send_flags & IB_SEND_SOLICITED));
498 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_PD, to_hr_pd(qp->ibqp.pd)->pdn);
499 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SGE_NUM, valid_num_sge);
500 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_MSG_START_SGE_IDX,
501 curr_idx & (qp->sge.sge_cnt - 1));
503 ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
504 qp->qkey : ud_wr(wr)->remote_qkey);
505 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_DQPN, ud_wr(wr)->remote_qpn);
507 ret = fill_ud_av(ud_sq_wqe, ah);
511 qp->sl = to_hr_ah(ud_wr(wr)->ah)->av.sl;
513 set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);
516 * The pipeline can sequentially post all valid WQEs into WQ buffer,
517 * including new WQEs waiting for the doorbell to update the PI again.
518 * Therefore, the owner bit of WQE MUST be updated after all fields
519 * and extSGEs have been written into DDR instead of cache.
521 if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
525 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OWNER, owner_bit);
530 static int set_rc_opcode(struct hns_roce_dev *hr_dev,
531 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
532 const struct ib_send_wr *wr)
534 u32 ib_op = wr->opcode;
537 rc_sq_wqe->immtdata = get_immtdata(wr);
540 case IB_WR_RDMA_READ:
541 case IB_WR_RDMA_WRITE:
542 case IB_WR_RDMA_WRITE_WITH_IMM:
543 rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
544 rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
547 case IB_WR_SEND_WITH_IMM:
549 case IB_WR_ATOMIC_CMP_AND_SWP:
550 case IB_WR_ATOMIC_FETCH_AND_ADD:
551 rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
552 rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
555 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
556 set_frmr_seg(rc_sq_wqe, reg_wr(wr));
560 case IB_WR_SEND_WITH_INV:
561 rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
570 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OPCODE, to_hr_opcode(ib_op));
575 static inline int set_rc_wqe(struct hns_roce_qp *qp,
576 const struct ib_send_wr *wr,
577 void *wqe, unsigned int *sge_idx,
578 unsigned int owner_bit)
580 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
581 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
582 unsigned int curr_idx = *sge_idx;
583 unsigned int valid_num_sge;
587 valid_num_sge = calc_wr_sge_num(wr, &msg_len);
589 rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
591 ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
595 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_FENCE,
596 (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
598 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SE,
599 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
601 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
602 (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
604 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
605 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
606 set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
607 else if (wr->opcode != IB_WR_REG_MR)
608 ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
609 &curr_idx, valid_num_sge);
612 * The pipeline can sequentially post all valid WQEs into WQ buffer,
613 * including new WQEs waiting for the doorbell to update the PI again.
614 * Therefore, the owner bit of WQE MUST be updated after all fields
615 * and extSGEs have been written into DDR instead of cache.
617 if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
621 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OWNER, owner_bit);
626 static inline void update_sq_db(struct hns_roce_dev *hr_dev,
627 struct hns_roce_qp *qp)
629 if (unlikely(qp->state == IB_QPS_ERR)) {
630 flush_cqe(hr_dev, qp);
632 struct hns_roce_v2_db sq_db = {};
634 hr_reg_write(&sq_db, DB_TAG, qp->qpn);
635 hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB);
636 hr_reg_write(&sq_db, DB_PI, qp->sq.head);
637 hr_reg_write(&sq_db, DB_SL, qp->sl);
639 hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
643 static inline void update_rq_db(struct hns_roce_dev *hr_dev,
644 struct hns_roce_qp *qp)
646 if (unlikely(qp->state == IB_QPS_ERR)) {
647 flush_cqe(hr_dev, qp);
649 if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
651 qp->rq.head & V2_DB_PRODUCER_IDX_M;
653 struct hns_roce_v2_db rq_db = {};
655 hr_reg_write(&rq_db, DB_TAG, qp->qpn);
656 hr_reg_write(&rq_db, DB_CMD, HNS_ROCE_V2_RQ_DB);
657 hr_reg_write(&rq_db, DB_PI, qp->rq.head);
659 hns_roce_write64(hr_dev, (__le32 *)&rq_db,
665 static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
668 #define HNS_ROCE_WRITE_TIMES 8
669 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
670 struct hnae3_handle *handle = priv->handle;
671 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
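/* Push the 512-bit WQE to the device as 8 consecutive 64-bit relaxed
 * writes; skip the write while doorbells are disabled or the hardware
 * reports a pending reset.
 */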
674 if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
675 for (i = 0; i < HNS_ROCE_WRITE_TIMES; i++)
676 writeq_relaxed(*(val + i), dest + i);
679 static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
682 #define HNS_ROCE_SL_SHIFT 2
683 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
685 /* All kinds of DirectWQE have the same header field layout */
686 hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
687 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
688 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_H,
689 qp->sl >> HNS_ROCE_SL_SHIFT);
690 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_WQE_INDEX, qp->sq.head);
692 hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
695 static int hns_roce_v2_post_send(struct ib_qp *ibqp,
696 const struct ib_send_wr *wr,
697 const struct ib_send_wr **bad_wr)
699 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
700 struct ib_device *ibdev = &hr_dev->ib_dev;
701 struct hns_roce_qp *qp = to_hr_qp(ibqp);
702 unsigned long flags = 0;
703 unsigned int owner_bit;
704 unsigned int sge_idx;
705 unsigned int wqe_idx;
710 spin_lock_irqsave(&qp->sq.lock, flags);
712 ret = check_send_valid(hr_dev, qp);
719 sge_idx = qp->next_sge;
721 for (nreq = 0; wr; ++nreq, wr = wr->next) {
722 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
728 wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
730 if (unlikely(wr->num_sge > qp->sq.max_gs)) {
731 ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
732 wr->num_sge, qp->sq.max_gs);
738 wqe = hns_roce_get_send_wqe(qp, wqe_idx);
739 qp->sq.wrid[wqe_idx] = wr->wr_id;
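/* The owner bit is the complement of the SQ wrap-count parity, so it
 * flips each time the producer index wraps the WQE ring and lets
 * hardware tell newly posted WQEs from stale ones.
 */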
741 ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
743 /* Build the WQE according to the QP type */
744 if (ibqp->qp_type == IB_QPT_RC)
745 ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
747 ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
758 qp->next_sge = sge_idx;
760 if (nreq == 1 && (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
761 write_dwqe(hr_dev, qp, wqe);
763 update_sq_db(hr_dev, qp);
766 spin_unlock_irqrestore(&qp->sq.lock, flags);
771 static int check_recv_valid(struct hns_roce_dev *hr_dev,
772 struct hns_roce_qp *hr_qp)
774 struct ib_device *ibdev = &hr_dev->ib_dev;
775 struct ib_qp *ibqp = &hr_qp->ibqp;
777 if (unlikely(ibqp->qp_type != IB_QPT_RC &&
778 ibqp->qp_type != IB_QPT_GSI &&
779 ibqp->qp_type != IB_QPT_UD)) {
780 ibdev_err(ibdev, "unsupported qp type, qp_type = %d.\n",
785 if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
788 if (hr_qp->state == IB_QPS_RESET)
794 static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
795 u32 max_sge, bool rsv)
797 struct hns_roce_v2_wqe_data_seg *dseg = wqe;
800 for (i = 0, cnt = 0; i < wr->num_sge; i++) {
801 /* Skip zero-length sge */
802 if (!wr->sg_list[i].length)
804 set_data_seg_v2(dseg + cnt, wr->sg_list + i);
808 /* Fill a reserved sge to make hw stop reading remaining segments */
810 dseg[cnt].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
812 dseg[cnt].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
814 /* Clear remaining segments to make ROCEE ignore sges */
816 memset(dseg + cnt, 0,
817 (max_sge - cnt) * HNS_ROCE_SGE_SIZE);
821 static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
822 u32 wqe_idx, u32 max_sge)
824 struct hns_roce_rinl_sge *sge_list;
828 wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
829 fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
831 /* the rq supports inline data */
832 if (hr_qp->rq_inl_buf.wqe_cnt) {
833 sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
834 hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge;
835 for (i = 0; i < wr->num_sge; i++) {
836 sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
837 sge_list[i].len = wr->sg_list[i].length;
842 static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
843 const struct ib_recv_wr *wr,
844 const struct ib_recv_wr **bad_wr)
846 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
847 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
848 struct ib_device *ibdev = &hr_dev->ib_dev;
849 u32 wqe_idx, nreq, max_sge;
853 spin_lock_irqsave(&hr_qp->rq.lock, flags);
855 ret = check_recv_valid(hr_dev, hr_qp);
862 max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
863 for (nreq = 0; wr; ++nreq, wr = wr->next) {
864 if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
865 hr_qp->ibqp.recv_cq))) {
871 if (unlikely(wr->num_sge > max_sge)) {
872 ibdev_err(ibdev, "num_sge = %d > max_sge = %u.\n",
873 wr->num_sge, max_sge);
879 wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
880 fill_rq_wqe(hr_qp, wr, wqe_idx, max_sge);
881 hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
886 hr_qp->rq.head += nreq;
888 update_rq_db(hr_dev, hr_qp);
890 spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
895 static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)
897 return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
900 static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n)
902 return hns_roce_buf_offset(idx_que->mtr.kmem,
903 n << idx_que->entry_shift);
906 static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, u32 wqe_index)
908 /* always called with interrupts disabled. */
909 spin_lock(&srq->lock);
911 bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
914 spin_unlock(&srq->lock);
917 static int hns_roce_srqwq_overflow(struct hns_roce_srq *srq)
919 struct hns_roce_idx_que *idx_que = &srq->idx_que;
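/* head and tail grow monotonically; their difference is the number of
 * outstanding WQE indexes, so the queue is full once it reaches wqe_cnt.
 */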
921 return idx_que->head - idx_que->tail >= srq->wqe_cnt;
924 static int check_post_srq_valid(struct hns_roce_srq *srq, u32 max_sge,
925 const struct ib_recv_wr *wr)
927 struct ib_device *ib_dev = srq->ibsrq.device;
929 if (unlikely(wr->num_sge > max_sge)) {
931 "failed to check sge, wr->num_sge = %d, max_sge = %u.\n",
932 wr->num_sge, max_sge);
936 if (unlikely(hns_roce_srqwq_overflow(srq))) {
938 "failed to check srqwq status, srqwq is full.\n");
945 static int get_srq_wqe_idx(struct hns_roce_srq *srq, u32 *wqe_idx)
947 struct hns_roce_idx_que *idx_que = &srq->idx_que;
950 pos = find_first_zero_bit(idx_que->bitmap, srq->wqe_cnt);
951 if (unlikely(pos == srq->wqe_cnt))
954 bitmap_set(idx_que->bitmap, pos, 1);
959 static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
961 struct hns_roce_idx_que *idx_que = &srq->idx_que;
965 head = idx_que->head & (srq->wqe_cnt - 1);
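/* Record the WQE index allocated from the bitmap at the current head
 * slot of the index queue.
 */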
967 buf = get_idx_buf(idx_que, head);
968 *buf = cpu_to_le32(wqe_idx);
973 static void update_srq_db(struct hns_roce_v2_db *db, struct hns_roce_srq *srq)
975 hr_reg_write(db, DB_TAG, srq->srqn);
976 hr_reg_write(db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
977 hr_reg_write(db, DB_PI, srq->idx_que.head);
980 static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
981 const struct ib_recv_wr *wr,
982 const struct ib_recv_wr **bad_wr)
984 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
985 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
986 struct hns_roce_v2_db srq_db;
994 spin_lock_irqsave(&srq->lock, flags);
996 max_sge = srq->max_gs - srq->rsv_sge;
997 for (nreq = 0; wr; ++nreq, wr = wr->next) {
998 ret = check_post_srq_valid(srq, max_sge, wr);
1004 ret = get_srq_wqe_idx(srq, &wqe_idx);
1005 if (unlikely(ret)) {
1010 wqe = get_srq_wqe_buf(srq, wqe_idx);
1011 fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
1012 fill_wqe_idx(srq, wqe_idx);
1013 srq->wrid[wqe_idx] = wr->wr_id;
1017 update_srq_db(&srq_db, srq);
1019 hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg);
1022 spin_unlock_irqrestore(&srq->lock, flags);
1027 static u32 hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
1028 unsigned long instance_stage,
1029 unsigned long reset_stage)
1031 /* When hardware reset has completed at least once, we should stop
1032 * sending mailbox, cmq and doorbell operations to hardware. If we are
1033 * currently in the .init_instance() function, we should exit with an
1034 * error. If we are at the HNAE3_INIT_CLIENT stage of the soft reset
1035 * process, we should also exit with an error, so that the
1036 * HNAE3_INIT_CLIENT related process can roll back operations such as
1037 * notifying hardware to free resources, and then exit with an error to
1038 * make the NIC driver reschedule the soft reset process once again.
1040 hr_dev->is_reset = true;
1041 hr_dev->dis_db = true;
1043 if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
1044 instance_stage == HNS_ROCE_STATE_INIT)
1045 return CMD_RST_PRC_EBUSY;
1047 return CMD_RST_PRC_SUCCESS;
1050 static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
1051 unsigned long instance_stage,
1052 unsigned long reset_stage)
1054 #define HW_RESET_TIMEOUT_US 1000000
1055 #define HW_RESET_SLEEP_US 1000
1057 struct hns_roce_v2_priv *priv = hr_dev->priv;
1058 struct hnae3_handle *handle = priv->handle;
1059 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1063 /* When hardware reset is detected, we should stop sending mailbox&cmq&
1064 * doorbell to hardware. If now in .init_instance() function, we should
1065 * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset
1066 * process, we should exit with error, and then HNAE3_INIT_CLIENT
1067 * related process can roll back the operation like notifying hardware to
1068 * free resources, HNAE3_INIT_CLIENT related process will exit with
1069 * error to notify NIC driver to reschedule soft reset process once
1072 hr_dev->dis_db = true;
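/* Poll every HW_RESET_SLEEP_US, for up to HW_RESET_TIMEOUT_US, until the
 * ae_dev reset counter advances, which indicates the hardware reset has
 * completed.
 */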
1074 ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
1075 val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
1076 HW_RESET_TIMEOUT_US, false, handle);
1078 hr_dev->is_reset = true;
1080 if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
1081 instance_stage == HNS_ROCE_STATE_INIT)
1082 return CMD_RST_PRC_EBUSY;
1084 return CMD_RST_PRC_SUCCESS;
1087 static u32 hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
1089 struct hns_roce_v2_priv *priv = hr_dev->priv;
1090 struct hnae3_handle *handle = priv->handle;
1091 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1093 /* When software reset is detected at .init_instance() function, we
1094 * should stop sending mailbox&cmq&doorbell to hardware, and exit
1097 hr_dev->dis_db = true;
1098 if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
1099 hr_dev->is_reset = true;
1101 return CMD_RST_PRC_EBUSY;
1104 static u32 check_aedev_reset_status(struct hns_roce_dev *hr_dev,
1105 struct hnae3_handle *handle)
1107 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1108 unsigned long instance_stage; /* the current instance stage */
1109 unsigned long reset_stage; /* the current reset stage */
1110 unsigned long reset_cnt;
1114 /* Get information about reset from NIC driver or RoCE driver itself,
1115 * the meanings of the following variables from the NIC driver are described
1117 * reset_cnt -- The count value of completed hardware reset.
1118 * hw_resetting -- Whether hardware device is resetting now.
1119 * sw_resetting -- Whether NIC's software reset process is running now.
1121 instance_stage = handle->rinfo.instance_state;
1122 reset_stage = handle->rinfo.reset_state;
1123 reset_cnt = ops->ae_dev_reset_cnt(handle);
1124 if (reset_cnt != hr_dev->reset_cnt)
1125 return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
1128 hw_resetting = ops->get_cmdq_stat(handle);
1130 return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
1133 sw_resetting = ops->ae_dev_resetting(handle);
1134 if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
1135 return hns_roce_v2_cmd_sw_resetting(hr_dev);
1137 return CMD_RST_PRC_OTHERS;
1140 static bool check_device_is_in_reset(struct hns_roce_dev *hr_dev)
1142 struct hns_roce_v2_priv *priv = hr_dev->priv;
1143 struct hnae3_handle *handle = priv->handle;
1144 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1146 if (hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle))
1149 if (ops->get_hw_reset_stat(handle))
1152 if (ops->ae_dev_resetting(handle))
1158 static bool v2_chk_mbox_is_avail(struct hns_roce_dev *hr_dev, bool *busy)
1160 struct hns_roce_v2_priv *priv = hr_dev->priv;
1163 if (hr_dev->is_reset)
1164 status = CMD_RST_PRC_SUCCESS;
1166 status = check_aedev_reset_status(hr_dev, priv->handle);
1168 *busy = (status == CMD_RST_PRC_EBUSY);
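/* The command queue is usable only when no reset handling is required
 * (CMD_RST_PRC_OTHERS).
 */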
1170 return status == CMD_RST_PRC_OTHERS;
1173 static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
1174 struct hns_roce_v2_cmq_ring *ring)
1176 int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
1178 ring->desc = dma_alloc_coherent(hr_dev->dev, size,
1179 &ring->desc_dma_addr, GFP_KERNEL);
1186 static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
1187 struct hns_roce_v2_cmq_ring *ring)
1189 dma_free_coherent(hr_dev->dev,
1190 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
1191 ring->desc, ring->desc_dma_addr);
1193 ring->desc_dma_addr = 0;
1196 static int init_csq(struct hns_roce_dev *hr_dev,
1197 struct hns_roce_v2_cmq_ring *csq)
1202 csq->desc_num = CMD_CSQ_DESC_NUM;
1203 spin_lock_init(&csq->lock);
1204 csq->flag = TYPE_CSQ;
1207 ret = hns_roce_alloc_cmq_desc(hr_dev, csq);
1211 dma = csq->desc_dma_addr;
1212 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, lower_32_bits(dma));
1213 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG, upper_32_bits(dma));
1214 roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
1215 (u32)csq->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
1217 /* Make sure to write CI first and then PI */
1218 roce_write(hr_dev, ROCEE_TX_CMQ_CI_REG, 0);
1219 roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, 0);
1224 static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
1226 struct hns_roce_v2_priv *priv = hr_dev->priv;
1229 priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
1231 ret = init_csq(hr_dev, &priv->cmq.csq);
1233 dev_err(hr_dev->dev, "failed to init CSQ, ret = %d.\n", ret);
1238 static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
1240 struct hns_roce_v2_priv *priv = hr_dev->priv;
1242 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
1245 static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
1246 enum hns_roce_opcode_type opcode,
1249 memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
1250 desc->opcode = cpu_to_le16(opcode);
1251 desc->flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
1253 desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
1255 desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
1258 static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
1260 u32 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
1261 struct hns_roce_v2_priv *priv = hr_dev->priv;
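/* The CSQ has drained once the hardware consumer index (CI) has caught
 * up with the driver's producer index (head).
 */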
1263 return tail == priv->cmq.csq.head;
1266 static void update_cmdq_status(struct hns_roce_dev *hr_dev)
1268 struct hns_roce_v2_priv *priv = hr_dev->priv;
1269 struct hnae3_handle *handle = priv->handle;
1271 if (handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
1272 handle->rinfo.instance_state == HNS_ROCE_STATE_INIT)
1273 hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR;
1276 static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1277 struct hns_roce_cmq_desc *desc, int num)
1279 struct hns_roce_v2_priv *priv = hr_dev->priv;
1280 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
1287 spin_lock_bh(&csq->lock);
1291 for (i = 0; i < num; i++) {
1292 csq->desc[csq->head++] = desc[i];
1293 if (csq->head == csq->desc_num)
1297 /* Write to hardware */
1298 roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, csq->head);
1301 if (hns_roce_cmq_csq_done(hr_dev))
1304 } while (++timeout < priv->cmq.tx_timeout);
1306 if (hns_roce_cmq_csq_done(hr_dev)) {
1308 for (i = 0; i < num; i++) {
1309 /* check the result of hardware write back */
1310 desc[i] = csq->desc[tail++];
1311 if (tail == csq->desc_num)
1314 desc_ret = le16_to_cpu(desc[i].retval);
1315 if (likely(desc_ret == CMD_EXEC_SUCCESS))
1318 dev_err_ratelimited(hr_dev->dev,
1319 "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
1320 desc->opcode, desc_ret);
1324 /* FW/HW reset or incorrect number of desc */
1325 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
1326 dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n",
1330 update_cmdq_status(hr_dev);
1335 spin_unlock_bh(&csq->lock);
1340 static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1341 struct hns_roce_cmq_desc *desc, int num)
1346 if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
1349 if (!v2_chk_mbox_is_avail(hr_dev, &busy))
1350 return busy ? -EBUSY : 0;
1352 ret = __hns_roce_cmq_send(hr_dev, desc, num);
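/* If a reset started while the command was in flight, discard the result
 * and report the current reset state instead.
 */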
1354 if (!v2_chk_mbox_is_avail(hr_dev, &busy))
1355 return busy ? -EBUSY : 0;
1361 static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev,
1362 dma_addr_t base_addr, u8 cmd, unsigned long tag)
1364 struct hns_roce_cmd_mailbox *mbox;
1367 mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
1369 return PTR_ERR(mbox);
1371 ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, cmd, tag);
1372 hns_roce_free_cmd_mailbox(hr_dev, mbox);
1376 static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
1378 struct hns_roce_query_version *resp;
1379 struct hns_roce_cmq_desc desc;
1382 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
1383 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1387 resp = (struct hns_roce_query_version *)desc.data;
1388 hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
1389 hr_dev->vendor_id = hr_dev->pci_dev->vendor;
1394 static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
1395 struct hnae3_handle *handle)
1397 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1400 hr_dev->dis_db = true;
1402 dev_warn(hr_dev->dev,
1403 "func clear is pending, device in resetting state.\n");
1404 end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1406 if (!ops->get_hw_reset_stat(handle)) {
1407 hr_dev->is_reset = true;
1408 dev_info(hr_dev->dev,
1409 "func clear success after reset.\n");
1412 msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1413 end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1416 dev_warn(hr_dev->dev, "func clear failed.\n");
1419 static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
1420 struct hnae3_handle *handle)
1422 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1425 hr_dev->dis_db = true;
1427 dev_warn(hr_dev->dev,
1428 "func clear is pending, device in resetting state.\n");
1429 end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1431 if (ops->ae_dev_reset_cnt(handle) !=
1432 hr_dev->reset_cnt) {
1433 hr_dev->is_reset = true;
1434 dev_info(hr_dev->dev,
1435 "func clear success after sw reset\n");
1438 msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1439 end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1442 dev_warn(hr_dev->dev, "func clear failed because of unfinished sw reset\n");
1445 static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
1448 struct hns_roce_v2_priv *priv = hr_dev->priv;
1449 struct hnae3_handle *handle = priv->handle;
1450 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1452 if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
1453 hr_dev->dis_db = true;
1454 hr_dev->is_reset = true;
1455 dev_info(hr_dev->dev, "func clear success after reset.\n");
1459 if (ops->get_hw_reset_stat(handle)) {
1460 func_clr_hw_resetting_state(hr_dev, handle);
1464 if (ops->ae_dev_resetting(handle) &&
1465 handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) {
1466 func_clr_sw_resetting_state(hr_dev, handle);
1470 if (retval && !flag)
1471 dev_warn(hr_dev->dev,
1472 "func clear read failed, ret = %d.\n", retval);
1474 dev_warn(hr_dev->dev, "func clear failed.\n");
1477 static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
1479 bool fclr_write_fail_flag = false;
1480 struct hns_roce_func_clear *resp;
1481 struct hns_roce_cmq_desc desc;
1485 if (check_device_is_in_reset(hr_dev))
1488 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
1489 resp = (struct hns_roce_func_clear *)desc.data;
1490 resp->rst_funcid_en = cpu_to_le32(vf_id);
1492 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1494 fclr_write_fail_flag = true;
1495 dev_err(hr_dev->dev, "func clear write failed, ret = %d.\n",
1500 msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
1501 end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
1503 if (check_device_is_in_reset(hr_dev))
1505 msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
1506 end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;
1508 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
1511 resp->rst_funcid_en = cpu_to_le32(vf_id);
1512 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1516 if (hr_reg_read(resp, FUNC_CLEAR_RST_FUN_DONE)) {
1518 hr_dev->is_reset = true;
1524 hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag);
1527 static int hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
1529 enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
1530 struct hns_roce_cmq_desc desc[2];
1531 struct hns_roce_cmq_req *req_a;
1533 req_a = (struct hns_roce_cmq_req *)desc[0].data;
1534 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
1535 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1536 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
1537 hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id);
1539 return hns_roce_cmq_send(hr_dev, desc, 2);
1542 static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
1547 if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
1550 for (i = hr_dev->func_num - 1; i >= 0; i--) {
1551 __hns_roce_function_clear(hr_dev, i);
1556 ret = hns_roce_free_vf_resource(hr_dev, i);
1558 ibdev_err(&hr_dev->ib_dev,
1559 "failed to free vf resource, vf_id = %d, ret = %d.\n",
1564 static int hns_roce_clear_extdb_list_info(struct hns_roce_dev *hr_dev)
1566 struct hns_roce_cmq_desc desc;
1569 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO,
1571 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1573 ibdev_err(&hr_dev->ib_dev,
1574 "failed to clear extended doorbell info, ret = %d.\n",
1580 static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
1582 struct hns_roce_query_fw_info *resp;
1583 struct hns_roce_cmq_desc desc;
1586 hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
1587 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1591 resp = (struct hns_roce_query_fw_info *)desc.data;
1592 hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
1597 static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
1599 struct hns_roce_cmq_desc desc;
1602 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
1603 hr_dev->func_num = 1;
1607 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_FUNC_INFO,
1609 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1611 hr_dev->func_num = 1;
1615 hr_dev->func_num = le32_to_cpu(desc.func_info.own_func_num);
1616 hr_dev->cong_algo_tmpl_id = le32_to_cpu(desc.func_info.own_mac_id);
1621 static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
1623 struct hns_roce_cmq_desc desc;
1624 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1625 u32 clock_cycles_of_1us;
1627 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
1630 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
1631 clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
1633 clock_cycles_of_1us = HNS_ROCE_1US_CFG;
1635 hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
1636 hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
1638 return hns_roce_cmq_send(hr_dev, &desc, 1);
1641 static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
1643 struct hns_roce_cmq_desc desc[2];
1644 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
1645 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
1646 struct hns_roce_caps *caps = &hr_dev->caps;
1647 enum hns_roce_opcode_type opcode;
1652 opcode = HNS_ROCE_OPC_QUERY_VF_RES;
1655 opcode = HNS_ROCE_OPC_QUERY_PF_RES;
1656 func_num = hr_dev->func_num;
1659 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, true);
1660 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1661 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, true);
1663 ret = hns_roce_cmq_send(hr_dev, desc, 2);
1667 caps->qpc_bt_num = hr_reg_read(r_a, FUNC_RES_A_QPC_BT_NUM) / func_num;
1668 caps->srqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_SRQC_BT_NUM) / func_num;
1669 caps->cqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_CQC_BT_NUM) / func_num;
1670 caps->mpt_bt_num = hr_reg_read(r_a, FUNC_RES_A_MPT_BT_NUM) / func_num;
1671 caps->eqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_EQC_BT_NUM) / func_num;
1672 caps->smac_bt_num = hr_reg_read(r_b, FUNC_RES_B_SMAC_NUM) / func_num;
1673 caps->sgid_bt_num = hr_reg_read(r_b, FUNC_RES_B_SGID_NUM) / func_num;
1674 caps->sccc_bt_num = hr_reg_read(r_b, FUNC_RES_B_SCCC_BT_NUM) / func_num;
1677 caps->sl_num = hr_reg_read(r_b, FUNC_RES_V_QID_NUM) / func_num;
1678 caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_V_GMV_BT_NUM) /
1681 caps->sl_num = hr_reg_read(r_b, FUNC_RES_B_QID_NUM) / func_num;
1682 caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_B_GMV_BT_NUM) /
1689 static int load_ext_cfg_caps(struct hns_roce_dev *hr_dev, bool is_vf)
1691 struct hns_roce_cmq_desc desc;
1692 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1693 struct hns_roce_caps *caps = &hr_dev->caps;
1694 u32 func_num, qp_num;
1697 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, true);
1698 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1702 func_num = is_vf ? 1 : max_t(u32, 1, hr_dev->func_num);
1703 qp_num = hr_reg_read(req, EXT_CFG_QP_PI_NUM) / func_num;
1704 caps->num_pi_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);
1706 qp_num = hr_reg_read(req, EXT_CFG_QP_NUM) / func_num;
1707 caps->num_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);
1712 static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
1714 struct hns_roce_cmq_desc desc;
1715 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1716 struct hns_roce_caps *caps = &hr_dev->caps;
1719 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
1722 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1726 caps->qpc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_QPC_ITEM_NUM);
1727 caps->cqc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_CQC_ITEM_NUM);
1732 static int query_func_resource_caps(struct hns_roce_dev *hr_dev, bool is_vf)
1734 struct device *dev = hr_dev->dev;
1737 ret = load_func_res_caps(hr_dev, is_vf);
1739 dev_err(dev, "failed to load res caps, ret = %d (%s).\n", ret,
1740 is_vf ? "vf" : "pf");
1744 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
1745 ret = load_ext_cfg_caps(hr_dev, is_vf);
1747 dev_err(dev, "failed to load ext cfg, ret = %d (%s).\n",
1748 ret, is_vf ? "vf" : "pf");
1754 static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
1756 struct device *dev = hr_dev->dev;
1759 ret = query_func_resource_caps(hr_dev, false);
1763 ret = load_pf_timer_res_caps(hr_dev);
1765 dev_err(dev, "failed to load pf timer resource, ret = %d.\n",
1771 static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
1773 return query_func_resource_caps(hr_dev, true);
1776 static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
1779 struct hns_roce_vf_switch *swt;
1780 struct hns_roce_cmq_desc desc;
1783 swt = (struct hns_roce_vf_switch *)desc.data;
1784 hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
1785 swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
1786 hr_reg_write(swt, VF_SWITCH_VF_ID, vf_id);
1787 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1791 desc.flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
1792 desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
1793 hr_reg_enable(swt, VF_SWITCH_ALW_LPBK);
1794 hr_reg_clear(swt, VF_SWITCH_ALW_LCL_LPBK);
1795 hr_reg_enable(swt, VF_SWITCH_ALW_DST_OVRD);
1797 return hns_roce_cmq_send(hr_dev, &desc, 1);
1800 static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev)
1805 for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
1806 ret = __hns_roce_set_vf_switch_param(hr_dev, vf_id);
1813 static int config_vf_hem_resource(struct hns_roce_dev *hr_dev, int vf_id)
1815 struct hns_roce_cmq_desc desc[2];
1816 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
1817 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
1818 enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
1819 struct hns_roce_caps *caps = &hr_dev->caps;
1821 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
1822 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1823 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
1825 hr_reg_write(r_a, FUNC_RES_A_VF_ID, vf_id);
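/* Each VF gets an equal, contiguous slice of every HEM BT resource; the
 * slice start index is vf_id times the per-function count.
 */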
1827 hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num);
1828 hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num);
1829 hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num);
1830 hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num);
1831 hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num);
1832 hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num);
1833 hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num);
1834 hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num);
1835 hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num);
1836 hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num);
1837 hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num);
1838 hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num);
1839 hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num);
1840 hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num);
1842 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
1843 hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num);
1844 hr_reg_write(r_b, FUNC_RES_B_GMV_BT_IDX,
1845 vf_id * caps->gmv_bt_num);
1847 hr_reg_write(r_b, FUNC_RES_B_SGID_NUM, caps->sgid_bt_num);
1848 hr_reg_write(r_b, FUNC_RES_B_SGID_IDX,
1849 vf_id * caps->sgid_bt_num);
1850 hr_reg_write(r_b, FUNC_RES_B_SMAC_NUM, caps->smac_bt_num);
1851 hr_reg_write(r_b, FUNC_RES_B_SMAC_IDX,
1852 vf_id * caps->smac_bt_num);
1855 return hns_roce_cmq_send(hr_dev, desc, 2);
1858 static int config_vf_ext_resource(struct hns_roce_dev *hr_dev, u32 vf_id)
1860 struct hns_roce_cmq_desc desc;
1861 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1862 struct hns_roce_caps *caps = &hr_dev->caps;
1864 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, false);
1866 hr_reg_write(req, EXT_CFG_VF_ID, vf_id);
1868 hr_reg_write(req, EXT_CFG_QP_PI_NUM, caps->num_pi_qps);
1869 hr_reg_write(req, EXT_CFG_QP_PI_IDX, vf_id * caps->num_pi_qps);
1870 hr_reg_write(req, EXT_CFG_QP_NUM, caps->num_qps);
1871 hr_reg_write(req, EXT_CFG_QP_IDX, vf_id * caps->num_qps);
1873 return hns_roce_cmq_send(hr_dev, &desc, 1);
1876 static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
1878 u32 func_num = max_t(u32, 1, hr_dev->func_num);
1882 for (vf_id = 0; vf_id < func_num; vf_id++) {
1883 ret = config_vf_hem_resource(hr_dev, vf_id);
1885 dev_err(hr_dev->dev,
1886 "failed to config vf-%u hem res, ret = %d.\n",
1891 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
1892 ret = config_vf_ext_resource(hr_dev, vf_id);
1894 dev_err(hr_dev->dev,
1895 "failed to config vf-%u ext res, ret = %d.\n",
1905 static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
1907 struct hns_roce_cmq_desc desc;
1908 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1909 struct hns_roce_caps *caps = &hr_dev->caps;
1911 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
1913 hr_reg_write(req, CFG_BT_ATTR_QPC_BA_PGSZ,
1914 caps->qpc_ba_pg_sz + PG_SHIFT_OFFSET);
1915 hr_reg_write(req, CFG_BT_ATTR_QPC_BUF_PGSZ,
1916 caps->qpc_buf_pg_sz + PG_SHIFT_OFFSET);
1917 hr_reg_write(req, CFG_BT_ATTR_QPC_HOPNUM,
1918 to_hr_hem_hopnum(caps->qpc_hop_num, caps->num_qps));
1920 hr_reg_write(req, CFG_BT_ATTR_SRQC_BA_PGSZ,
1921 caps->srqc_ba_pg_sz + PG_SHIFT_OFFSET);
1922 hr_reg_write(req, CFG_BT_ATTR_SRQC_BUF_PGSZ,
1923 caps->srqc_buf_pg_sz + PG_SHIFT_OFFSET);
1924 hr_reg_write(req, CFG_BT_ATTR_SRQC_HOPNUM,
1925 to_hr_hem_hopnum(caps->srqc_hop_num, caps->num_srqs));
1927 hr_reg_write(req, CFG_BT_ATTR_CQC_BA_PGSZ,
1928 caps->cqc_ba_pg_sz + PG_SHIFT_OFFSET);
1929 hr_reg_write(req, CFG_BT_ATTR_CQC_BUF_PGSZ,
1930 caps->cqc_buf_pg_sz + PG_SHIFT_OFFSET);
1931 hr_reg_write(req, CFG_BT_ATTR_CQC_HOPNUM,
1932 to_hr_hem_hopnum(caps->cqc_hop_num, caps->num_cqs));
1934 hr_reg_write(req, CFG_BT_ATTR_MPT_BA_PGSZ,
1935 caps->mpt_ba_pg_sz + PG_SHIFT_OFFSET);
1936 hr_reg_write(req, CFG_BT_ATTR_MPT_BUF_PGSZ,
1937 caps->mpt_buf_pg_sz + PG_SHIFT_OFFSET);
1938 hr_reg_write(req, CFG_BT_ATTR_MPT_HOPNUM,
1939 to_hr_hem_hopnum(caps->mpt_hop_num, caps->num_mtpts));
1941 hr_reg_write(req, CFG_BT_ATTR_SCCC_BA_PGSZ,
1942 caps->sccc_ba_pg_sz + PG_SHIFT_OFFSET);
1943 hr_reg_write(req, CFG_BT_ATTR_SCCC_BUF_PGSZ,
1944 caps->sccc_buf_pg_sz + PG_SHIFT_OFFSET);
1945 hr_reg_write(req, CFG_BT_ATTR_SCCC_HOPNUM,
1946 to_hr_hem_hopnum(caps->sccc_hop_num, caps->num_qps));
1948 return hns_roce_cmq_send(hr_dev, &desc, 1);
1951 /* Use default caps when hns_roce_query_pf_caps() fails or when initializing a VF profile */
1952 static void set_default_caps(struct hns_roce_dev *hr_dev)
1954 struct hns_roce_caps *caps = &hr_dev->caps;
1956 caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
1957 caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
1958 caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
1959 caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
1960 caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
1961 caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
1962 caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
1963 caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
1965 caps->num_uars = HNS_ROCE_V2_UAR_NUM;
1966 caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
1967 caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
1968 caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
1969 caps->num_comp_vectors = 0;
1971 caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
1972 caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
1973 caps->qpc_timer_bt_num = HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM;
1974 caps->cqc_timer_bt_num = HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM;
1976 caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
1977 caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
1978 caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
1979 caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
1980 caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
1981 caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
1982 caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
1983 caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ;
1984 caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
1985 caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ;
1986 caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
1987 caps->reserved_lkey = 0;
1988 caps->reserved_pds = 0;
1989 caps->reserved_mrws = 1;
1990 caps->reserved_uars = 0;
1991 caps->reserved_cqs = 0;
1992 caps->reserved_srqs = 0;
1993 caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;
1995 caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1996 caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1997 caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1998 caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1999 caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
2001 caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
2002 caps->wqe_sq_hop_num = HNS_ROCE_SQWQE_HOP_NUM;
2003 caps->wqe_sge_hop_num = HNS_ROCE_EXT_SGE_HOP_NUM;
2004 caps->wqe_rq_hop_num = HNS_ROCE_RQWQE_HOP_NUM;
2005 caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
2006 caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM;
2007 caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM;
2008 caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
2010 caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
2011 HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
2012 HNS_ROCE_CAP_FLAG_CQ_RECORD_DB |
2013 HNS_ROCE_CAP_FLAG_QP_RECORD_DB;
2015 caps->pkey_table_len[0] = 1;
2016 caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
2017 caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
2018 caps->local_ca_ack_delay = 0;
2019 caps->max_mtu = IB_MTU_4096;
2021 caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
2022 caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;
2024 caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
2025 HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
2026 HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL | HNS_ROCE_CAP_FLAG_XRC;
2028 caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
2030 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
2031 caps->flags |= HNS_ROCE_CAP_FLAG_STASH |
2032 HNS_ROCE_CAP_FLAG_DIRECT_WQE;
2033 caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE;
2035 caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
2037 /* The following configurations are only valid for HIP08 */
2038 caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
2039 caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
2040 caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
2044 static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
2045 u32 *buf_page_size, u32 *bt_page_size, u32 hem_type)
2048 u64 bt_chunk_size = PAGE_SIZE;
2049 u64 buf_chunk_size = PAGE_SIZE;
2050 u64 obj_per_chunk_default = buf_chunk_size / obj_size;
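/* Each level of base-address table multiplies the number of addressable
 * objects by the BA entries per chunk (bt_chunk_size / BA_BYTE_LEN).
 */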
2057 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2058 (bt_chunk_size / BA_BYTE_LEN) *
2059 (bt_chunk_size / BA_BYTE_LEN) *
2060 obj_per_chunk_default;
2063 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2064 (bt_chunk_size / BA_BYTE_LEN) *
2065 obj_per_chunk_default;
2068 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2069 obj_per_chunk_default;
2071 case HNS_ROCE_HOP_NUM_0:
2072 obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
2075 pr_err("table %u not support hop_num = %u!\n", hem_type,
2080 if (hem_type >= HEM_TYPE_MTT)
2081 *bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
2083 *buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
2086 static void set_hem_page_size(struct hns_roce_dev *hr_dev)
2088 struct hns_roce_caps *caps = &hr_dev->caps;
2091 caps->eqe_ba_pg_sz = 0;
2092 caps->eqe_buf_pg_sz = 0;
2095 caps->llm_buf_pg_sz = 0;
2098 caps->mpt_ba_pg_sz = 0;
2099 caps->mpt_buf_pg_sz = 0;
2100 caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
2101 caps->pbl_buf_pg_sz = 0;
2102 calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
2103 caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
2107 caps->qpc_ba_pg_sz = 0;
2108 caps->qpc_buf_pg_sz = 0;
2109 caps->qpc_timer_ba_pg_sz = 0;
2110 caps->qpc_timer_buf_pg_sz = 0;
2111 caps->sccc_ba_pg_sz = 0;
2112 caps->sccc_buf_pg_sz = 0;
2113 caps->mtt_ba_pg_sz = 0;
2114 caps->mtt_buf_pg_sz = 0;
2115 calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
2116 caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
2119 if (caps->flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
2120 calc_pg_sz(caps->num_qps, caps->sccc_sz, caps->sccc_hop_num,
2121 caps->sccc_bt_num, &caps->sccc_buf_pg_sz,
2122 &caps->sccc_ba_pg_sz, HEM_TYPE_SCCC);
2125 caps->cqc_ba_pg_sz = 0;
2126 caps->cqc_buf_pg_sz = 0;
2127 caps->cqc_timer_ba_pg_sz = 0;
2128 caps->cqc_timer_buf_pg_sz = 0;
2129 caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
2130 caps->cqe_buf_pg_sz = 0;
2131 calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
2132 caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
2134 calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num,
2135 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
2138 if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) {
2139 caps->srqc_ba_pg_sz = 0;
2140 caps->srqc_buf_pg_sz = 0;
2141 caps->srqwqe_ba_pg_sz = 0;
2142 caps->srqwqe_buf_pg_sz = 0;
2143 caps->idx_ba_pg_sz = 0;
2144 caps->idx_buf_pg_sz = 0;
2145 calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz,
2146 caps->srqc_hop_num, caps->srqc_bt_num,
2147 &caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz,
2149 calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
2150 caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
2151 &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
2152 calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz,
2153 caps->idx_hop_num, 1, &caps->idx_buf_pg_sz,
2154 &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
2158 caps->gmv_ba_pg_sz = 0;
2159 caps->gmv_buf_pg_sz = 0;
2162 /* Apply all loaded caps before setting to hardware */
2163 static void apply_func_caps(struct hns_roce_dev *hr_dev)
2165 struct hns_roce_caps *caps = &hr_dev->caps;
2166 struct hns_roce_v2_priv *priv = hr_dev->priv;
2168 /* The following configurations don't need to be obtained from firmware. */
2169 caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
2170 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
2171 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
2173 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
2174 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2175 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2177 caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
2178 caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
2180 caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
2181 caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
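/*
 * When firmware does not report a completion vector count, it is derived
 * below from whichever is smaller: the EQ context entries left after the
 * AEQ, or the interrupt vectors left once one AEQ vector and the abnormal
 * vector have been reserved.
 */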
2183 if (!caps->num_comp_vectors)
2184 caps->num_comp_vectors =
2185 min_t(u32, caps->eqc_bt_num - HNS_ROCE_V2_AEQE_VEC_NUM,
2186 (u32)priv->handle->rinfo.num_vectors -
2187 (HNS_ROCE_V2_AEQE_VEC_NUM + HNS_ROCE_V2_ABNORMAL_VEC_NUM));
2189 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
2190 caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
2191 caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
2192 caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
2194 /* The following configurations will be overwritten */
2195 caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
2196 caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
2197 caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
2199 /* The following configurations are not obtained from firmware */
2200 caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
2202 caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
2203 caps->gid_table_len[0] = caps->gmv_bt_num *
2204 (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
2206 caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
2207 caps->gmv_entry_sz);
2209 u32 func_num = max_t(u32, 1, hr_dev->func_num);
2211 caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM;
2212 caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
2213 caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
2214 caps->gid_table_len[0] /= func_num;
2217 if (hr_dev->is_vf) {
2218 caps->default_aeq_arm_st = 0x3;
2219 caps->default_ceq_arm_st = 0x3;
2220 caps->default_ceq_max_cnt = 0x1;
2221 caps->default_ceq_period = 0x10;
2222 caps->default_aeq_max_cnt = 0x1;
2223 caps->default_aeq_period = 0x10;
2226 set_hem_page_size(hr_dev);
2229 static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
2231 struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
2232 struct hns_roce_caps *caps = &hr_dev->caps;
2233 struct hns_roce_query_pf_caps_a *resp_a;
2234 struct hns_roce_query_pf_caps_b *resp_b;
2235 struct hns_roce_query_pf_caps_c *resp_c;
2236 struct hns_roce_query_pf_caps_d *resp_d;
2237 struct hns_roce_query_pf_caps_e *resp_e;
2243 for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
2244 hns_roce_cmq_setup_basic_desc(&desc[i],
2245 HNS_ROCE_OPC_QUERY_PF_CAPS_NUM,
2247 if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
2248 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2250 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2253 ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
2257 resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
2258 resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
2259 resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
2260 resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
2261 resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
2263 caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
2264 caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
2265 caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
2266 caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
2267 caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
2268 caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
2269 caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
2270 caps->num_aeq_vectors = resp_a->num_aeq_vectors;
2271 caps->num_other_vectors = resp_a->num_other_vectors;
2272 caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
2273 caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
2274 caps->cqe_sz = resp_a->cqe_sz;
2276 caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
2277 caps->irrl_entry_sz = resp_b->irrl_entry_sz;
2278 caps->trrl_entry_sz = resp_b->trrl_entry_sz;
2279 caps->cqc_entry_sz = resp_b->cqc_entry_sz;
2280 caps->srqc_entry_sz = resp_b->srqc_entry_sz;
2281 caps->idx_entry_sz = resp_b->idx_entry_sz;
2282 caps->sccc_sz = resp_b->sccc_sz;
2283 caps->max_mtu = resp_b->max_mtu;
2284 caps->qpc_sz = le16_to_cpu(resp_b->qpc_sz);
2285 caps->min_cqes = resp_b->min_cqes;
2286 caps->min_wqes = resp_b->min_wqes;
2287 caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
2288 caps->pkey_table_len[0] = resp_b->pkey_table_len;
2289 caps->phy_num_uars = resp_b->phy_num_uars;
2290 ctx_hop_num = resp_b->ctx_hop_num;
2291 pbl_hop_num = resp_b->pbl_hop_num;
2293 caps->num_pds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_PDS);
2295 caps->flags = hr_reg_read(resp_c, PF_CAPS_C_CAP_FLAGS);
2296 caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
2297 HNS_ROCE_CAP_FLAGS_EX_SHIFT;
2299 caps->num_cqs = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_CQS);
2300 caps->gid_table_len[0] = hr_reg_read(resp_c, PF_CAPS_C_MAX_GID);
2301 caps->max_cqes = 1 << hr_reg_read(resp_c, PF_CAPS_C_CQ_DEPTH);
2302 caps->num_mtpts = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_MRWS);
2303 caps->num_qps = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_QPS);
2304 caps->max_qp_init_rdma = hr_reg_read(resp_c, PF_CAPS_C_MAX_ORD);
2305 caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
2306 caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
2308 caps->num_srqs = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_SRQS);
2309 caps->cong_type = hr_reg_read(resp_d, PF_CAPS_D_CONG_TYPE);
2310 caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
2311 caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH);
2312 caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS);
2313 caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH);
2314 caps->default_aeq_arm_st = hr_reg_read(resp_d, PF_CAPS_D_AEQ_ARM_ST);
2315 caps->default_ceq_arm_st = hr_reg_read(resp_d, PF_CAPS_D_CEQ_ARM_ST);
2316 caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS);
2317 caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS);
2318 caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS);
2319 caps->reserved_uars = hr_reg_read(resp_d, PF_CAPS_D_RSV_UARS);
2321 caps->reserved_mrws = hr_reg_read(resp_e, PF_CAPS_E_RSV_MRWS);
2322 caps->chunk_sz = 1 << hr_reg_read(resp_e, PF_CAPS_E_CHUNK_SIZE_SHIFT);
2323 caps->reserved_cqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_CQS);
2324 caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS);
2325 caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS);
2326 caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
2327 caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
2328 caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
2329 caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
2331 caps->qpc_hop_num = ctx_hop_num;
2332 caps->sccc_hop_num = ctx_hop_num;
2333 caps->srqc_hop_num = ctx_hop_num;
2334 caps->cqc_hop_num = ctx_hop_num;
2335 caps->mpt_hop_num = ctx_hop_num;
2336 caps->mtt_hop_num = pbl_hop_num;
2337 caps->cqe_hop_num = pbl_hop_num;
2338 caps->srqwqe_hop_num = pbl_hop_num;
2339 caps->idx_hop_num = pbl_hop_num;
2340 caps->wqe_sq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_SQWQE_HOP_NUM);
2341 caps->wqe_sge_hop_num = hr_reg_read(resp_d, PF_CAPS_D_EX_SGE_HOP_NUM);
2342 caps->wqe_rq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_RQWQE_HOP_NUM);
2347 static int config_hem_entry_size(struct hns_roce_dev *hr_dev, u32 type, u32 val)
2349 struct hns_roce_cmq_desc desc;
2350 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
2352 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
2355 hr_reg_write(req, CFG_HEM_ENTRY_SIZE_TYPE, type);
2356 hr_reg_write(req, CFG_HEM_ENTRY_SIZE_VALUE, val);
2358 return hns_roce_cmq_send(hr_dev, &desc, 1);
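/*
 * hns_roce_config_entry_size() below reports the QPC and SCCC entry sizes
 * the driver intends to use to the firmware; HIP08 only supports the
 * default sizes, so the function returns early for that revision.
 */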
2361 static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
2363 struct hns_roce_caps *caps = &hr_dev->caps;
2366 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
2369 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
2372 dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
2376 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_SCCC_SIZE,
2379 dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);
2384 static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev)
2386 struct device *dev = hr_dev->dev;
2389 hr_dev->func_num = 1;
2391 set_default_caps(hr_dev);
2393 ret = hns_roce_query_vf_resource(hr_dev);
2395 dev_err(dev, "failed to query VF resource, ret = %d.\n", ret);
2399 apply_func_caps(hr_dev);
2401 ret = hns_roce_v2_set_bt(hr_dev);
2403 dev_err(dev, "failed to config VF BA table, ret = %d.\n", ret);
2408 static int hns_roce_v2_pf_profile(struct hns_roce_dev *hr_dev)
2410 struct device *dev = hr_dev->dev;
2413 ret = hns_roce_query_func_info(hr_dev);
2415 dev_err(dev, "failed to query func info, ret = %d.\n", ret);
2419 ret = hns_roce_config_global_param(hr_dev);
2421 dev_err(dev, "failed to config global param, ret = %d.\n", ret);
2425 ret = hns_roce_set_vf_switch_param(hr_dev);
2427 dev_err(dev, "failed to set switch param, ret = %d.\n", ret);
2431 ret = hns_roce_query_pf_caps(hr_dev);
2433 set_default_caps(hr_dev);
2435 ret = hns_roce_query_pf_resource(hr_dev);
2437 dev_err(dev, "failed to query pf resource, ret = %d.\n", ret);
2441 apply_func_caps(hr_dev);
2443 ret = hns_roce_alloc_vf_resource(hr_dev);
2445 dev_err(dev, "failed to alloc vf resource, ret = %d.\n", ret);
2449 ret = hns_roce_v2_set_bt(hr_dev);
2451 dev_err(dev, "failed to config BA table, ret = %d.\n", ret);
2455 /* Configure the size of QPC, SCCC, etc. */
2456 return hns_roce_config_entry_size(hr_dev);
2459 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
2461 struct device *dev = hr_dev->dev;
2464 ret = hns_roce_cmq_query_hw_info(hr_dev);
2466 dev_err(dev, "failed to query hardware info, ret = %d.\n", ret);
2470 ret = hns_roce_query_fw_ver(hr_dev);
2472 dev_err(dev, "failed to query firmware info, ret = %d.\n", ret);
2476 hr_dev->vendor_part_id = hr_dev->pci_dev->device;
2477 hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
2480 return hns_roce_v2_vf_profile(hr_dev);
2482 return hns_roce_v2_pf_profile(hr_dev);
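/*
 * The extended link-list-mode (LLM) buffer handled below is one large data
 * buffer whose pages are chained through a separate config table: each
 * 64-bit config entry appears to pack a page address together with the
 * index of the next page, with the last entry closing the chain, and the
 * head/tail page addresses are also reported to hardware explicitly.
 */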
2485 static void config_llm_table(struct hns_roce_buf *data_buf, void *cfg_buf)
2487 u32 i, next_ptr, page_num;
2488 __le64 *entry = cfg_buf;
2492 page_num = data_buf->npages;
2493 for (i = 0; i < page_num; i++) {
2494 addr = hns_roce_buf_page(data_buf, i);
2495 if (i == (page_num - 1))
2500 val = HNS_ROCE_EXT_LLM_ENTRY(addr, (u64)next_ptr);
2501 entry[i] = cpu_to_le64(val);
2505 static int set_llm_cfg_to_hw(struct hns_roce_dev *hr_dev,
2506 struct hns_roce_link_table *table)
2508 struct hns_roce_cmq_desc desc[2];
2509 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
2510 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
2511 struct hns_roce_buf *buf = table->buf;
2512 enum hns_roce_opcode_type opcode;
2515 opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
2516 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
2517 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2518 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
2520 hr_reg_write(r_a, CFG_LLM_A_BA_L, lower_32_bits(table->table.map));
2521 hr_reg_write(r_a, CFG_LLM_A_BA_H, upper_32_bits(table->table.map));
2522 hr_reg_write(r_a, CFG_LLM_A_DEPTH, buf->npages);
2523 hr_reg_write(r_a, CFG_LLM_A_PGSZ, to_hr_hw_page_shift(buf->page_shift));
2524 hr_reg_enable(r_a, CFG_LLM_A_INIT_EN);
2526 addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, 0));
2527 hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_L, lower_32_bits(addr));
2528 hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_H, upper_32_bits(addr));
2529 hr_reg_write(r_a, CFG_LLM_A_HEAD_NXTPTR, 1);
2530 hr_reg_write(r_a, CFG_LLM_A_HEAD_PTR, 0);
2532 addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, buf->npages - 1));
2533 hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_L, lower_32_bits(addr));
2534 hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_H, upper_32_bits(addr));
2535 hr_reg_write(r_b, CFG_LLM_B_TAIL_PTR, buf->npages - 1);
2537 return hns_roce_cmq_send(hr_dev, desc, 2);
2540 static struct hns_roce_link_table *
2541 alloc_link_table_buf(struct hns_roce_dev *hr_dev)
2543 struct hns_roce_v2_priv *priv = hr_dev->priv;
2544 struct hns_roce_link_table *link_tbl;
2545 u32 pg_shift, size, min_size;
2547 link_tbl = &priv->ext_llm;
2548 pg_shift = hr_dev->caps.llm_buf_pg_sz + PAGE_SHIFT;
2549 size = hr_dev->caps.num_qps * HNS_ROCE_V2_EXT_LLM_ENTRY_SZ;
2550 min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(hr_dev->caps.sl_num) << pg_shift;
2552 /* Alloc data table */
2553 size = max(size, min_size);
2554 link_tbl->buf = hns_roce_buf_alloc(hr_dev, size, pg_shift, 0);
2555 if (IS_ERR(link_tbl->buf))
2556 return ERR_PTR(-ENOMEM);
2558 /* Alloc config table */
2559 size = link_tbl->buf->npages * sizeof(u64);
2560 link_tbl->table.buf = dma_alloc_coherent(hr_dev->dev, size,
2561 &link_tbl->table.map,
2563 if (!link_tbl->table.buf) {
2564 hns_roce_buf_free(hr_dev, link_tbl->buf);
2565 return ERR_PTR(-ENOMEM);
2571 static void free_link_table_buf(struct hns_roce_dev *hr_dev,
2572 struct hns_roce_link_table *tbl)
2575 u32 size = tbl->buf->npages * sizeof(u64);
2577 dma_free_coherent(hr_dev->dev, size, tbl->table.buf,
2581 hns_roce_buf_free(hr_dev, tbl->buf);
2584 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev)
2586 struct hns_roce_link_table *link_tbl;
2589 link_tbl = alloc_link_table_buf(hr_dev);
2590 if (IS_ERR(link_tbl))
2593 if (WARN_ON(link_tbl->buf->npages > HNS_ROCE_V2_EXT_LLM_MAX_DEPTH)) {
2598 config_llm_table(link_tbl->buf, link_tbl->table.buf);
2599 ret = set_llm_cfg_to_hw(hr_dev, link_tbl);
2606 free_link_table_buf(hr_dev, link_tbl);
2610 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev)
2612 struct hns_roce_v2_priv *priv = hr_dev->priv;
2614 free_link_table_buf(hr_dev, &priv->ext_llm);
2617 static void free_dip_list(struct hns_roce_dev *hr_dev)
2619 struct hns_roce_dip *hr_dip;
2620 struct hns_roce_dip *tmp;
2621 unsigned long flags;
2623 spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
2625 list_for_each_entry_safe(hr_dip, tmp, &hr_dev->dip_list, node) {
2626 list_del(&hr_dip->node);
2630 spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
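/*
 * The free_mr_* helpers below set up a small reserved verbs context (one
 * PD, one CQ and a few loopback RC QPs). On HIP08 it is used when an MR is
 * deregistered: a loopback RDMA WRITE is posted on each reserved QP and its
 * completion is polled, which appears to flush any outstanding hardware
 * access to the MR before it is released.
 */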
2633 static struct ib_pd *free_mr_init_pd(struct hns_roce_dev *hr_dev)
2635 struct hns_roce_v2_priv *priv = hr_dev->priv;
2636 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2637 struct ib_device *ibdev = &hr_dev->ib_dev;
2638 struct hns_roce_pd *hr_pd;
2641 hr_pd = kzalloc(sizeof(*hr_pd), GFP_KERNEL);
2642 if (ZERO_OR_NULL_PTR(hr_pd))
2647 if (hns_roce_alloc_pd(pd, NULL)) {
2648 ibdev_err(ibdev, "failed to create pd for free mr.\n");
2652 free_mr->rsv_pd = to_hr_pd(pd);
2653 free_mr->rsv_pd->ibpd.device = &hr_dev->ib_dev;
2654 free_mr->rsv_pd->ibpd.uobject = NULL;
2655 free_mr->rsv_pd->ibpd.__internal_mr = NULL;
2656 atomic_set(&free_mr->rsv_pd->ibpd.usecnt, 0);
2661 static struct ib_cq *free_mr_init_cq(struct hns_roce_dev *hr_dev)
2663 struct hns_roce_v2_priv *priv = hr_dev->priv;
2664 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2665 struct ib_device *ibdev = &hr_dev->ib_dev;
2666 struct ib_cq_init_attr cq_init_attr = {};
2667 struct hns_roce_cq *hr_cq;
2670 cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
2672 hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
2673 if (ZERO_OR_NULL_PTR(hr_cq))
2679 if (hns_roce_create_cq(cq, &cq_init_attr, NULL)) {
2680 ibdev_err(ibdev, "failed to create cq for free mr.\n");
2684 free_mr->rsv_cq = to_hr_cq(cq);
2685 free_mr->rsv_cq->ib_cq.device = &hr_dev->ib_dev;
2686 free_mr->rsv_cq->ib_cq.uobject = NULL;
2687 free_mr->rsv_cq->ib_cq.comp_handler = NULL;
2688 free_mr->rsv_cq->ib_cq.event_handler = NULL;
2689 free_mr->rsv_cq->ib_cq.cq_context = NULL;
2690 atomic_set(&free_mr->rsv_cq->ib_cq.usecnt, 0);
2695 static int free_mr_init_qp(struct hns_roce_dev *hr_dev, struct ib_cq *cq,
2696 struct ib_qp_init_attr *init_attr, int i)
2698 struct hns_roce_v2_priv *priv = hr_dev->priv;
2699 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2700 struct ib_device *ibdev = &hr_dev->ib_dev;
2701 struct hns_roce_qp *hr_qp;
2705 hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
2706 if (ZERO_OR_NULL_PTR(hr_qp))
2712 ret = hns_roce_create_qp(qp, init_attr, NULL);
2714 ibdev_err(ibdev, "failed to create qp for free mr.\n");
2719 free_mr->rsv_qp[i] = hr_qp;
2720 free_mr->rsv_qp[i]->ibqp.recv_cq = cq;
2721 free_mr->rsv_qp[i]->ibqp.send_cq = cq;
2726 static void free_mr_exit(struct hns_roce_dev *hr_dev)
2728 struct hns_roce_v2_priv *priv = hr_dev->priv;
2729 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2733 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
2734 if (free_mr->rsv_qp[i]) {
2735 qp = &free_mr->rsv_qp[i]->ibqp;
2736 hns_roce_v2_destroy_qp(qp, NULL);
2737 kfree(free_mr->rsv_qp[i]);
2738 free_mr->rsv_qp[i] = NULL;
2742 if (free_mr->rsv_cq) {
2743 hns_roce_destroy_cq(&free_mr->rsv_cq->ib_cq, NULL);
2744 kfree(free_mr->rsv_cq);
2745 free_mr->rsv_cq = NULL;
2748 if (free_mr->rsv_pd) {
2749 hns_roce_dealloc_pd(&free_mr->rsv_pd->ibpd, NULL);
2750 kfree(free_mr->rsv_pd);
2751 free_mr->rsv_pd = NULL;
2755 static int free_mr_alloc_res(struct hns_roce_dev *hr_dev)
2757 struct hns_roce_v2_priv *priv = hr_dev->priv;
2758 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2759 struct ib_qp_init_attr qp_init_attr = {};
2765 pd = free_mr_init_pd(hr_dev);
2769 cq = free_mr_init_cq(hr_dev);
2772 goto create_failed_cq;
2775 qp_init_attr.qp_type = IB_QPT_RC;
2776 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2777 qp_init_attr.send_cq = cq;
2778 qp_init_attr.recv_cq = cq;
2779 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
2780 qp_init_attr.cap.max_send_wr = HNS_ROCE_FREE_MR_USED_SQWQE_NUM;
2781 qp_init_attr.cap.max_send_sge = HNS_ROCE_FREE_MR_USED_SQSGE_NUM;
2782 qp_init_attr.cap.max_recv_wr = HNS_ROCE_FREE_MR_USED_RQWQE_NUM;
2783 qp_init_attr.cap.max_recv_sge = HNS_ROCE_FREE_MR_USED_RQSGE_NUM;
2785 ret = free_mr_init_qp(hr_dev, cq, &qp_init_attr, i);
2787 goto create_failed_qp;
2793 hns_roce_destroy_cq(cq, NULL);
2797 hns_roce_dealloc_pd(pd, NULL);
2803 static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
2804 struct ib_qp_attr *attr, int sl_num)
2806 struct hns_roce_v2_priv *priv = hr_dev->priv;
2807 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2808 struct ib_device *ibdev = &hr_dev->ib_dev;
2809 struct hns_roce_qp *hr_qp;
2814 hr_qp = to_hr_qp(&free_mr->rsv_qp[sl_num]->ibqp);
2815 hr_qp->free_mr_en = 1;
2816 hr_qp->ibqp.device = ibdev;
2817 hr_qp->ibqp.qp_type = IB_QPT_RC;
2819 mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS;
2820 attr->qp_state = IB_QPS_INIT;
2822 attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
2823 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
2826 ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n",
2831 loopback = hr_dev->loop_idc;
2832 /* Set qpc lbi = 1 to indicate loopback IO */
2833 hr_dev->loop_idc = 1;
2835 mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
2836 IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
2837 attr->qp_state = IB_QPS_RTR;
2838 attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2839 attr->path_mtu = IB_MTU_256;
2840 attr->dest_qp_num = hr_qp->qpn;
2841 attr->rq_psn = HNS_ROCE_FREE_MR_USED_PSN;
2843 rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num);
2845 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
2847 hr_dev->loop_idc = loopback;
2849 ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n",
2854 mask = IB_QP_STATE | IB_QP_SQ_PSN | IB_QP_RETRY_CNT | IB_QP_TIMEOUT |
2855 IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC;
2856 attr->qp_state = IB_QPS_RTS;
2857 attr->sq_psn = HNS_ROCE_FREE_MR_USED_PSN;
2858 attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT;
2859 attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT;
2860 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_RTR,
2863 ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n",
2869 static int free_mr_modify_qp(struct hns_roce_dev *hr_dev)
2871 struct hns_roce_v2_priv *priv = hr_dev->priv;
2872 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2873 struct ib_qp_attr attr = {};
2877 rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
2878 rdma_ah_set_static_rate(&attr.ah_attr, 3);
2879 rdma_ah_set_port_num(&attr.ah_attr, 1);
2881 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
2882 ret = free_mr_modify_rsv_qp(hr_dev, &attr, i);
2890 static int free_mr_init(struct hns_roce_dev *hr_dev)
2892 struct hns_roce_v2_priv *priv = hr_dev->priv;
2893 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2896 mutex_init(&free_mr->mutex);
2898 ret = free_mr_alloc_res(hr_dev);
2902 ret = free_mr_modify_qp(hr_dev);
2909 free_mr_exit(hr_dev);
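/*
 * get_hem_table() below pins the HEM chunks that are used by hardware for
 * the whole lifetime of the device (the GMV entries plus the QPC and CQC
 * timer tables), so they are acquired once at init time rather than on
 * demand per object.
 */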
2914 static int get_hem_table(struct hns_roce_dev *hr_dev)
2916 unsigned int qpc_count;
2917 unsigned int cqc_count;
2918 unsigned int gmv_count;
2922 /* Alloc memory for source address table buffer space chunk */
2923 for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
2925 ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
2927 goto err_gmv_failed;
2933 /* Alloc memory for QPC Timer buffer space chunk */
2934 for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
2936 ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
2939 dev_err(hr_dev->dev, "QPC Timer get failed\n");
2940 goto err_qpc_timer_failed;
2944 /* Alloc memory for CQC Timer buffer space chunk */
2945 for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
2947 ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
2950 dev_err(hr_dev->dev, "CQC Timer get failed\n");
2951 goto err_cqc_timer_failed;
2957 err_cqc_timer_failed:
2958 for (i = 0; i < cqc_count; i++)
2959 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2961 err_qpc_timer_failed:
2962 for (i = 0; i < qpc_count; i++)
2963 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2966 for (i = 0; i < gmv_count; i++)
2967 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
2972 static void put_hem_table(struct hns_roce_dev *hr_dev)
2976 for (i = 0; i < hr_dev->caps.gmv_entry_num; i++)
2977 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
2982 for (i = 0; i < hr_dev->caps.qpc_timer_bt_num; i++)
2983 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2985 for (i = 0; i < hr_dev->caps.cqc_timer_bt_num; i++)
2986 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2989 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
2993 /* The hns ROCEE requires the extdb info to be cleared before use */
2994 ret = hns_roce_clear_extdb_list_info(hr_dev);
2998 ret = get_hem_table(hr_dev);
3005 ret = hns_roce_init_link_table(hr_dev);
3007 dev_err(hr_dev->dev, "failed to init llm, ret = %d.\n", ret);
3008 goto err_llm_init_failed;
3013 err_llm_init_failed:
3014 put_hem_table(hr_dev);
3019 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
3021 hns_roce_function_clear(hr_dev);
3024 hns_roce_free_link_table(hr_dev);
3026 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
3027 free_dip_list(hr_dev);
3030 static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev,
3031 struct hns_roce_mbox_msg *mbox_msg)
3033 struct hns_roce_cmq_desc desc;
3034 struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
3036 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
3038 mb->in_param_l = cpu_to_le32(mbox_msg->in_param);
3039 mb->in_param_h = cpu_to_le32(mbox_msg->in_param >> 32);
3040 mb->out_param_l = cpu_to_le32(mbox_msg->out_param);
3041 mb->out_param_h = cpu_to_le32(mbox_msg->out_param >> 32);
3042 mb->cmd_tag = cpu_to_le32(mbox_msg->tag << 8 | mbox_msg->cmd);
3043 mb->token_event_en = cpu_to_le32(mbox_msg->event_en << 16 |
3046 return hns_roce_cmq_send(hr_dev, &desc, 1);
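/*
 * v2_wait_mbox_complete() polls the QUERY_MB_ST command until the mailbox
 * "hardware running" bit clears, a fatal command-queue error is seen, or
 * the timeout expires; on success it returns the completion status field so
 * the caller can tell a finished mailbox operation from a busy one.
 */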
3049 static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout,
3050 u8 *complete_status)
3052 struct hns_roce_mbox_status *mb_st;
3053 struct hns_roce_cmq_desc desc;
3059 mb_st = (struct hns_roce_mbox_status *)desc.data;
3060 end = msecs_to_jiffies(timeout) + jiffies;
3061 while (v2_chk_mbox_is_avail(hr_dev, &busy)) {
3062 if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
3066 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST,
3068 ret = __hns_roce_cmq_send(hr_dev, &desc, 1);
3070 status = le32_to_cpu(mb_st->mb_status_hw_run);
3071 /* No pending message exists in ROCEE mbox. */
3072 if (!(status & MB_ST_HW_RUN_M))
3074 } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
3078 if (time_after(jiffies, end)) {
3079 dev_err_ratelimited(hr_dev->dev,
3080 "failed to wait mbox status 0x%x\n",
3090 *complete_status = (u8)(status & MB_ST_COMPLETE_M);
3091 } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
3092 /* Ignore all errors if the mbox is unavailable. */
3094 *complete_status = MB_ST_COMPLETE_M;
3100 static int v2_post_mbox(struct hns_roce_dev *hr_dev,
3101 struct hns_roce_mbox_msg *mbox_msg)
3106 /* Wait for the mbox to be idle */
3107 ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS,
3109 if (unlikely(ret)) {
3110 dev_err_ratelimited(hr_dev->dev,
3111 "failed to check post mbox status = 0x%x, ret = %d.\n",
3116 /* Post new message to mbox */
3117 ret = hns_roce_mbox_post(hr_dev, mbox_msg);
3119 dev_err_ratelimited(hr_dev->dev,
3120 "failed to post mailbox, ret = %d.\n", ret);
3125 static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev)
3130 ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_CMD_TIMEOUT_MSECS,
3133 if (status != MB_ST_COMPLETE_SUCC)
3136 dev_err_ratelimited(hr_dev->dev,
3137 "failed to check mbox status = 0x%x, ret = %d.\n",
3144 static void copy_gid(void *dest, const union ib_gid *gid)
3147 const union ib_gid *src = gid;
3148 __le32 (*p)[GID_SIZE] = dest;
3154 for (i = 0; i < GID_SIZE; i++)
3155 (*p)[i] = cpu_to_le32(*(u32 *)&src->raw[i * sizeof(u32)]);
3158 static int config_sgid_table(struct hns_roce_dev *hr_dev,
3159 int gid_index, const union ib_gid *gid,
3160 enum hns_roce_sgid_type sgid_type)
3162 struct hns_roce_cmq_desc desc;
3163 struct hns_roce_cfg_sgid_tb *sgid_tb =
3164 (struct hns_roce_cfg_sgid_tb *)desc.data;
3166 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
3168 hr_reg_write(sgid_tb, CFG_SGID_TB_TABLE_IDX, gid_index);
3169 hr_reg_write(sgid_tb, CFG_SGID_TB_VF_SGID_TYPE, sgid_type);
3171 copy_gid(&sgid_tb->vf_sgid_l, gid);
3173 return hns_roce_cmq_send(hr_dev, &desc, 1);
3176 static int config_gmv_table(struct hns_roce_dev *hr_dev,
3177 int gid_index, const union ib_gid *gid,
3178 enum hns_roce_sgid_type sgid_type,
3179 const struct ib_gid_attr *attr)
3181 struct hns_roce_cmq_desc desc[2];
3182 struct hns_roce_cfg_gmv_tb_a *tb_a =
3183 (struct hns_roce_cfg_gmv_tb_a *)desc[0].data;
3184 struct hns_roce_cfg_gmv_tb_b *tb_b =
3185 (struct hns_roce_cfg_gmv_tb_b *)desc[1].data;
3187 u16 vlan_id = VLAN_CFI_MASK;
3188 u8 mac[ETH_ALEN] = {};
3192 ret = rdma_read_gid_l2_fields(attr, &vlan_id, mac);
3197 hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_CFG_GMV_TBL, false);
3198 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
3200 hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_CFG_GMV_TBL, false);
3202 copy_gid(&tb_a->vf_sgid_l, gid);
3204 hr_reg_write(tb_a, GMV_TB_A_VF_SGID_TYPE, sgid_type);
3205 hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_EN, vlan_id < VLAN_CFI_MASK);
3206 hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_ID, vlan_id);
3208 tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac);
3210 hr_reg_write(tb_b, GMV_TB_B_SMAC_H, *(u16 *)&mac[4]);
3211 hr_reg_write(tb_b, GMV_TB_B_SGID_IDX, gid_index);
3213 return hns_roce_cmq_send(hr_dev, desc, 2);
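/*
 * hns_roce_v2_set_gid() picks between two programming paths: HIP09 and
 * later keep GID, MAC and VLAN together in the GMV table, written with a
 * pair of chained command-queue descriptors, while earlier revisions use
 * the single-descriptor SGID table command and program the MAC separately
 * through hns_roce_v2_set_mac().
 */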
3216 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index,
3217 const union ib_gid *gid,
3218 const struct ib_gid_attr *attr)
3220 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
3224 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
3225 if (ipv6_addr_v4mapped((void *)gid))
3226 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
3228 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
3229 } else if (attr->gid_type == IB_GID_TYPE_ROCE) {
3230 sgid_type = GID_TYPE_FLAG_ROCE_V1;
3234 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
3235 ret = config_gmv_table(hr_dev, gid_index, gid, sgid_type, attr);
3237 ret = config_sgid_table(hr_dev, gid_index, gid, sgid_type);
3240 ibdev_err(&hr_dev->ib_dev, "failed to set gid, ret = %d!\n",
3246 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
3249 struct hns_roce_cmq_desc desc;
3250 struct hns_roce_cfg_smac_tb *smac_tb =
3251 (struct hns_roce_cfg_smac_tb *)desc.data;
3255 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
3257 reg_smac_l = *(u32 *)(&addr[0]);
3258 reg_smac_h = *(u16 *)(&addr[4]);
3260 hr_reg_write(smac_tb, CFG_SMAC_TB_IDX, phy_port);
3261 hr_reg_write(smac_tb, CFG_SMAC_TB_VF_SMAC_H, reg_smac_h);
3262 smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
3264 return hns_roce_cmq_send(hr_dev, &desc, 1);
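/*
 * In the MPT entry the PBL base address is stored in units of 8 bytes,
 * which is why both the low and the high halves below are taken from
 * (pbl_ba >> 3); the PA entries themselves are first aligned to the
 * hardware address access unit in the loop over the found pages.
 */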
3267 static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
3268 struct hns_roce_v2_mpt_entry *mpt_entry,
3269 struct hns_roce_mr *mr)
3271 u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
3272 struct ib_device *ibdev = &hr_dev->ib_dev;
3276 count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
3277 ARRAY_SIZE(pages), &pbl_ba);
3279 ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
3284 /* Aligned to the hardware address access unit */
3285 for (i = 0; i < count; i++)
3288 mpt_entry->pbl_size = cpu_to_le32(mr->npages);
3289 mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
3290 hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3));
3292 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
3293 hr_reg_write(mpt_entry, MPT_PA0_H, upper_32_bits(pages[0]));
3295 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
3296 hr_reg_write(mpt_entry, MPT_PA1_H, upper_32_bits(pages[1]));
3297 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
3298 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
3303 static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
3304 void *mb_buf, struct hns_roce_mr *mr)
3306 struct hns_roce_v2_mpt_entry *mpt_entry;
3309 memset(mpt_entry, 0, sizeof(*mpt_entry));
3311 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
3312 hr_reg_write(mpt_entry, MPT_PD, mr->pd);
3314 hr_reg_write_bool(mpt_entry, MPT_BIND_EN,
3315 mr->access & IB_ACCESS_MW_BIND);
3316 hr_reg_write_bool(mpt_entry, MPT_ATOMIC_EN,
3317 mr->access & IB_ACCESS_REMOTE_ATOMIC);
3318 hr_reg_write_bool(mpt_entry, MPT_RR_EN,
3319 mr->access & IB_ACCESS_REMOTE_READ);
3320 hr_reg_write_bool(mpt_entry, MPT_RW_EN,
3321 mr->access & IB_ACCESS_REMOTE_WRITE);
3322 hr_reg_write_bool(mpt_entry, MPT_LW_EN,
3323 mr->access & IB_ACCESS_LOCAL_WRITE);
3325 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
3326 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
3327 mpt_entry->lkey = cpu_to_le32(mr->key);
3328 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
3329 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
3331 if (mr->type != MR_TYPE_MR)
3332 hr_reg_enable(mpt_entry, MPT_PA);
3334 if (mr->type == MR_TYPE_DMA)
3337 if (mr->pbl_hop_num != HNS_ROCE_HOP_NUM_0)
3338 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num);
3340 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
3341 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
3342 hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD);
3344 return set_mtpt_pbl(hr_dev, mpt_entry, mr);
3347 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
3348 struct hns_roce_mr *mr, int flags,
3351 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
3352 u32 mr_access_flags = mr->access;
3355 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
3356 hr_reg_write(mpt_entry, MPT_PD, mr->pd);
3358 if (flags & IB_MR_REREG_ACCESS) {
3359 hr_reg_write(mpt_entry, MPT_BIND_EN,
3360 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
3361 hr_reg_write(mpt_entry, MPT_ATOMIC_EN,
3362 mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
3363 hr_reg_write(mpt_entry, MPT_RR_EN,
3364 mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
3365 hr_reg_write(mpt_entry, MPT_RW_EN,
3366 mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
3367 hr_reg_write(mpt_entry, MPT_LW_EN,
3368 mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
3371 if (flags & IB_MR_REREG_TRANS) {
3372 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
3373 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
3374 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
3375 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
3377 ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
3383 static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
3384 void *mb_buf, struct hns_roce_mr *mr)
3386 struct ib_device *ibdev = &hr_dev->ib_dev;
3387 struct hns_roce_v2_mpt_entry *mpt_entry;
3388 dma_addr_t pbl_ba = 0;
3391 memset(mpt_entry, 0, sizeof(*mpt_entry));
3393 if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
3394 ibdev_err(ibdev, "failed to find frmr mtr.\n");
3398 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
3399 hr_reg_write(mpt_entry, MPT_PD, mr->pd);
3401 hr_reg_enable(mpt_entry, MPT_RA_EN);
3402 hr_reg_enable(mpt_entry, MPT_R_INV_EN);
3404 hr_reg_enable(mpt_entry, MPT_FRE);
3405 hr_reg_clear(mpt_entry, MPT_MR_MW);
3406 hr_reg_enable(mpt_entry, MPT_BPD);
3407 hr_reg_clear(mpt_entry, MPT_PA);
3409 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, 1);
3410 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
3411 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
3412 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
3413 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
3415 mpt_entry->pbl_size = cpu_to_le32(mr->npages);
3417 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3));
3418 hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3));
3423 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
3425 struct hns_roce_v2_mpt_entry *mpt_entry;
3428 memset(mpt_entry, 0, sizeof(*mpt_entry));
3430 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
3431 hr_reg_write(mpt_entry, MPT_PD, mw->pdn);
3433 hr_reg_enable(mpt_entry, MPT_R_INV_EN);
3434 hr_reg_enable(mpt_entry, MPT_LW_EN);
3436 hr_reg_enable(mpt_entry, MPT_MR_MW);
3437 hr_reg_enable(mpt_entry, MPT_BPD);
3438 hr_reg_clear(mpt_entry, MPT_PA);
3439 hr_reg_write(mpt_entry, MPT_BQP,
3440 mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
3442 mpt_entry->lkey = cpu_to_le32(mw->rkey);
3444 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM,
3445 mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
3447 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
3448 mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
3449 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
3450 mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
3455 static int free_mr_post_send_lp_wqe(struct hns_roce_qp *hr_qp)
3457 struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
3458 struct ib_device *ibdev = &hr_dev->ib_dev;
3459 const struct ib_send_wr *bad_wr;
3460 struct ib_rdma_wr rdma_wr = {};
3461 struct ib_send_wr *send_wr;
3464 send_wr = &rdma_wr.wr;
3465 send_wr->opcode = IB_WR_RDMA_WRITE;
3467 ret = hns_roce_v2_post_send(&hr_qp->ibqp, send_wr, &bad_wr);
3469 ibdev_err(ibdev, "failed to post wqe for free mr, ret = %d.\n",
3477 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
3480 static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev)
3482 struct hns_roce_v2_priv *priv = hr_dev->priv;
3483 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
3484 struct ib_wc wc[ARRAY_SIZE(free_mr->rsv_qp)];
3485 struct ib_device *ibdev = &hr_dev->ib_dev;
3486 struct hns_roce_qp *hr_qp;
3494 * If device initialization is not complete, or the device is being
3495 * uninstalled, there is no need to execute the free mr flow.
3497 if (priv->handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
3498 priv->handle->rinfo.instance_state == HNS_ROCE_STATE_INIT ||
3499 hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT)
3502 mutex_lock(&free_mr->mutex);
3504 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
3505 hr_qp = free_mr->rsv_qp[i];
3507 ret = free_mr_post_send_lp_wqe(hr_qp);
3510 "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n",
3518 end = msecs_to_jiffies(HNS_ROCE_V2_FREE_MR_TIMEOUT) + jiffies;
3520 npolled = hns_roce_v2_poll_cq(&free_mr->rsv_cq->ib_cq, cqe_cnt, wc);
3523 "failed to poll cqe for free mr, remain %d cqe.\n",
3528 if (time_after(jiffies, end)) {
3530 "failed to poll cqe for free mr and timeout, remain %d cqe.\n",
3538 mutex_unlock(&free_mr->mutex);
3541 static void hns_roce_v2_dereg_mr(struct hns_roce_dev *hr_dev)
3543 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
3544 free_mr_send_cmd_to_hw(hr_dev);
3547 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
3549 return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
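/*
 * get_sw_cqe_v2() treats the owner bit as a phase bit: the ring wraps every
 * cq_depth entries, so a CQE belongs to software only while its owner bit
 * differs from the phase derived from the consumer index. update_cq_db()
 * then publishes the new consumer index either through the record doorbell
 * in host memory or through a 64-bit write to the doorbell register.
 */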
3552 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
3554 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
3556 /* The CQE is valid when its owner bit is the inverse of the MSB of cons_idx */
3557 return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe :
3561 static inline void update_cq_db(struct hns_roce_dev *hr_dev,
3562 struct hns_roce_cq *hr_cq)
3564 if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) {
3565 *hr_cq->set_ci_db = hr_cq->cons_index & V2_CQ_DB_CONS_IDX_M;
3567 struct hns_roce_v2_db cq_db = {};
3569 hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
3570 hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB);
3571 hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
3572 hr_reg_write(&cq_db, DB_CQ_CMD_SN, 1);
3574 hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
3578 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
3579 struct hns_roce_srq *srq)
3581 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3582 struct hns_roce_v2_cqe *cqe, *dest;
3588 for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
3590 if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
3595 * Now walk backwards through the CQ, removing CQ entries
3596 * that match our QP by overwriting them with the next entries.
3598 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
3599 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
3600 if (hr_reg_read(cqe, CQE_LCL_QPN) == qpn) {
3601 if (srq && hr_reg_read(cqe, CQE_S_R)) {
3602 wqe_index = hr_reg_read(cqe, CQE_WQE_IDX);
3603 hns_roce_free_srq_wqe(srq, wqe_index);
3606 } else if (nfreed) {
3607 dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
3609 owner_bit = hr_reg_read(dest, CQE_OWNER);
3610 memcpy(dest, cqe, hr_cq->cqe_size);
3611 hr_reg_write(dest, CQE_OWNER, owner_bit);
3616 hr_cq->cons_index += nfreed;
3617 update_cq_db(hr_dev, hr_cq);
3621 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
3622 struct hns_roce_srq *srq)
3624 spin_lock_irq(&hr_cq->lock);
3625 __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
3626 spin_unlock_irq(&hr_cq->lock);
3629 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
3630 struct hns_roce_cq *hr_cq, void *mb_buf,
3631 u64 *mtts, dma_addr_t dma_handle)
3633 struct hns_roce_v2_cq_context *cq_context;
3635 cq_context = mb_buf;
3636 memset(cq_context, 0, sizeof(*cq_context));
3638 hr_reg_write(cq_context, CQC_CQ_ST, V2_CQ_STATE_VALID);
3639 hr_reg_write(cq_context, CQC_ARM_ST, NO_ARMED);
3640 hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth));
3641 hr_reg_write(cq_context, CQC_CEQN, hr_cq->vector);
3642 hr_reg_write(cq_context, CQC_CQN, hr_cq->cqn);
3644 if (hr_cq->cqe_size == HNS_ROCE_V3_CQE_SIZE)
3645 hr_reg_write(cq_context, CQC_CQE_SIZE, CQE_SIZE_64B);
3647 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
3648 hr_reg_enable(cq_context, CQC_STASH);
3650 hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_L,
3651 to_hr_hw_page_addr(mtts[0]));
3652 hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_H,
3653 upper_32_bits(to_hr_hw_page_addr(mtts[0])));
3654 hr_reg_write(cq_context, CQC_CQE_HOP_NUM, hr_dev->caps.cqe_hop_num ==
3655 HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
3656 hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_L,
3657 to_hr_hw_page_addr(mtts[1]));
3658 hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_H,
3659 upper_32_bits(to_hr_hw_page_addr(mtts[1])));
3660 hr_reg_write(cq_context, CQC_CQE_BAR_PG_SZ,
3661 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
3662 hr_reg_write(cq_context, CQC_CQE_BUF_PG_SZ,
3663 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
3664 hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> 3);
3665 hr_reg_write(cq_context, CQC_CQE_BA_H, (dma_handle >> (32 + 3)));
3666 hr_reg_write_bool(cq_context, CQC_DB_RECORD_EN,
3667 hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB);
3668 hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_L,
3669 ((u32)hr_cq->db.dma) >> 1);
3670 hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_H,
3671 hr_cq->db.dma >> 32);
3672 hr_reg_write(cq_context, CQC_CQ_MAX_CNT,
3673 HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
3674 hr_reg_write(cq_context, CQC_CQ_PERIOD,
3675 HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
3678 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
3679 enum ib_cq_notify_flags flags)
3681 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3682 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3683 struct hns_roce_v2_db cq_db = {};
3687 * flags = 0, then notify_flag : next
3688 * flags = 1, then notify_flag : solicited
3690 notify_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
3691 V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
3693 hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
3694 hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB_NOTIFY);
3695 hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
3696 hr_reg_write(&cq_db, DB_CQ_CMD_SN, hr_cq->arm_sn);
3697 hr_reg_write(&cq_db, DB_CQ_NOTIFY, notify_flag);
3699 hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
3704 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
3705 struct hns_roce_qp *qp,
3708 struct hns_roce_rinl_sge *sge_list;
3709 u32 wr_num, wr_cnt, sge_num;
3710 u32 sge_cnt, data_len, size;
3713 wr_num = hr_reg_read(cqe, CQE_WQE_IDX);
3714 wr_cnt = wr_num & (qp->rq.wqe_cnt - 1);
3716 sge_list = qp->rq_inl_buf.wqe_list[wr_cnt].sg_list;
3717 sge_num = qp->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
3718 wqe_buf = hns_roce_get_recv_wqe(qp, wr_cnt);
3719 data_len = wc->byte_len;
3721 for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
3722 size = min(sge_list[sge_cnt].len, data_len);
3723 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
3729 if (unlikely(data_len)) {
3730 wc->status = IB_WC_LOC_LEN_ERR;
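/*
 * sw_comp() and hns_roce_v2_sw_poll_cq() below generate completions purely
 * in software: once the device has reached the UNINIT state after a reset,
 * every WQE still pending on the tracked SQs and RQs is reported back to
 * the user as IB_WC_WR_FLUSH_ERR.
 */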
3737 static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
3738 int num_entries, struct ib_wc *wc)
3743 left = wq->head - wq->tail;
3747 left = min_t(unsigned int, (unsigned int)num_entries, left);
3748 while (npolled < left) {
3749 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3750 wc->status = IB_WC_WR_FLUSH_ERR;
3752 wc->qp = &hr_qp->ibqp;
3762 static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
3765 struct hns_roce_qp *hr_qp;
3768 list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
3769 npolled += sw_comp(hr_qp, &hr_qp->sq,
3770 num_entries - npolled, wc + npolled);
3771 if (npolled >= num_entries)
3775 list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
3776 npolled += sw_comp(hr_qp, &hr_qp->rq,
3777 num_entries - npolled, wc + npolled);
3778 if (npolled >= num_entries)
3786 static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
3787 struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
3790 static const struct {
3792 enum ib_wc_status wc_status;
3794 { HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
3795 { HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
3796 { HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
3797 { HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
3798 { HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
3799 { HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
3800 { HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
3801 { HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
3802 { HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
3803 { HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
3804 { HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
3805 { HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
3806 IB_WC_RETRY_EXC_ERR },
3807 { HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
3808 { HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
3809 { HNS_ROCE_CQE_V2_GENERAL_ERR, IB_WC_GENERAL_ERR}
3812 u32 cqe_status = hr_reg_read(cqe, CQE_STATUS);
3815 wc->status = IB_WC_GENERAL_ERR;
3816 for (i = 0; i < ARRAY_SIZE(map); i++)
3817 if (cqe_status == map[i].cqe_status) {
3818 wc->status = map[i].wc_status;
3822 if (likely(wc->status == IB_WC_SUCCESS ||
3823 wc->status == IB_WC_WR_FLUSH_ERR))
3826 ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
3827 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
3828 cq->cqe_size, false);
3829 wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS);
3832 * For hns ROCEE, GENERAL_ERR is an error type that is not defined in
3833 * the standard protocol; the driver must ignore it and need not set
3834 * the QP to an error state.
3836 if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
3839 flush_cqe(hr_dev, qp);
3842 static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
3843 struct hns_roce_qp **cur_qp)
3845 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3846 struct hns_roce_qp *hr_qp = *cur_qp;
3849 qpn = hr_reg_read(cqe, CQE_LCL_QPN);
3851 if (!hr_qp || qpn != hr_qp->qpn) {
3852 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
3853 if (unlikely(!hr_qp)) {
3854 ibdev_err(&hr_dev->ib_dev,
3855 "CQ %06lx with entry for unknown QPN %06x\n",
3866 * mapped-value = 1 + real-value
3867 * The ib wc opcode's real value starts from 0. In order to distinguish
3868 * between initialized and uninitialized map values, we add 1 to the actual
3869 * value when defining the mapping, so that validity can be identified by
3870 * checking whether the mapped value is greater than 0.
3872 #define HR_WC_OP_MAP(hr_key, ib_key) \
3873 [HNS_ROCE_V2_WQE_OP_ ## hr_key] = 1 + IB_WC_ ## ib_key
3875 static const u32 wc_send_op_map[] = {
3876 HR_WC_OP_MAP(SEND, SEND),
3877 HR_WC_OP_MAP(SEND_WITH_INV, SEND),
3878 HR_WC_OP_MAP(SEND_WITH_IMM, SEND),
3879 HR_WC_OP_MAP(RDMA_READ, RDMA_READ),
3880 HR_WC_OP_MAP(RDMA_WRITE, RDMA_WRITE),
3881 HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE),
3882 HR_WC_OP_MAP(ATOM_CMP_AND_SWAP, COMP_SWAP),
3883 HR_WC_OP_MAP(ATOM_FETCH_AND_ADD, FETCH_ADD),
3884 HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP),
3885 HR_WC_OP_MAP(ATOM_MSK_FETCH_AND_ADD, MASKED_FETCH_ADD),
3886 HR_WC_OP_MAP(FAST_REG_PMR, REG_MR),
3887 HR_WC_OP_MAP(BIND_MW, REG_MR),
3890 static int to_ib_wc_send_op(u32 hr_opcode)
3892 if (hr_opcode >= ARRAY_SIZE(wc_send_op_map))
3895 return wc_send_op_map[hr_opcode] ? wc_send_op_map[hr_opcode] - 1 :
3899 static const u32 wc_recv_op_map[] = {
3900 HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, WITH_IMM),
3901 HR_WC_OP_MAP(SEND, RECV),
3902 HR_WC_OP_MAP(SEND_WITH_IMM, WITH_IMM),
3903 HR_WC_OP_MAP(SEND_WITH_INV, RECV),
3906 static int to_ib_wc_recv_op(u32 hr_opcode)
3908 if (hr_opcode >= ARRAY_SIZE(wc_recv_op_map))
3911 return wc_recv_op_map[hr_opcode] ? wc_recv_op_map[hr_opcode] - 1 :
3915 static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
3922 hr_opcode = hr_reg_read(cqe, CQE_OPCODE);
3923 switch (hr_opcode) {
3924 case HNS_ROCE_V2_WQE_OP_RDMA_READ:
3925 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3927 case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
3928 case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
3929 wc->wc_flags |= IB_WC_WITH_IMM;
3931 case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
3932 case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
3933 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
3934 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
3941 ib_opcode = to_ib_wc_send_op(hr_opcode);
3943 wc->status = IB_WC_GENERAL_ERR;
3945 wc->opcode = ib_opcode;
3948 static inline bool is_rq_inl_enabled(struct ib_wc *wc, u32 hr_opcode,
3949 struct hns_roce_v2_cqe *cqe)
3951 return wc->qp->qp_type != IB_QPT_UD && wc->qp->qp_type != IB_QPT_GSI &&
3952 (hr_opcode == HNS_ROCE_V2_OPCODE_SEND ||
3953 hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
3954 hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
3955 hr_reg_read(cqe, CQE_RQ_INLINE);
3958 static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
3960 struct hns_roce_qp *qp = to_hr_qp(wc->qp);
3965 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3967 hr_opcode = hr_reg_read(cqe, CQE_OPCODE);
3968 switch (hr_opcode) {
3969 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
3970 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
3971 wc->wc_flags = IB_WC_WITH_IMM;
3972 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immtdata));
3974 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
3975 wc->wc_flags = IB_WC_WITH_INVALIDATE;
3976 wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
3982 ib_opcode = to_ib_wc_recv_op(hr_opcode);
3984 wc->status = IB_WC_GENERAL_ERR;
3986 wc->opcode = ib_opcode;
3988 if (is_rq_inl_enabled(wc, hr_opcode, cqe)) {
3989 ret = hns_roce_handle_recv_inl_wqe(cqe, qp, wc);
3994 wc->sl = hr_reg_read(cqe, CQE_SL);
3995 wc->src_qp = hr_reg_read(cqe, CQE_RMT_QPN);
3997 wc->wc_flags |= hr_reg_read(cqe, CQE_GRH) ? IB_WC_GRH : 0;
3998 wc->port_num = hr_reg_read(cqe, CQE_PORTN);
4001 if (hr_reg_read(cqe, CQE_VID_VLD)) {
4002 wc->vlan_id = hr_reg_read(cqe, CQE_VID);
4003 wc->wc_flags |= IB_WC_WITH_VLAN;
4005 wc->vlan_id = 0xffff;
4008 wc->network_hdr_type = hr_reg_read(cqe, CQE_PORT_TYPE);
4013 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
4014 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
4016 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
4017 struct hns_roce_qp *qp = *cur_qp;
4018 struct hns_roce_srq *srq = NULL;
4019 struct hns_roce_v2_cqe *cqe;
4020 struct hns_roce_wq *wq;
4025 cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
4029 ++hr_cq->cons_index;
4030 /* Memory barrier */
4033 ret = get_cur_qp(hr_cq, cqe, &qp);
4040 wqe_idx = hr_reg_read(cqe, CQE_WQE_IDX);
4042 is_send = !hr_reg_read(cqe, CQE_S_R);
4046 /* If sq_signal_bits is set, the tail pointer is updated to
4047 * the WQE corresponding to the current CQE.
4049 if (qp->sq_signal_bits)
4050 wq->tail += (wqe_idx - (u16)wq->tail) &
4053 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
4056 fill_send_wc(wc, cqe);
4059 srq = to_hr_srq(qp->ibqp.srq);
4060 wc->wr_id = srq->wrid[wqe_idx];
4061 hns_roce_free_srq_wqe(srq, wqe_idx);
4064 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
4068 ret = fill_recv_wc(wc, cqe);
4071 get_cqe_status(hr_dev, qp, hr_cq, cqe, wc);
4072 if (unlikely(wc->status != IB_WC_SUCCESS))
4078 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
4081 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
4082 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
4083 struct hns_roce_qp *cur_qp = NULL;
4084 unsigned long flags;
4087 spin_lock_irqsave(&hr_cq->lock, flags);
4090 * When the device starts to reset, the state is RST_DOWN. At this time,
4091 * there may still be some valid CQEs in the hardware that are not
4092 * polled. Therefore, it is not allowed to switch to the software mode
4093 * immediately. Once the state changes to UNINIT, no CQEs remain in
4094 * the hardware, and it is safe to switch to software mode.
4096 if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
4097 npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
4101 for (npolled = 0; npolled < num_entries; ++npolled) {
4102 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
4107 update_cq_db(hr_dev, hr_cq);
4110 spin_unlock_irqrestore(&hr_cq->lock, flags);
4115 static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
4116 u32 step_idx, u8 *mbox_cmd)
4122 cmd = HNS_ROCE_CMD_WRITE_QPC_BT0;
4125 cmd = HNS_ROCE_CMD_WRITE_MPT_BT0;
4128 cmd = HNS_ROCE_CMD_WRITE_CQC_BT0;
4131 cmd = HNS_ROCE_CMD_WRITE_SRQC_BT0;
4134 cmd = HNS_ROCE_CMD_WRITE_SCCC_BT0;
4136 case HEM_TYPE_QPC_TIMER:
4137 cmd = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
4139 case HEM_TYPE_CQC_TIMER:
4140 cmd = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
4143 dev_warn(hr_dev->dev, "failed to check hem type %u.\n", type);
4147 *mbox_cmd = cmd + step_idx;
4152 static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
4153 dma_addr_t base_addr)
4155 struct hns_roce_cmq_desc desc;
4156 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
4157 u32 idx = obj / (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz);
4158 u64 addr = to_hr_hw_page_addr(base_addr);
4160 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
4162 hr_reg_write(req, CFG_GMV_BT_BA_L, lower_32_bits(addr));
4163 hr_reg_write(req, CFG_GMV_BT_BA_H, upper_32_bits(addr));
4164 hr_reg_write(req, CFG_GMV_BT_IDX, idx);
4166 return hns_roce_cmq_send(hr_dev, &desc, 1);
4169 static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj,
4170 dma_addr_t base_addr, u32 hem_type, u32 step_idx)
4175 if (unlikely(hem_type == HEM_TYPE_GMV))
4176 return config_gmv_ba_to_hw(hr_dev, obj, base_addr);
4178 if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx))
4181 ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &cmd);
4185 return config_hem_ba_to_hw(hr_dev, base_addr, cmd, obj);
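/*
 * hns_roce_v2_set_hem() walks the multi-hop base-address tables for one
 * object: with two hops the leaf index is i * chunk_ba_num^2 +
 * j * chunk_ba_num + k, with one hop it is i * chunk_ba_num + j, and only
 * the final step writes the HEM page addresses themselves; earlier steps
 * write the L0/L1 BT DMA addresses instead.
 */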
4188 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
4189 struct hns_roce_hem_table *table, int obj,
4192 struct hns_roce_hem_iter iter;
4193 struct hns_roce_hem_mhop mhop;
4194 struct hns_roce_hem *hem;
4195 unsigned long mhop_obj = obj;
4204 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
4207 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
4211 hop_num = mhop.hop_num;
4212 chunk_ba_num = mhop.bt_chunk_size / 8;
4215 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
4217 l1_idx = i * chunk_ba_num + j;
4218 } else if (hop_num == 1) {
4219 hem_idx = i * chunk_ba_num + j;
4220 } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
4224 if (table->type == HEM_TYPE_SCCC)
4227 if (check_whether_last_step(hop_num, step_idx)) {
4228 hem = table->hem[hem_idx];
4229 for (hns_roce_hem_first(hem, &iter);
4230 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
4231 bt_ba = hns_roce_hem_addr(&iter);
4232 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type,
4237 bt_ba = table->bt_l0_dma_addr[i];
4238 else if (step_idx == 1 && hop_num == 2)
4239 bt_ba = table->bt_l1_dma_addr[l1_idx];
4241 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, step_idx);
4247 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
4248 struct hns_roce_hem_table *table,
4249 int tag, u32 step_idx)
4251 struct hns_roce_cmd_mailbox *mailbox;
4252 struct device *dev = hr_dev->dev;
4256 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
4259 switch (table->type) {
4261 cmd = HNS_ROCE_CMD_DESTROY_QPC_BT0;
4264 cmd = HNS_ROCE_CMD_DESTROY_MPT_BT0;
4267 cmd = HNS_ROCE_CMD_DESTROY_CQC_BT0;
4270 cmd = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
4273 case HEM_TYPE_QPC_TIMER:
4274 case HEM_TYPE_CQC_TIMER:
4278 dev_warn(dev, "table %u not to be destroyed by mailbox!\n",
4285 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4286 if (IS_ERR(mailbox))
4287 return PTR_ERR(mailbox);
4289 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cmd, tag);
4291 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4295 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
4296 struct hns_roce_v2_qp_context *context,
4297 struct hns_roce_v2_qp_context *qpc_mask,
4298 struct hns_roce_qp *hr_qp)
4300 struct hns_roce_cmd_mailbox *mailbox;
4304 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4305 if (IS_ERR(mailbox))
4306 return PTR_ERR(mailbox);
4308 /* The QPC size of HIP08 is only 256B, which is half that of HIP09 */
4309 qpc_size = hr_dev->caps.qpc_sz;
4310 memcpy(mailbox->buf, context, qpc_size);
4311 memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size);
4313 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
4314 HNS_ROCE_CMD_MODIFY_QPC, hr_qp->qpn);
4316 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4321 static void set_access_flags(struct hns_roce_qp *hr_qp,
4322 struct hns_roce_v2_qp_context *context,
4323 struct hns_roce_v2_qp_context *qpc_mask,
4324 const struct ib_qp_attr *attr, int attr_mask)
4329 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
4330 attr->max_dest_rd_atomic : hr_qp->resp_depth;
4332 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
4333 attr->qp_access_flags : hr_qp->atomic_rd_en;
4335 if (!dest_rd_atomic)
4336 access_flags &= IB_ACCESS_REMOTE_WRITE;
4338 hr_reg_write_bool(context, QPC_RRE,
4339 access_flags & IB_ACCESS_REMOTE_READ);
4340 hr_reg_clear(qpc_mask, QPC_RRE);
4342 hr_reg_write_bool(context, QPC_RWE,
4343 access_flags & IB_ACCESS_REMOTE_WRITE);
4344 hr_reg_clear(qpc_mask, QPC_RWE);
4346 hr_reg_write_bool(context, QPC_ATE,
4347 access_flags & IB_ACCESS_REMOTE_ATOMIC);
4348 hr_reg_clear(qpc_mask, QPC_ATE);
4349 hr_reg_write_bool(context, QPC_EXT_ATE,
4350 access_flags & IB_ACCESS_REMOTE_ATOMIC);
4351 hr_reg_clear(qpc_mask, QPC_EXT_ATE);
4354 static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
4355 struct hns_roce_v2_qp_context *context,
4356 struct hns_roce_v2_qp_context *qpc_mask)
4358 hr_reg_write(context, QPC_SGE_SHIFT,
4359 to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
4360 hr_qp->sge.sge_shift));
4362 hr_reg_write(context, QPC_SQ_SHIFT, ilog2(hr_qp->sq.wqe_cnt));
4364 hr_reg_write(context, QPC_RQ_SHIFT, ilog2(hr_qp->rq.wqe_cnt));
4367 static inline int get_cqn(struct ib_cq *ib_cq)
4369 return ib_cq ? to_hr_cq(ib_cq)->cqn : 0;
4372 static inline int get_pdn(struct ib_pd *ib_pd)
4374 return ib_pd ? to_hr_pd(ib_pd)->pdn : 0;
4377 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
4378 const struct ib_qp_attr *attr,
4379 struct hns_roce_v2_qp_context *context,
4380 struct hns_roce_v2_qp_context *qpc_mask)
4382 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4383 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4386 * In the v2 engine, software passes a context and a context mask to
4387 * hardware when modifying a QP. To modify a field in the context, all
4388 * bits of that field in the context mask must be cleared to 0 at the
4389 * same time; otherwise they are left set to 0x1.
4391 hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type));
4393 hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd));
4395 hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs));
4397 set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
4399 /* When there is no VLAN, the VLAN ID must be set to 0xFFF */
4400 hr_reg_write(context, QPC_VLAN_ID, 0xfff);
4402 if (ibqp->qp_type == IB_QPT_XRC_TGT) {
4403 context->qkey_xrcd = cpu_to_le32(hr_qp->xrcdn);
4405 hr_reg_enable(context, QPC_XRC_QP_TYPE);
4408 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
4409 hr_reg_enable(context, QPC_RQ_RECORD_EN);
4411 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
4412 hr_reg_enable(context, QPC_OWNER_MODE);
4414 hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_L,
4415 lower_32_bits(hr_qp->rdb.dma) >> 1);
4416 hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H,
4417 upper_32_bits(hr_qp->rdb.dma));
4419 if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI)
4420 hr_reg_write_bool(context, QPC_RQIE,
4421 hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE);
4423 hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
4426 hr_reg_enable(context, QPC_SRQ_EN);
4427 hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
4430 hr_reg_enable(context, QPC_FRE);
4432 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));
4434 if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ)
4437 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
4438 hr_reg_enable(&context->ext, QPCEX_STASH);
4441 static void modify_qp_init_to_init(struct ib_qp *ibqp,
4442 const struct ib_qp_attr *attr,
4443 struct hns_roce_v2_qp_context *context,
4444 struct hns_roce_v2_qp_context *qpc_mask)
4447 * In the v2 engine, software passes a context and a context mask to
4448 * hardware when modifying a QP. To modify a field in the context, all
4449 * bits of that field in the context mask must be cleared to 0 at the
4450 * same time; otherwise they are left set to 0x1.
4452 hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type));
4453 hr_reg_clear(qpc_mask, QPC_TST);
4455 hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd));
4456 hr_reg_clear(qpc_mask, QPC_PD);
4458 hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
4459 hr_reg_clear(qpc_mask, QPC_RX_CQN);
4461 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));
4462 hr_reg_clear(qpc_mask, QPC_TX_CQN);
4465 hr_reg_enable(context, QPC_SRQ_EN);
4466 hr_reg_clear(qpc_mask, QPC_SRQ_EN);
4467 hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
4468 hr_reg_clear(qpc_mask, QPC_SRQN);
4472 static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
4473 struct hns_roce_qp *hr_qp,
4474 struct hns_roce_v2_qp_context *context,
4475 struct hns_roce_v2_qp_context *qpc_mask)
4477 u64 mtts[MTT_MIN_COUNT] = { 0 };
4481 /* Search qp buf's mtts */
4482 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
4483 MTT_MIN_COUNT, &wqe_sge_ba);
4484 if (hr_qp->rq.wqe_cnt && count < 1) {
4485 ibdev_err(&hr_dev->ib_dev,
4486 "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn);
4490 context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
4491 qpc_mask->wqe_sge_ba = 0;
4494 * In the v2 engine, software passes a context and a context mask to
4495 * hardware when modifying a QP. To modify a field in the context, all
4496 * bits of that field in the context mask must be cleared to 0 at the
4497 * same time; otherwise they are left set to 0x1.
4499 hr_reg_write(context, QPC_WQE_SGE_BA_H, wqe_sge_ba >> (32 + 3));
4500 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_H);
4502 hr_reg_write(context, QPC_SQ_HOP_NUM,
4503 to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num,
4504 hr_qp->sq.wqe_cnt));
4505 hr_reg_clear(qpc_mask, QPC_SQ_HOP_NUM);
4507 hr_reg_write(context, QPC_SGE_HOP_NUM,
4508 to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num,
4509 hr_qp->sge.sge_cnt));
4510 hr_reg_clear(qpc_mask, QPC_SGE_HOP_NUM);
4512 hr_reg_write(context, QPC_RQ_HOP_NUM,
4513 to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num,
4514 hr_qp->rq.wqe_cnt));
4516 hr_reg_clear(qpc_mask, QPC_RQ_HOP_NUM);
4518 hr_reg_write(context, QPC_WQE_SGE_BA_PG_SZ,
4519 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
4520 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_PG_SZ);
4522 hr_reg_write(context, QPC_WQE_SGE_BUF_PG_SZ,
4523 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
4524 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BUF_PG_SZ);
4526 context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
4527 qpc_mask->rq_cur_blk_addr = 0;
4529 hr_reg_write(context, QPC_RQ_CUR_BLK_ADDR_H,
4530 upper_32_bits(to_hr_hw_page_addr(mtts[0])));
4531 hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H);
4533 context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
4534 qpc_mask->rq_nxt_blk_addr = 0;
4536 hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H,
4537 upper_32_bits(to_hr_hw_page_addr(mtts[1])));
4538 hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H);
4543 static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
4544 struct hns_roce_qp *hr_qp,
4545 struct hns_roce_v2_qp_context *context,
4546 struct hns_roce_v2_qp_context *qpc_mask)
4548 struct ib_device *ibdev = &hr_dev->ib_dev;
4549 u64 sge_cur_blk = 0;
4553 /* search qp buf's mtts */
4554 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
4556 ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
4560 if (hr_qp->sge.sge_cnt > 0) {
4561 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
4563 &sge_cur_blk, 1, NULL);
4565 ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
4572 * In the v2 engine, software passes a context and a context mask to
4573 * hardware when modifying a QP. To modify a field in the context, all
4574 * bits of that field in the context mask must be cleared to 0 at the
4575 * same time; otherwise they are left set to 0x1.
4577 hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_L,
4578 lower_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4579 hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_H,
4580 upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4581 hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_L);
4582 hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_H);
4584 hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_L,
4585 lower_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
4586 hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_H,
4587 upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
4588 hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_L);
4589 hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_H);
4591 hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_L,
4592 lower_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4593 hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_H,
4594 upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4595 hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_L);
4596 hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_H);
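/* GSI and UD QPs use a fixed MTU rather than the caller-supplied path_mtu;
 * other QP types use attr->path_mtu.
 */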
4601 static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
4602 const struct ib_qp_attr *attr)
4604 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
4607 return attr->path_mtu;
4610 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
4611 const struct ib_qp_attr *attr, int attr_mask,
4612 struct hns_roce_v2_qp_context *context,
4613 struct hns_roce_v2_qp_context *qpc_mask)
4615 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4616 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4617 struct ib_device *ibdev = &hr_dev->ib_dev;
4629 ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
4631 ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret);
4635 /* Search IRRL's mtts */
4636 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
4637 hr_qp->qpn, &irrl_ba);
4639 ibdev_err(ibdev, "failed to find qp irrl_table.\n");
4643 /* Search TRRL's mtts */
4644 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
4645 hr_qp->qpn, &trrl_ba);
4647 ibdev_err(ibdev, "failed to find qp trrl_table.\n");
4651 if (attr_mask & IB_QP_ALT_PATH) {
4652 ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n",
4657 hr_reg_write(context, QPC_TRRL_BA_L, trrl_ba >> 4);
4658 hr_reg_clear(qpc_mask, QPC_TRRL_BA_L);
4659 context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4));
4660 qpc_mask->trrl_ba = 0;
4661 hr_reg_write(context, QPC_TRRL_BA_H, trrl_ba >> (32 + 16 + 4));
4662 hr_reg_clear(qpc_mask, QPC_TRRL_BA_H);
4664 context->irrl_ba = cpu_to_le32(irrl_ba >> 6);
4665 qpc_mask->irrl_ba = 0;
4666 hr_reg_write(context, QPC_IRRL_BA_H, irrl_ba >> (32 + 6));
4667 hr_reg_clear(qpc_mask, QPC_IRRL_BA_H);
4669 hr_reg_enable(context, QPC_RMT_E2E);
4670 hr_reg_clear(qpc_mask, QPC_RMT_E2E);
4672 hr_reg_write(context, QPC_SIG_TYPE, hr_qp->sq_signal_bits);
4673 hr_reg_clear(qpc_mask, QPC_SIG_TYPE);
4675 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
4677 smac = (const u8 *)hr_dev->dev_addr[port];
4678 dmac = (u8 *)attr->ah_attr.roce.dmac;
4679 /* When dmac equals smac or loop_idc is 1, the traffic should be looped back */
4680 if (ether_addr_equal_unaligned(dmac, smac) ||
4681 hr_dev->loop_idc == 0x1) {
4682 hr_reg_write(context, QPC_LBI, hr_dev->loop_idc);
4683 hr_reg_clear(qpc_mask, QPC_LBI);
4686 if (attr_mask & IB_QP_DEST_QPN) {
4687 hr_reg_write(context, QPC_DQPN, attr->dest_qp_num);
4688 hr_reg_clear(qpc_mask, QPC_DQPN);
4691 memcpy(&context->dmac, dmac, sizeof(u32));
4692 hr_reg_write(context, QPC_DMAC_H, *((u16 *)(&dmac[4])));
4694 hr_reg_clear(qpc_mask, QPC_DMAC_H);
4696 ib_mtu = get_mtu(ibqp, attr);
4697 hr_qp->path_mtu = ib_mtu;
4699 mtu = ib_mtu_enum_to_int(ib_mtu);
4700 if (WARN_ON(mtu <= 0))
4702 #define MAX_LP_MSG_LEN 16384
4703 /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 16KB */
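/* e.g. a 1024-byte MTU gives lp_pktn_ini = ilog2(16384 / 1024) = 4 */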
4704 lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
4705 if (WARN_ON(lp_pktn_ini >= 0xF))
4708 if (attr_mask & IB_QP_PATH_MTU) {
4709 hr_reg_write(context, QPC_MTU, ib_mtu);
4710 hr_reg_clear(qpc_mask, QPC_MTU);
4713 hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
4714 hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
4716 /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
4717 hr_reg_write(context, QPC_ACK_REQ_FREQ, lp_pktn_ini);
4718 hr_reg_clear(qpc_mask, QPC_ACK_REQ_FREQ);
4720 hr_reg_clear(qpc_mask, QPC_RX_REQ_PSN_ERR);
4721 hr_reg_clear(qpc_mask, QPC_RX_REQ_MSN);
4722 hr_reg_clear(qpc_mask, QPC_RX_REQ_LAST_OPTYPE);
4724 context->rq_rnr_timer = 0;
4725 qpc_mask->rq_rnr_timer = 0;
4727 hr_reg_clear(qpc_mask, QPC_TRRL_HEAD_MAX);
4728 hr_reg_clear(qpc_mask, QPC_TRRL_TAIL_MAX);
4730 /* RoCEE sends 2^lp_sgen_ini segments each time */
4731 hr_reg_write(context, QPC_LP_SGEN_INI, 3);
4732 hr_reg_clear(qpc_mask, QPC_LP_SGEN_INI);
4737 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
4738 const struct ib_qp_attr *attr, int attr_mask,
4739 struct hns_roce_v2_qp_context *context,
4740 struct hns_roce_v2_qp_context *qpc_mask)
4742 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4743 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4744 struct ib_device *ibdev = &hr_dev->ib_dev;
4747 /* Alternate path and path migration are not supported */
4748 if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
4749 ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
4753 ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
4755 ibdev_err(ibdev, "failed to config sq buf, ret = %d.\n", ret);
4760 * Clear some fields in the context. Because the default value of
4761 * every field in the context is already zero, the fields need not be
4762 * written again; only the relevant fields of the context mask are cleared to 0.
4764 hr_reg_clear(qpc_mask, QPC_IRRL_SGE_IDX);
4766 hr_reg_clear(qpc_mask, QPC_RX_ACK_MSN);
4768 hr_reg_clear(qpc_mask, QPC_ACK_LAST_OPTYPE);
4769 hr_reg_clear(qpc_mask, QPC_IRRL_PSN_VLD);
4770 hr_reg_clear(qpc_mask, QPC_IRRL_PSN);
4772 hr_reg_clear(qpc_mask, QPC_IRRL_TAIL_REAL);
4774 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_MSN);
4776 hr_reg_clear(qpc_mask, QPC_RNR_RETRY_FLAG);
4778 hr_reg_clear(qpc_mask, QPC_CHECK_FLG);
4780 hr_reg_clear(qpc_mask, QPC_V2_IRRL_HEAD);
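/* Find the DIP context index for the destination GID: reuse the index of
 * an existing entry in the DIP list, or allocate a new one from the spare
 * index queue and add it to the list.
 */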
4785 static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
4788 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4789 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4790 u32 *spare_idx = hr_dev->qp_table.idx_table.spare_idx;
4791 u32 *head = &hr_dev->qp_table.idx_table.head;
4792 u32 *tail = &hr_dev->qp_table.idx_table.tail;
4793 struct hns_roce_dip *hr_dip;
4794 unsigned long flags;
4797 spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
4799 spare_idx[*tail] = ibqp->qp_num;
4800 *tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1);
4802 list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
4803 if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) {
4804 *dip_idx = hr_dip->dip_idx;
4809 /* If no dgid is found, a new dip and a mapping between dgid and
4810 * dip_idx will be created.
4812 hr_dip = kzalloc(sizeof(*hr_dip), GFP_ATOMIC);
4818 memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4819 hr_dip->dip_idx = *dip_idx = spare_idx[*head];
4820 *head = (*head == hr_dev->caps.num_qps - 1) ? 0 : (*head + 1);
4821 list_add_tail(&hr_dip->node, &hr_dev->dip_list);
4824 spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
4834 UNSUPPORT_CONG_LEVEL,
4853 static int check_cong_type(struct ib_qp *ibqp,
4854 struct hns_roce_congestion_algorithm *cong_alg)
4856 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4858 /* different congestion types match different configurations */
4859 switch (hr_dev->caps.cong_type) {
4860 case CONG_TYPE_DCQCN:
4861 cong_alg->alg_sel = CONG_DCQCN;
4862 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
4863 cong_alg->dip_vld = DIP_INVALID;
4864 cong_alg->wnd_mode_sel = WND_LIMIT;
4866 case CONG_TYPE_LDCP:
4867 cong_alg->alg_sel = CONG_WINDOW;
4868 cong_alg->alg_sub_sel = CONG_LDCP;
4869 cong_alg->dip_vld = DIP_INVALID;
4870 cong_alg->wnd_mode_sel = WND_UNLIMIT;
4873 cong_alg->alg_sel = CONG_WINDOW;
4874 cong_alg->alg_sub_sel = CONG_HC3;
4875 cong_alg->dip_vld = DIP_INVALID;
4876 cong_alg->wnd_mode_sel = WND_LIMIT;
4879 cong_alg->alg_sel = CONG_DCQCN;
4880 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
4881 cong_alg->dip_vld = DIP_VALID;
4882 cong_alg->wnd_mode_sel = WND_LIMIT;
4885 ibdev_err(&hr_dev->ib_dev,
4886 "error type(%u) for congestion selection.\n",
4887 hr_dev->caps.cong_type);
4894 static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
4895 struct hns_roce_v2_qp_context *context,
4896 struct hns_roce_v2_qp_context *qpc_mask)
4898 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4899 struct hns_roce_congestion_algorithm cong_field;
4900 struct ib_device *ibdev = ibqp->device;
4901 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
4905 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ||
4906 grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE)
4909 ret = check_cong_type(ibqp, &cong_field);
4913 hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
4914 hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
4915 hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID);
4916 hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
4917 hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL);
4918 hr_reg_write(&context->ext, QPCEX_CONG_ALG_SUB_SEL,
4919 cong_field.alg_sub_sel);
4920 hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL);
4921 hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX_VLD, cong_field.dip_vld);
4922 hr_reg_clear(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD);
4923 hr_reg_write(&context->ext, QPCEX_SQ_RQ_NOT_FORBID_EN,
4924 cong_field.wnd_mode_sel);
4925 hr_reg_clear(&qpc_mask->ext, QPCEX_SQ_RQ_NOT_FORBID_EN);
4927 /* if dip is disabled, there is no need to set dip idx */
4928 if (cong_field.dip_vld == 0)
4931 ret = get_dip_ctx_idx(ibqp, attr, &dip_idx);
4933 ibdev_err(ibdev, "failed to fill cong field, ret = %d.\n", ret);
4937 hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX, dip_idx);
4938 hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX, 0);
4943 static int hns_roce_v2_set_path(struct ib_qp *ibqp,
4944 const struct ib_qp_attr *attr,
4946 struct hns_roce_v2_qp_context *context,
4947 struct hns_roce_v2_qp_context *qpc_mask)
4949 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4950 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4951 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4952 struct ib_device *ibdev = &hr_dev->ib_dev;
4953 const struct ib_gid_attr *gid_attr = NULL;
4954 int is_roce_protocol;
4955 u16 vlan_id = 0xffff;
4956 bool is_udp = false;
4962 * If free_mr_en of the QP is set, the QP comes from the free MR and
4963 * will perform a loopback operation. In the loopback scenario, only
4964 * the SL needs to be set.
4966 if (hr_qp->free_mr_en) {
4967 hr_reg_write(context, QPC_SL, rdma_ah_get_sl(&attr->ah_attr));
4968 hr_reg_clear(qpc_mask, QPC_SL);
4969 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4973 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
4974 hr_port = ib_port - 1;
4975 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
4976 rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
4978 if (is_roce_protocol) {
4979 gid_attr = attr->ah_attr.grh.sgid_attr;
4980 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
4984 is_udp = (gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
4987 /* Only HIP08 needs to set the vlan_en bits in QPC */
4988 if (vlan_id < VLAN_N_VID &&
4989 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
4990 hr_reg_enable(context, QPC_RQ_VLAN_EN);
4991 hr_reg_clear(qpc_mask, QPC_RQ_VLAN_EN);
4992 hr_reg_enable(context, QPC_SQ_VLAN_EN);
4993 hr_reg_clear(qpc_mask, QPC_SQ_VLAN_EN);
4996 hr_reg_write(context, QPC_VLAN_ID, vlan_id);
4997 hr_reg_clear(qpc_mask, QPC_VLAN_ID);
4999 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
5000 ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n",
5001 grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
5005 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
5006 ibdev_err(ibdev, "ah attr is not RDMA roce type\n");
5010 hr_reg_write(context, QPC_UDPSPN,
5011 is_udp ? rdma_get_udp_sport(grh->flow_label, ibqp->qp_num,
5012 attr->dest_qp_num) :
5015 hr_reg_clear(qpc_mask, QPC_UDPSPN);
5017 hr_reg_write(context, QPC_GMV_IDX, grh->sgid_index);
5019 hr_reg_clear(qpc_mask, QPC_GMV_IDX);
5021 hr_reg_write(context, QPC_HOPLIMIT, grh->hop_limit);
5022 hr_reg_clear(qpc_mask, QPC_HOPLIMIT);
5024 ret = fill_cong_field(ibqp, attr, context, qpc_mask);
5028 hr_reg_write(context, QPC_TC, get_tclass(&attr->ah_attr.grh));
5029 hr_reg_clear(qpc_mask, QPC_TC);
5031 hr_reg_write(context, QPC_FL, grh->flow_label);
5032 hr_reg_clear(qpc_mask, QPC_FL);
5033 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
5034 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
5036 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
5037 if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
5039 "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n",
5040 hr_qp->sl, MAX_SERVICE_LEVEL);
5044 hr_reg_write(context, QPC_SL, hr_qp->sl);
5045 hr_reg_clear(qpc_mask, QPC_SL);
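/* sm[cur_state][new_state] is true when the hardware supports the
 * corresponding QP state transition.
 */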
5050 static bool check_qp_state(enum ib_qp_state cur_state,
5051 enum ib_qp_state new_state)
5053 static const bool sm[][IB_QPS_ERR + 1] = {
5054 [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
5055 [IB_QPS_INIT] = true },
5056 [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
5057 [IB_QPS_INIT] = true,
5058 [IB_QPS_RTR] = true,
5059 [IB_QPS_ERR] = true },
5060 [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
5061 [IB_QPS_RTS] = true,
5062 [IB_QPS_ERR] = true },
5063 [IB_QPS_RTS] = { [IB_QPS_RESET] = true,
5064 [IB_QPS_RTS] = true,
5065 [IB_QPS_ERR] = true },
5068 [IB_QPS_ERR] = { [IB_QPS_RESET] = true,
5069 [IB_QPS_ERR] = true }
5072 return sm[cur_state][new_state];
5075 static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
5076 const struct ib_qp_attr *attr,
5078 enum ib_qp_state cur_state,
5079 enum ib_qp_state new_state,
5080 struct hns_roce_v2_qp_context *context,
5081 struct hns_roce_v2_qp_context *qpc_mask)
5083 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5086 if (!check_qp_state(cur_state, new_state)) {
5087 ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
5091 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
5092 memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
5093 modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
5094 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
5095 modify_qp_init_to_init(ibqp, attr, context, qpc_mask);
5096 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
5097 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
5099 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
5100 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
5107 static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
5109 #define QP_ACK_TIMEOUT_MAX_HIP08 20
5110 #define QP_ACK_TIMEOUT_OFFSET 10
5111 #define QP_ACK_TIMEOUT_MAX 31
5113 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
5114 if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
5115 ibdev_warn(&hr_dev->ib_dev,
5116 "local ACK timeout shall be 0 to 20.\n");
5119 *timeout += QP_ACK_TIMEOUT_OFFSET;
5120 } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
5121 if (*timeout > QP_ACK_TIMEOUT_MAX) {
5122 ibdev_warn(&hr_dev->ib_dev,
5123 "local ACK timeout shall be 0 to 31.\n");
5131 static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
5132 const struct ib_qp_attr *attr,
5134 struct hns_roce_v2_qp_context *context,
5135 struct hns_roce_v2_qp_context *qpc_mask)
5137 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5138 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5142 if (attr_mask & IB_QP_AV) {
5143 ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
5149 if (attr_mask & IB_QP_TIMEOUT) {
5150 timeout = attr->timeout;
5151 if (check_qp_timeout_cfg_range(hr_dev, &timeout)) {
5152 hr_reg_write(context, QPC_AT, timeout);
5153 hr_reg_clear(qpc_mask, QPC_AT);
5157 if (attr_mask & IB_QP_RETRY_CNT) {
5158 hr_reg_write(context, QPC_RETRY_NUM_INIT, attr->retry_cnt);
5159 hr_reg_clear(qpc_mask, QPC_RETRY_NUM_INIT);
5161 hr_reg_write(context, QPC_RETRY_CNT, attr->retry_cnt);
5162 hr_reg_clear(qpc_mask, QPC_RETRY_CNT);
5165 if (attr_mask & IB_QP_RNR_RETRY) {
5166 hr_reg_write(context, QPC_RNR_NUM_INIT, attr->rnr_retry);
5167 hr_reg_clear(qpc_mask, QPC_RNR_NUM_INIT);
5169 hr_reg_write(context, QPC_RNR_CNT, attr->rnr_retry);
5170 hr_reg_clear(qpc_mask, QPC_RNR_CNT);
5173 if (attr_mask & IB_QP_SQ_PSN) {
5174 hr_reg_write(context, QPC_SQ_CUR_PSN, attr->sq_psn);
5175 hr_reg_clear(qpc_mask, QPC_SQ_CUR_PSN);
5177 hr_reg_write(context, QPC_SQ_MAX_PSN, attr->sq_psn);
5178 hr_reg_clear(qpc_mask, QPC_SQ_MAX_PSN);
5180 hr_reg_write(context, QPC_RETRY_MSG_PSN_L, attr->sq_psn);
5181 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_L);
5183 hr_reg_write(context, QPC_RETRY_MSG_PSN_H,
5184 attr->sq_psn >> RETRY_MSG_PSN_SHIFT);
5185 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_H);
5187 hr_reg_write(context, QPC_RETRY_MSG_FPKT_PSN, attr->sq_psn);
5188 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_FPKT_PSN);
5190 hr_reg_write(context, QPC_RX_ACK_EPSN, attr->sq_psn);
5191 hr_reg_clear(qpc_mask, QPC_RX_ACK_EPSN);
5194 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
5195 attr->max_dest_rd_atomic) {
5196 hr_reg_write(context, QPC_RR_MAX,
5197 fls(attr->max_dest_rd_atomic - 1));
5198 hr_reg_clear(qpc_mask, QPC_RR_MAX);
5201 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
5202 hr_reg_write(context, QPC_SR_MAX, fls(attr->max_rd_atomic - 1));
5203 hr_reg_clear(qpc_mask, QPC_SR_MAX);
5206 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
5207 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
5209 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
5210 hr_reg_write(context, QPC_MIN_RNR_TIME,
5211 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
5212 HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer);
5213 hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME);
5216 if (attr_mask & IB_QP_RQ_PSN) {
5217 hr_reg_write(context, QPC_RX_REQ_EPSN, attr->rq_psn);
5218 hr_reg_clear(qpc_mask, QPC_RX_REQ_EPSN);
5220 hr_reg_write(context, QPC_RAQ_PSN, attr->rq_psn - 1);
5221 hr_reg_clear(qpc_mask, QPC_RAQ_PSN);
5224 if (attr_mask & IB_QP_QKEY) {
5225 context->qkey_xrcd = cpu_to_le32(attr->qkey);
5226 qpc_mask->qkey_xrcd = 0;
5227 hr_qp->qkey = attr->qkey;
5233 static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
5234 const struct ib_qp_attr *attr,
5237 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5238 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5240 if (attr_mask & IB_QP_ACCESS_FLAGS)
5241 hr_qp->atomic_rd_en = attr->qp_access_flags;
5243 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
5244 hr_qp->resp_depth = attr->max_dest_rd_atomic;
5245 if (attr_mask & IB_QP_PORT) {
5246 hr_qp->port = attr->port_num - 1;
5247 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
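/* Clean this QP's CQEs from its CQs and reset its software queue state
 * after the QP has returned to RESET.
 */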
5251 static void clear_qp(struct hns_roce_qp *hr_qp)
5253 struct ib_qp *ibqp = &hr_qp->ibqp;
5256 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
5259 if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq)
5260 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq),
5261 hr_qp->qpn, ibqp->srq ?
5262 to_hr_srq(ibqp->srq) : NULL);
5264 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
5265 *hr_qp->rdb.db_record = 0;
5271 hr_qp->next_sge = 0;
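/* Record the current SQ/RQ producer indexes in the QPC so hardware can
 * flush the outstanding WQEs when the QP moves to the error state.
 */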
5274 static void v2_set_flushed_fields(struct ib_qp *ibqp,
5275 struct hns_roce_v2_qp_context *context,
5276 struct hns_roce_v2_qp_context *qpc_mask)
5278 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5279 unsigned long sq_flag = 0;
5280 unsigned long rq_flag = 0;
5282 if (ibqp->qp_type == IB_QPT_XRC_TGT)
5285 spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
5286 hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head);
5287 hr_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX);
5288 hr_qp->state = IB_QPS_ERR;
5289 spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
5291 if (ibqp->srq || ibqp->qp_type == IB_QPT_XRC_INI) /* no RQ */
5294 spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
5295 hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head);
5296 hr_reg_clear(qpc_mask, QPC_RQ_PRODUCER_IDX);
5297 spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
5300 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
5301 const struct ib_qp_attr *attr,
5302 int attr_mask, enum ib_qp_state cur_state,
5303 enum ib_qp_state new_state)
5305 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5306 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5307 struct hns_roce_v2_qp_context ctx[2];
5308 struct hns_roce_v2_qp_context *context = ctx;
5309 struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
5310 struct ib_device *ibdev = &hr_dev->ib_dev;
5313 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
5317 * In the v2 engine, software passes a context and a context mask to
5318 * hardware when modifying a QP. To modify a field in the context, all
5319 * bits of that field in the context mask must be cleared to 0 at the
5320 * same time; otherwise they are left set to 0x1.
5322 memset(context, 0, hr_dev->caps.qpc_sz);
5323 memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
5325 ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
5326 new_state, context, qpc_mask);
5330 /* When QP state is err, SQ and RQ WQE should be flushed */
5331 if (new_state == IB_QPS_ERR)
5332 v2_set_flushed_fields(ibqp, context, qpc_mask);
5334 /* Configure the optional fields */
5335 ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
5340 hr_reg_write_bool(context, QPC_INV_CREDIT,
5341 to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC ||
5343 hr_reg_clear(qpc_mask, QPC_INV_CREDIT);
5345 /* Every state transition must update the QP state field */
5346 hr_reg_write(context, QPC_QP_ST, new_state);
5347 hr_reg_clear(qpc_mask, QPC_QP_ST);
5349 /* SW passes the context to HW */
5350 ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
5352 ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret);
5356 hr_qp->state = new_state;
5358 hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
5360 if (new_state == IB_QPS_RESET && !ibqp->uobject)
5367 static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
5369 static const enum ib_qp_state map[] = {
5370 [HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
5371 [HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
5372 [HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
5373 [HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
5374 [HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
5375 [HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
5376 [HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
5377 [HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
5380 return (state < ARRAY_SIZE(map)) ? map[state] : -1;
5383 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, u32 qpn,
5386 struct hns_roce_cmd_mailbox *mailbox;
5389 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5390 if (IS_ERR(mailbox))
5391 return PTR_ERR(mailbox);
5393 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC,
5398 memcpy(buffer, mailbox->buf, hr_dev->caps.qpc_sz);
5401 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5405 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
5407 struct ib_qp_init_attr *qp_init_attr)
5409 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5410 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5411 struct hns_roce_v2_qp_context context = {};
5412 struct ib_device *ibdev = &hr_dev->ib_dev;
5417 memset(qp_attr, 0, sizeof(*qp_attr));
5418 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
5420 mutex_lock(&hr_qp->mutex);
5422 if (hr_qp->state == IB_QPS_RESET) {
5423 qp_attr->qp_state = IB_QPS_RESET;
5428 ret = hns_roce_v2_query_qpc(hr_dev, hr_qp->qpn, &context);
5430 ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
5435 state = hr_reg_read(&context, QPC_QP_ST);
5436 tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
5437 if (tmp_qp_state == -1) {
5438 ibdev_err(ibdev, "Illegal ib_qp_state\n");
5442 hr_qp->state = (u8)tmp_qp_state;
5443 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
5444 qp_attr->path_mtu = (enum ib_mtu)hr_reg_read(&context, QPC_MTU);
5445 qp_attr->path_mig_state = IB_MIG_ARMED;
5446 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
5447 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
5448 qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);
5450 qp_attr->rq_psn = hr_reg_read(&context, QPC_RX_REQ_EPSN);
5451 qp_attr->sq_psn = (u32)hr_reg_read(&context, QPC_SQ_CUR_PSN);
5452 qp_attr->dest_qp_num = hr_reg_read(&context, QPC_DQPN);
5453 qp_attr->qp_access_flags =
5454 ((hr_reg_read(&context, QPC_RRE)) << V2_QP_RRE_S) |
5455 ((hr_reg_read(&context, QPC_RWE)) << V2_QP_RWE_S) |
5456 ((hr_reg_read(&context, QPC_ATE)) << V2_QP_ATE_S);
5458 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
5459 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
5460 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
5461 struct ib_global_route *grh =
5462 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
5464 rdma_ah_set_sl(&qp_attr->ah_attr,
5465 hr_reg_read(&context, QPC_SL));
5466 rdma_ah_set_port_num(&qp_attr->ah_attr, hr_qp->port + 1);
5467 rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
5468 grh->flow_label = hr_reg_read(&context, QPC_FL);
5469 grh->sgid_index = hr_reg_read(&context, QPC_GMV_IDX);
5470 grh->hop_limit = hr_reg_read(&context, QPC_HOPLIMIT);
5471 grh->traffic_class = hr_reg_read(&context, QPC_TC);
5473 memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
5476 qp_attr->port_num = hr_qp->port + 1;
5477 qp_attr->sq_draining = 0;
5478 qp_attr->max_rd_atomic = 1 << hr_reg_read(&context, QPC_SR_MAX);
5479 qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX);
5481 qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME);
5482 qp_attr->timeout = (u8)hr_reg_read(&context, QPC_AT);
5483 qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
5484 qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT);
5487 qp_attr->cur_qp_state = qp_attr->qp_state;
5488 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
5489 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
5490 qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
5492 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
5493 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
5495 qp_init_attr->qp_context = ibqp->qp_context;
5496 qp_init_attr->qp_type = ibqp->qp_type;
5497 qp_init_attr->recv_cq = ibqp->recv_cq;
5498 qp_init_attr->send_cq = ibqp->send_cq;
5499 qp_init_attr->srq = ibqp->srq;
5500 qp_init_attr->cap = qp_attr->cap;
5501 qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
5504 mutex_unlock(&hr_qp->mutex);
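/* Only RC, UD and XRC QPs that are not already in RESET need the explicit
 * modify-to-RESET step before being destroyed.
 */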
5508 static inline int modify_qp_is_ok(struct hns_roce_qp *hr_qp)
5510 return ((hr_qp->ibqp.qp_type == IB_QPT_RC ||
5511 hr_qp->ibqp.qp_type == IB_QPT_UD ||
5512 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
5513 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
5514 hr_qp->state != IB_QPS_RESET);
5517 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
5518 struct hns_roce_qp *hr_qp,
5519 struct ib_udata *udata)
5521 struct ib_device *ibdev = &hr_dev->ib_dev;
5522 struct hns_roce_cq *send_cq, *recv_cq;
5523 unsigned long flags;
5526 if (modify_qp_is_ok(hr_qp)) {
5527 /* Modify the QP to RESET before destroying it */
5528 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
5529 hr_qp->state, IB_QPS_RESET);
5532 "failed to modify QP to RST, ret = %d.\n",
5536 send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
5537 recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
5539 spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
5540 hns_roce_lock_cqs(send_cq, recv_cq);
5544 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
5546 to_hr_srq(hr_qp->ibqp.srq) :
5549 if (send_cq && send_cq != recv_cq)
5550 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
5553 hns_roce_qp_remove(hr_dev, hr_qp);
5555 hns_roce_unlock_cqs(send_cq, recv_cq);
5556 spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
5561 int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
5563 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5564 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5567 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
5569 ibdev_err(&hr_dev->ib_dev,
5570 "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n",
5573 hns_roce_qp_destroy(hr_dev, hr_qp, udata);
5578 static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
5579 struct hns_roce_qp *hr_qp)
5581 struct ib_device *ibdev = &hr_dev->ib_dev;
5582 struct hns_roce_sccc_clr_done *resp;
5583 struct hns_roce_sccc_clr *clr;
5584 struct hns_roce_cmq_desc desc;
5587 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
5590 mutex_lock(&hr_dev->qp_table.scc_mutex);
5592 /* set scc ctx clear done flag */
5593 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
5594 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5596 ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d.\n", ret);
5600 /* clear scc context */
5601 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
5602 clr = (struct hns_roce_sccc_clr *)desc.data;
5603 clr->qpn = cpu_to_le32(hr_qp->qpn);
5604 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5606 ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d.\n", ret);
5610 /* Query whether the SCC context clear is done */
5611 resp = (struct hns_roce_sccc_clr_done *)desc.data;
5612 for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
5613 hns_roce_cmq_setup_basic_desc(&desc,
5614 HNS_ROCE_OPC_QUERY_SCCC, true);
5615 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5617 ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n",
5628 ibdev_err(ibdev, "query SCC clr done flag overtime.\n");
5632 mutex_unlock(&hr_dev->qp_table.scc_mutex);
5636 #define DMA_IDX_SHIFT 3
5637 #define DMA_WQE_SHIFT 3
5639 static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
5640 struct hns_roce_srq_context *ctx)
5642 struct hns_roce_idx_que *idx_que = &srq->idx_que;
5643 struct ib_device *ibdev = srq->ibsrq.device;
5644 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
5645 u64 mtts_idx[MTT_MIN_COUNT] = {};
5646 dma_addr_t dma_handle_idx = 0;
5649 /* Get physical address of idx que buf */
5650 ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
5651 ARRAY_SIZE(mtts_idx), &dma_handle_idx);
5653 ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
5658 hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
5659 to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));
5661 hr_reg_write(ctx, SRQC_IDX_BT_BA_L, dma_handle_idx >> DMA_IDX_SHIFT);
5662 hr_reg_write(ctx, SRQC_IDX_BT_BA_H,
5663 upper_32_bits(dma_handle_idx >> DMA_IDX_SHIFT));
5665 hr_reg_write(ctx, SRQC_IDX_BA_PG_SZ,
5666 to_hr_hw_page_shift(idx_que->mtr.hem_cfg.ba_pg_shift));
5667 hr_reg_write(ctx, SRQC_IDX_BUF_PG_SZ,
5668 to_hr_hw_page_shift(idx_que->mtr.hem_cfg.buf_pg_shift));
5670 hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_L,
5671 to_hr_hw_page_addr(mtts_idx[0]));
5672 hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_H,
5673 upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
5675 hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_L,
5676 to_hr_hw_page_addr(mtts_idx[1]));
5677 hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_H,
5678 upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
5683 static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
5685 struct ib_device *ibdev = srq->ibsrq.device;
5686 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
5687 struct hns_roce_srq_context *ctx = mb_buf;
5688 u64 mtts_wqe[MTT_MIN_COUNT] = {};
5689 dma_addr_t dma_handle_wqe = 0;
5692 memset(ctx, 0, sizeof(*ctx));
5694 /* Get the physical address of srq buf */
5695 ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
5696 ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
5698 ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
5703 hr_reg_write(ctx, SRQC_SRQ_ST, 1);
5704 hr_reg_write_bool(ctx, SRQC_SRQ_TYPE,
5705 srq->ibsrq.srq_type == IB_SRQT_XRC);
5706 hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn);
5707 hr_reg_write(ctx, SRQC_SRQN, srq->srqn);
5708 hr_reg_write(ctx, SRQC_XRCD, srq->xrcdn);
5709 hr_reg_write(ctx, SRQC_XRC_CQN, srq->cqn);
5710 hr_reg_write(ctx, SRQC_SHIFT, ilog2(srq->wqe_cnt));
5711 hr_reg_write(ctx, SRQC_RQWS,
5712 srq->max_gs <= 0 ? 0 : fls(srq->max_gs - 1));
5714 hr_reg_write(ctx, SRQC_WQE_HOP_NUM,
5715 to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
5718 hr_reg_write(ctx, SRQC_WQE_BT_BA_L, dma_handle_wqe >> DMA_WQE_SHIFT);
5719 hr_reg_write(ctx, SRQC_WQE_BT_BA_H,
5720 upper_32_bits(dma_handle_wqe >> DMA_WQE_SHIFT));
5722 hr_reg_write(ctx, SRQC_WQE_BA_PG_SZ,
5723 to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
5724 hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ,
5725 to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
5727 return hns_roce_v2_write_srqc_index_queue(srq, ctx);
5730 static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
5731 struct ib_srq_attr *srq_attr,
5732 enum ib_srq_attr_mask srq_attr_mask,
5733 struct ib_udata *udata)
5735 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5736 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5737 struct hns_roce_srq_context *srq_context;
5738 struct hns_roce_srq_context *srqc_mask;
5739 struct hns_roce_cmd_mailbox *mailbox;
5742 /* Resizing SRQs is not supported yet */
5743 if (srq_attr_mask & IB_SRQ_MAX_WR)
5746 if (srq_attr_mask & IB_SRQ_LIMIT) {
5747 if (srq_attr->srq_limit > srq->wqe_cnt)
5750 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5751 if (IS_ERR(mailbox))
5752 return PTR_ERR(mailbox);
5754 srq_context = mailbox->buf;
5755 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
5757 memset(srqc_mask, 0xff, sizeof(*srqc_mask));
5759 hr_reg_write(srq_context, SRQC_LIMIT_WL, srq_attr->srq_limit);
5760 hr_reg_clear(srqc_mask, SRQC_LIMIT_WL);
5762 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
5763 HNS_ROCE_CMD_MODIFY_SRQC, srq->srqn);
5764 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5766 ibdev_err(&hr_dev->ib_dev,
5767 "failed to handle cmd of modifying SRQ, ret = %d.\n",
5776 static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
5778 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5779 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5780 struct hns_roce_srq_context *srq_context;
5781 struct hns_roce_cmd_mailbox *mailbox;
5784 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5785 if (IS_ERR(mailbox))
5786 return PTR_ERR(mailbox);
5788 srq_context = mailbox->buf;
5789 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
5790 HNS_ROCE_CMD_QUERY_SRQC, srq->srqn);
5792 ibdev_err(&hr_dev->ib_dev,
5793 "failed to process cmd of querying SRQ, ret = %d.\n",
5798 attr->srq_limit = hr_reg_read(srq_context, SRQC_LIMIT_WL);
5799 attr->max_wr = srq->wqe_cnt;
5800 attr->max_sge = srq->max_gs - srq->rsv_sge;
5803 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5807 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
5809 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
5810 struct hns_roce_v2_cq_context *cq_context;
5811 struct hns_roce_cq *hr_cq = to_hr_cq(cq);
5812 struct hns_roce_v2_cq_context *cqc_mask;
5813 struct hns_roce_cmd_mailbox *mailbox;
5816 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5817 if (IS_ERR(mailbox))
5818 return PTR_ERR(mailbox);
5820 cq_context = mailbox->buf;
5821 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
5823 memset(cqc_mask, 0xff, sizeof(*cqc_mask));
5825 hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
5826 hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);
5828 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
5829 if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
5830 dev_info(hr_dev->dev,
5831 "cq_period(%u) reached the upper limit, adjusted to 65.\n",
5833 cq_period = HNS_ROCE_MAX_CQ_PERIOD;
5835 cq_period *= HNS_ROCE_CLOCK_ADJUST;
5837 hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
5838 hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);
5840 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
5841 HNS_ROCE_CMD_MODIFY_CQC, hr_cq->cqn);
5842 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5844 ibdev_err(&hr_dev->ib_dev,
5845 "failed to process cmd when modifying CQ, ret = %d.\n",
5851 static int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev, u32 cqn,
5854 struct hns_roce_v2_cq_context *context;
5855 struct hns_roce_cmd_mailbox *mailbox;
5858 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5859 if (IS_ERR(mailbox))
5860 return PTR_ERR(mailbox);
5862 context = mailbox->buf;
5863 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
5864 HNS_ROCE_CMD_QUERY_CQC, cqn);
5866 ibdev_err(&hr_dev->ib_dev,
5867 "failed to process cmd when querying CQ, ret = %d.\n",
5872 memcpy(buffer, context, sizeof(*context));
5875 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5880 static int hns_roce_v2_query_mpt(struct hns_roce_dev *hr_dev, u32 key,
5883 struct hns_roce_v2_mpt_entry *context;
5884 struct hns_roce_cmd_mailbox *mailbox;
5887 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5888 if (IS_ERR(mailbox))
5889 return PTR_ERR(mailbox);
5891 context = mailbox->buf;
5892 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT,
5893 key_to_hw_index(key));
5895 ibdev_err(&hr_dev->ib_dev,
5896 "failed to process cmd when querying MPT, ret = %d.\n",
5901 memcpy(buffer, context, sizeof(*context));
5904 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5909 static void hns_roce_irq_work_handle(struct work_struct *work)
5911 struct hns_roce_work *irq_work =
5912 container_of(work, struct hns_roce_work, work);
5913 struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
5915 switch (irq_work->event_type) {
5916 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5917 ibdev_info(ibdev, "path migration succeeded.\n");
5919 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5920 ibdev_warn(ibdev, "path migration failed.\n");
5922 case HNS_ROCE_EVENT_TYPE_COMM_EST:
5924 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5925 ibdev_warn(ibdev, "send queue drained.\n");
5927 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5928 ibdev_err(ibdev, "local work queue 0x%x catast error, sub_event type is: %d\n",
5929 irq_work->queue_num, irq_work->sub_type);
5931 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5932 ibdev_err(ibdev, "invalid request local work queue 0x%x error.\n",
5933 irq_work->queue_num);
5935 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5936 ibdev_err(ibdev, "local access violation work queue 0x%x error, sub_event type is: %d\n",
5937 irq_work->queue_num, irq_work->sub_type);
5939 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5940 ibdev_warn(ibdev, "SRQ limit reached.\n");
5942 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5943 ibdev_warn(ibdev, "SRQ last wqe reached.\n");
5945 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5946 ibdev_err(ibdev, "SRQ catas error.\n");
5948 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5949 ibdev_err(ibdev, "CQ 0x%x access err.\n", irq_work->queue_num);
5951 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5952 ibdev_warn(ibdev, "CQ 0x%x overflow\n", irq_work->queue_num);
5954 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5955 ibdev_warn(ibdev, "DB overflow.\n");
5957 case HNS_ROCE_EVENT_TYPE_FLR:
5958 ibdev_warn(ibdev, "function level reset.\n");
5960 case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
5961 ibdev_err(ibdev, "xrc domain violation error.\n");
5963 case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
5964 ibdev_err(ibdev, "invalid xrceth error.\n");
5973 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
5974 struct hns_roce_eq *eq, u32 queue_num)
5976 struct hns_roce_work *irq_work;
5978 irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
5982 INIT_WORK(&irq_work->work, hns_roce_irq_work_handle);
5983 irq_work->hr_dev = hr_dev;
5984 irq_work->event_type = eq->event_type;
5985 irq_work->sub_type = eq->sub_type;
5986 irq_work->queue_num = queue_num;
5987 queue_work(hr_dev->irq_workq, &irq_work->work);
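/* Ring the EQ doorbell: report the new consumer index and re-arm the EQ
 * according to its arm state.
 */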
5990 static void update_eq_db(struct hns_roce_eq *eq)
5992 struct hns_roce_dev *hr_dev = eq->hr_dev;
5993 struct hns_roce_v2_db eq_db = {};
5995 if (eq->type_flag == HNS_ROCE_AEQ) {
5996 hr_reg_write(&eq_db, EQ_DB_CMD,
5997 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5998 HNS_ROCE_EQ_DB_CMD_AEQ :
5999 HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
6001 hr_reg_write(&eq_db, EQ_DB_TAG, eq->eqn);
6003 hr_reg_write(&eq_db, EQ_DB_CMD,
6004 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
6005 HNS_ROCE_EQ_DB_CMD_CEQ :
6006 HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
6009 hr_reg_write(&eq_db, EQ_DB_CI, eq->cons_index);
6011 hns_roce_write64(hr_dev, (__le32 *)&eq_db, eq->db_reg);
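/* Return the next AEQE if its owner bit shows that hardware has already
 * written it; otherwise return NULL.
 */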
6014 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
6016 struct hns_roce_aeqe *aeqe;
6018 aeqe = hns_roce_buf_offset(eq->mtr.kmem,
6019 (eq->cons_index & (eq->entries - 1)) *
6022 return (hr_reg_read(aeqe, AEQE_OWNER) ^
6023 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
6026 static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
6027 struct hns_roce_eq *eq)
6029 struct device *dev = hr_dev->dev;
6030 struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
6031 irqreturn_t aeqe_found = IRQ_NONE;
6037 /* Make sure we read the AEQ entry after we have checked the ownership bit. */
6042 event_type = hr_reg_read(aeqe, AEQE_EVENT_TYPE);
6043 sub_type = hr_reg_read(aeqe, AEQE_SUB_TYPE);
6044 queue_num = hr_reg_read(aeqe, AEQE_EVENT_QUEUE_NUM);
6046 switch (event_type) {
6047 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
6048 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
6049 case HNS_ROCE_EVENT_TYPE_COMM_EST:
6050 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
6051 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
6052 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
6053 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
6054 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
6055 case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
6056 case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
6057 hns_roce_qp_event(hr_dev, queue_num, event_type);
6059 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
6060 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
6061 hns_roce_srq_event(hr_dev, queue_num, event_type);
6063 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
6064 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
6065 hns_roce_cq_event(hr_dev, queue_num, event_type);
6067 case HNS_ROCE_EVENT_TYPE_MB:
6068 hns_roce_cmd_event(hr_dev,
6069 le16_to_cpu(aeqe->event.cmd.token),
6070 aeqe->event.cmd.status,
6071 le64_to_cpu(aeqe->event.cmd.out_param));
6073 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
6074 case HNS_ROCE_EVENT_TYPE_FLR:
6077 dev_err(dev, "unhandled event %d on EQ %d at idx %u.\n",
6078 event_type, eq->eqn, eq->cons_index);
6082 eq->event_type = event_type;
6083 eq->sub_type = sub_type;
6085 aeqe_found = IRQ_HANDLED;
6087 hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);
6089 aeqe = next_aeqe_sw_v2(eq);
6094 return IRQ_RETVAL(aeqe_found);
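/* Return the next CEQE if its owner bit shows that hardware has already
 * written it; otherwise return NULL.
 */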
6097 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
6099 struct hns_roce_ceqe *ceqe;
6101 ceqe = hns_roce_buf_offset(eq->mtr.kmem,
6102 (eq->cons_index & (eq->entries - 1)) *
6105 return (hr_reg_read(ceqe, CEQE_OWNER) ^
6106 !!(eq->cons_index & eq->entries)) ? ceqe : NULL;
6109 static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
6110 struct hns_roce_eq *eq)
6112 struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
6113 irqreturn_t ceqe_found = IRQ_NONE;
6117 /* Make sure we read the CEQ entry after we have checked the ownership bit. */
6122 cqn = hr_reg_read(ceqe, CEQE_CQN);
6124 hns_roce_cq_completion(hr_dev, cqn);
6127 ceqe_found = IRQ_HANDLED;
6129 ceqe = next_ceqe_sw_v2(eq);
6134 return IRQ_RETVAL(ceqe_found);
6137 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
6139 struct hns_roce_eq *eq = eq_ptr;
6140 struct hns_roce_dev *hr_dev = eq->hr_dev;
6141 irqreturn_t int_work;
6143 if (eq->type_flag == HNS_ROCE_CEQ)
6144 /* Completion event interrupt */
6145 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
6147 /* Asynchronous event interrupt */
6148 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
6150 return IRQ_RETVAL(int_work);
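/* Handle the basic abnormal interrupt: on AEQ overflow, clear the
 * interrupt status and request a reset through the hnae3 framework.
 */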
6153 static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
6156 struct pci_dev *pdev = hr_dev->pci_dev;
6157 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
6158 const struct hnae3_ae_ops *ops = ae_dev->ops;
6159 irqreturn_t int_work = IRQ_NONE;
6162 int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
6164 if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
6165 dev_err(hr_dev->dev, "AEQ overflow!\n");
6167 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG,
6168 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);
6170 /* Set reset level for reset_event() */
6171 if (ops->set_default_reset_request)
6172 ops->set_default_reset_request(ae_dev,
6174 if (ops->reset_event)
6175 ops->reset_event(pdev, NULL);
6177 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
6178 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
6180 int_work = IRQ_HANDLED;
6182 dev_err(hr_dev->dev, "there is no basic abn irq found.\n");
6185 return IRQ_RETVAL(int_work);
6188 static int fmea_ram_ecc_query(struct hns_roce_dev *hr_dev,
6189 struct fmea_ram_ecc *ecc_info)
6191 struct hns_roce_cmq_desc desc;
6192 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
6195 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_QUERY_RAM_ECC, true);
6196 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
6200 ecc_info->is_ecc_err = hr_reg_read(req, QUERY_RAM_ECC_1BIT_ERR);
6201 ecc_info->res_type = hr_reg_read(req, QUERY_RAM_ECC_RES_TYPE);
6202 ecc_info->index = hr_reg_read(req, QUERY_RAM_ECC_TAG);
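/* Recover a GMV BT entry after an ECC error: read the base address back
 * via the cmdq and write the same value again to refresh the entry.
 */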
6207 static int fmea_recover_gmv(struct hns_roce_dev *hr_dev, u32 idx)
6209 struct hns_roce_cmq_desc desc;
6210 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
6215 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, true);
6216 hr_reg_write(req, CFG_GMV_BT_IDX, idx);
6218 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
6220 dev_err(hr_dev->dev,
6221 "failed to execute cmd to read gmv, ret = %d.\n", ret);
6225 addr_low = hr_reg_read(req, CFG_GMV_BT_BA_L);
6226 addr_upper = hr_reg_read(req, CFG_GMV_BT_BA_H);
6228 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
6229 hr_reg_write(req, CFG_GMV_BT_BA_L, addr_low);
6230 hr_reg_write(req, CFG_GMV_BT_BA_H, addr_upper);
6231 hr_reg_write(req, CFG_GMV_BT_IDX, idx);
6233 return hns_roce_cmq_send(hr_dev, &desc, 1);
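/* QPC_TIMER, CQC_TIMER and SCCC entries hold the base address directly;
 * other resource types hold a page number that is shifted by PAGE_SHIFT.
 */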
6236 static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data)
6238 if (res_type == ECC_RESOURCE_QPC_TIMER ||
6239 res_type == ECC_RESOURCE_CQC_TIMER ||
6240 res_type == ECC_RESOURCE_SCCC)
6241 return le64_to_cpu(*data);
6243 return le64_to_cpu(*data) << PAGE_SHIFT;
6246 static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type,
6249 u8 write_bt0_op = fmea_ram_res[res_type].write_bt0_op;
6250 u8 read_bt0_op = fmea_ram_res[res_type].read_bt0_op;
6251 struct hns_roce_cmd_mailbox *mailbox;
6255 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6256 if (IS_ERR(mailbox))
6257 return PTR_ERR(mailbox);
6259 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, read_bt0_op, index);
6261 dev_err(hr_dev->dev,
6262 "failed to execute cmd to read fmea ram, ret = %d.\n",
6267 addr = fmea_get_ram_res_addr(res_type, mailbox->buf);
6269 ret = hns_roce_cmd_mbox(hr_dev, addr, 0, write_bt0_op, index);
6271 dev_err(hr_dev->dev,
6272 "failed to execute cmd to write fmea ram, ret = %d.\n",
6276 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6280 static void fmea_ram_ecc_recover(struct hns_roce_dev *hr_dev,
6281 struct fmea_ram_ecc *ecc_info)
6283 u32 res_type = ecc_info->res_type;
6284 u32 index = ecc_info->index;
6287 BUILD_BUG_ON(ARRAY_SIZE(fmea_ram_res) != ECC_RESOURCE_COUNT);
6289 if (res_type >= ECC_RESOURCE_COUNT) {
6290 dev_err(hr_dev->dev, "unsupported fmea ram ecc type %u.\n",
6295 if (res_type == ECC_RESOURCE_GMV)
6296 ret = fmea_recover_gmv(hr_dev, index);
6298 ret = fmea_recover_others(hr_dev, res_type, index);
6300 dev_err(hr_dev->dev,
6301 "failed to recover %s, index = %u, ret = %d.\n",
6302 fmea_ram_res[res_type].name, index, ret);
6305 static void fmea_ram_ecc_work(struct work_struct *ecc_work)
6307 struct hns_roce_dev *hr_dev =
6308 container_of(ecc_work, struct hns_roce_dev, ecc_work);
6309 struct fmea_ram_ecc ecc_info = {};
6311 if (fmea_ram_ecc_query(hr_dev, &ecc_info)) {
6312 dev_err(hr_dev->dev, "failed to query fmea ram ecc.\n");
6316 if (!ecc_info.is_ecc_err) {
6317 dev_err(hr_dev->dev, "there is no fmea ram ecc err found.\n");
6321 fmea_ram_ecc_recover(hr_dev, &ecc_info);
6324 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
6326 struct hns_roce_dev *hr_dev = dev_id;
6327 irqreturn_t int_work = IRQ_NONE;
6330 int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
6333 int_work = abnormal_interrupt_basic(hr_dev, int_st);
6334 } else if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
6335 queue_work(hr_dev->irq_workq, &hr_dev->ecc_work);
6336 int_work = IRQ_HANDLED;
6338 dev_err(hr_dev->dev, "there is no abnormal irq found.\n");
6341 return IRQ_RETVAL(int_work);
6344 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
6345 int eq_num, u32 enable_flag)
6349 for (i = 0; i < eq_num; i++)
6350 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
6351 i * EQ_REG_OFFSET, enable_flag);
6353 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, enable_flag);
6354 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
6357 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn)
6359 struct device *dev = hr_dev->dev;
6363 if (eqn < hr_dev->caps.num_comp_vectors)
6364 cmd = HNS_ROCE_CMD_DESTROY_CEQC;
6366 cmd = HNS_ROCE_CMD_DESTROY_AEQC;
6368 ret = hns_roce_destroy_hw_ctx(hr_dev, cmd, eqn & HNS_ROCE_V2_EQN_M);
6370 dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn);
static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	hns_roce_mtr_destroy(hr_dev, &eq->mtr);
}

static void init_eq_config(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	eq->db_reg = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
	eq->cons_index = 0;
	eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
	eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
	eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
	eq->shift = ilog2((unsigned int)eq->entries);
}

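/*
 * Fill the EQ context (EQC) that is passed to the CREATE_*EQC mailbox
 * command. The base-table address and the current/next EQE bus addresses are
 * wider than a single context field, so they are written as low/high (and
 * middle) parts using the shifts below; the exact field widths follow the
 * EQC layout defined in hns_roce_hw_v2.h.
 */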
static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
		      void *mb_buf)
{
	u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
	struct hns_roce_eq_context *eqc;
	u64 bt_ba = 0;
	int count;

	eqc = mb_buf;
	memset(eqc, 0, sizeof(struct hns_roce_eq_context));

	init_eq_config(hr_dev, eq);

	/* if not multi-hop, the eqe buffer uses only one trunk */
	count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
				  &bt_ba);
	if (count < 1) {
		dev_err(hr_dev->dev, "failed to find EQE mtr\n");
		return -ENOBUFS;
	}

	hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
	hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
	hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
	hr_reg_write(eqc, EQC_COALESCE, eq->coalesce);
	hr_reg_write(eqc, EQC_ARM_ST, eq->arm_st);
	hr_reg_write(eqc, EQC_EQN, eq->eqn);
	hr_reg_write(eqc, EQC_EQE_CNT, HNS_ROCE_EQ_INIT_EQE_CNT);
	hr_reg_write(eqc, EQC_EQE_BA_PG_SZ,
		     to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
	hr_reg_write(eqc, EQC_EQE_BUF_PG_SZ,
		     to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
	hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
	hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
			dev_info(hr_dev->dev,
				 "eq_period(%u) reached the upper limit, adjusted to 65.\n",
				 eq->eq_period);
			eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD;
		}
		eq->eq_period *= HNS_ROCE_CLOCK_ADJUST;
	}

	hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
	hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
	hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
	hr_reg_write(eqc, EQC_EQE_BA_H, bt_ba >> 35);
	hr_reg_write(eqc, EQC_SHIFT, eq->shift);
	hr_reg_write(eqc, EQC_MSI_INDX, HNS_ROCE_EQ_INIT_MSI_IDX);
	hr_reg_write(eqc, EQC_CUR_EQE_BA_L, eqe_ba[0] >> 12);
	hr_reg_write(eqc, EQC_CUR_EQE_BA_M, eqe_ba[0] >> 28);
	hr_reg_write(eqc, EQC_CUR_EQE_BA_H, eqe_ba[0] >> 60);
	hr_reg_write(eqc, EQC_EQ_CONS_INDX, HNS_ROCE_EQ_INIT_CONS_IDX);
	hr_reg_write(eqc, EQC_NEX_EQE_BA_L, eqe_ba[1] >> 12);
	hr_reg_write(eqc, EQC_NEX_EQE_BA_H, eqe_ba[1] >> 44);
	hr_reg_write(eqc, EQC_EQE_SIZE, eq->eqe_size == HNS_ROCE_V3_EQE_SIZE);

	return 0;
}

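/*
 * Allocate the EQE ring as a single-region MTR: one region holding
 * eq->entries * eq->eqe_size bytes, addressed with the hop number taken from
 * the device capabilities (0 when the table is not multi-hop).
 */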
static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
		eq->hop_num = 0;
	else
		eq->hop_num = hr_dev->caps.eqe_hop_num;

	buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = eq->entries * eq->eqe_size;
	buf_attr.region[0].hopnum = eq->hop_num;
	buf_attr.region_count = 1;

	err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
				  hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL,
				  0);
	if (err)
		dev_err(hr_dev->dev, "failed to alloc EQE mtr, err %d\n", err);

	return err;
}

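/*
 * Create one EQ: allocate a command mailbox and the EQE buffer, fill the EQC
 * in the mailbox buffer and issue the CREATE_CEQC/CREATE_AEQC command. On
 * failure the buffer and mailbox are released in reverse order of
 * acquisition.
 */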
static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
				 struct hns_roce_eq *eq, u8 eq_cmd)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = alloc_eq_buf(hr_dev, eq);
	if (ret)
		goto free_cmd_mbox;

	ret = config_eqc(hr_dev, eq, mailbox->buf);
	if (ret)
		goto err_cmd_mbox;

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, eq_cmd, eq->eqn);
	if (ret) {
		dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
		goto err_cmd_mbox;
	}

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_cmd_mbox:
	free_eq_buf(hr_dev, eq);

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

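/*
 * Request all MSI-X vectors. The vector and irq_names[] layout is: other
 * (abnormal) vectors first, then AEQ vectors, then CEQ vectors. The EQ table
 * itself stores CEQs before AEQs, which is why the name index is offset by
 * +aeq_num for completion vectors and by -comp_num for asynchronous ones.
 */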
static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
				  int comp_num, int aeq_num, int other_num)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int i, j;
	int ret;

	for (i = 0; i < irq_num; i++) {
		hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
					       GFP_KERNEL);
		if (!hr_dev->irq_names[i]) {
			ret = -ENOMEM;
			goto err_kzalloc_failed;
		}
	}

	/* irq contains: abnormal + AEQ + CEQ */
	for (j = 0; j < other_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-abn-%d", j);

	for (j = other_num; j < (other_num + aeq_num); j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-aeq-%d", j - other_num);

	for (j = (other_num + aeq_num); j < irq_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-ceq-%d", j - other_num - aeq_num);

	for (j = 0; j < irq_num; j++) {
		if (j < other_num)
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v2_msix_interrupt_abn,
					  0, hr_dev->irq_names[j], hr_dev);
		else if (j < (other_num + comp_num))
			ret = request_irq(eq_table->eq[j - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[j + aeq_num],
					  &eq_table->eq[j - other_num]);
		else
			ret = request_irq(eq_table->eq[j - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[j - comp_num],
					  &eq_table->eq[j - other_num]);
		if (ret) {
			dev_err(hr_dev->dev, "request irq error!\n");
			goto err_request_failed;
		}
	}

	return 0;

err_request_failed:
	for (j -= 1; j >= 0; j--)
		if (j < other_num)
			free_irq(hr_dev->irq[j], hr_dev);
		else
			free_irq(eq_table->eq[j - other_num].irq,
				 &eq_table->eq[j - other_num]);

err_kzalloc_failed:
	for (i -= 1; i >= 0; i--)
		kfree(hr_dev->irq_names[i]);

	return ret;
}

static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
{
	int irq_num;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;

	for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
		free_irq(hr_dev->irq[i], hr_dev);

	for (i = 0; i < eq_num; i++)
		free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);

	for (i = 0; i < irq_num; i++)
		kfree(hr_dev->irq_names[i]);
}

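/*
 * Create the EQ table: CEQs occupy the first num_comp_vectors entries and
 * AEQs the remainder. The FMEA ECC work item and the ordered irq workqueue
 * are set up before the vectors are requested and interrupts are unmasked.
 */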
static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_eq *eq;
	int other_num;
	int comp_num;
	int aeq_num;
	int irq_num;
	int eq_num;
	u8 eq_cmd;
	int ret;
	int i;

	other_num = hr_dev->caps.num_other_vectors;
	comp_num = hr_dev->caps.num_comp_vectors;
	aeq_num = hr_dev->caps.num_aeq_vectors;

	eq_num = comp_num + aeq_num;
	irq_num = eq_num + other_num;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	/* create eq */
	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		if (i < comp_num) {
			/* CEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->eqe_size = hr_dev->caps.ceqe_size;
			eq->irq = hr_dev->irq[i + other_num + aeq_num];
			eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
		} else {
			/* AEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->eqe_size = hr_dev->caps.aeqe_size;
			eq->irq = hr_dev->irq[i - comp_num + other_num];
			eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
		}

		ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
		if (ret) {
			dev_err(dev, "failed to create eq.\n");
			goto err_create_eq_fail;
		}
	}

	INIT_WORK(&hr_dev->ecc_work, fmea_ram_ecc_work);

	hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
	if (!hr_dev->irq_workq) {
		dev_err(dev, "failed to create irq workqueue.\n");
		ret = -ENOMEM;
		goto err_create_eq_fail;
	}

	ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num, aeq_num,
				     other_num);
	if (ret) {
		dev_err(dev, "failed to request irq.\n");
		goto err_request_irq_fail;
	}

	/* enable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);

	return 0;

err_request_irq_fail:
	destroy_workqueue(hr_dev->irq_workq);

err_create_eq_fail:
	for (i -= 1; i >= 0; i--)
		free_eq_buf(hr_dev, &eq_table->eq[i]);
	kfree(eq_table->eq);

	return ret;
}

static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;

	/* Disable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);

	__hns_roce_free_irq(hr_dev);
	destroy_workqueue(hr_dev->irq_workq);

	for (i = 0; i < eq_num; i++) {
		hns_roce_v2_destroy_eqc(hr_dev, i);

		free_eq_buf(hr_dev, &eq_table->eq[i]);
	}

	kfree(eq_table->eq);
}

static const struct ib_device_ops hns_roce_v2_dev_ops = {
	.destroy_qp = hns_roce_v2_destroy_qp,
	.modify_cq = hns_roce_v2_modify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.post_recv = hns_roce_v2_post_recv,
	.post_send = hns_roce_v2_post_send,
	.query_qp = hns_roce_v2_query_qp,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
};

static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
	.modify_srq = hns_roce_v2_modify_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
	.query_srq = hns_roce_v2_query_srq,
};

static const struct hns_roce_hw hns_roce_hw_v2 = {
	.cmq_init = hns_roce_v2_cmq_init,
	.cmq_exit = hns_roce_v2_cmq_exit,
	.hw_profile = hns_roce_v2_profile,
	.hw_init = hns_roce_v2_init,
	.hw_exit = hns_roce_v2_exit,
	.post_mbox = v2_post_mbox,
	.poll_mbox_done = v2_poll_mbox_done,
	.chk_mbox_avail = v2_chk_mbox_is_avail,
	.set_gid = hns_roce_v2_set_gid,
	.set_mac = hns_roce_v2_set_mac,
	.write_mtpt = hns_roce_v2_write_mtpt,
	.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
	.frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
	.mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
	.write_cqc = hns_roce_v2_write_cqc,
	.set_hem = hns_roce_v2_set_hem,
	.clear_hem = hns_roce_v2_clear_hem,
	.modify_qp = hns_roce_v2_modify_qp,
	.dereg_mr = hns_roce_v2_dereg_mr,
	.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
	.init_eq = hns_roce_v2_init_eq_table,
	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
	.write_srqc = hns_roce_v2_write_srqc,
	.query_cqc = hns_roce_v2_query_cqc,
	.query_qpc = hns_roce_v2_query_qpc,
	.query_mpt = hns_roce_v2_query_mpt,
	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};

static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);

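/*
 * Pull per-device configuration from the hnae3 handle provided by the NIC
 * driver. driver_data in the PCI table above is non-zero only for the VF
 * device ID, so it doubles as the is_vf flag here.
 */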
static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
				   struct hnae3_handle *handle)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	const struct pci_device_id *id;
	int i;

	hr_dev->pci_dev = handle->pdev;
	id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
	hr_dev->is_vf = id->driver_data;
	hr_dev->dev = &handle->pdev->dev;
	hr_dev->hw = &hns_roce_hw_v2;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	hr_dev->odb_offset = hr_dev->sdb_offset;

	/* Get info from NIC driver. */
	hr_dev->reg_base = handle->rinfo.roce_io_base;
	hr_dev->mem_base = handle->rinfo.roce_mem_base;
	hr_dev->caps.num_ports = 1;
	hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
	hr_dev->iboe.phy_port[0] = 0;

	addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
			    hr_dev->iboe.netdevs[0]->dev_addr);

	for (i = 0; i < handle->rinfo.num_vectors; i++)
		hr_dev->irq[i] = pci_irq_vector(handle->pdev,
						i + handle->rinfo.base_vector);

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;

	hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
	priv->handle = handle;
}

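/*
 * Allocate and bring up one RoCE device instance on top of a hnae3 handle.
 * On HIP08 an extra free_mr context is initialised; it appears to provide the
 * reserved resources that the HIP08 MR-free workaround relies on.
 */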
static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;
	int ret;

	hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
	if (!hr_dev)
		return -ENOMEM;

	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
	if (!hr_dev->priv) {
		ret = -ENOMEM;
		goto error_failed_kzalloc;
	}

	hns_roce_hw_v2_get_cfg(hr_dev, handle);

	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
		goto error_failed_cfg;
	}

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		ret = free_mr_init(hr_dev);
		if (ret) {
			dev_err(hr_dev->dev, "failed to init free mr!\n");
			goto error_failed_roce_init;
		}
	}

	handle->priv = hr_dev;

	return 0;

error_failed_roce_init:
	hns_roce_exit(hr_dev);

error_failed_cfg:
	kfree(hr_dev->priv);

error_failed_kzalloc:
	ib_dealloc_device(&hr_dev->ib_dev);

	return ret;
}

static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					     bool reset)
{
	struct hns_roce_dev *hr_dev = handle->priv;

	if (!hr_dev)
		return;

	handle->priv = NULL;

	hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
	hns_roce_handle_device_err(hr_dev);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
		free_mr_exit(hr_dev);

	hns_roce_exit(hr_dev);
	kfree(hr_dev->priv);
	ib_dealloc_device(&hr_dev->ib_dev);
}

static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	const struct pci_device_id *id;
	struct device *dev = &handle->pdev->dev;
	int ret;

	handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;

	if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		goto reset_chk_err;
	}

	id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
	if (!id)
		return 0;

	if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08)
		return 0;

	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
		if (ops->ae_dev_resetting(handle) ||
		    ops->get_hw_reset_stat(handle))
			goto reset_chk_err;
		else
			return ret;
	}

	handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;

	return 0;

reset_chk_err:
	dev_err(dev, "Device is busy in resetting state.\n"
		     "Please retry later.\n");

	return -EBUSY;
}

static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					   bool reset)
{
	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
		return;

	handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;

	__hns_roce_hw_v2_uninit_instance(handle, reset);

	handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
}

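/*
 * Reset notification path used by the hnae3 framework: DOWN_CLIENT quiesces
 * the device, INIT_CLIENT reinitialises the instance after the hardware
 * reset, and UNINIT_CLIENT tears it down. HNS_ROCE_RST_DIRECT_RETURN marks
 * instances that were not fully initialised when the reset started, so the
 * later stages return immediately.
 */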
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;

	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
		set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
	clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);

	hr_dev = handle->priv;
	if (!hr_dev)
		return 0;

	hr_dev->active = false;
	hr_dev->dis_db = true;
	hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;

	return 0;
}

static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
	struct device *dev = &handle->pdev->dev;
	int ret;

	if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
			       &handle->rinfo.state)) {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;

	dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		/* When the reset notify type is HNAE3_INIT_CLIENT, the RoCE
		 * engine is reinitialised in this callback. If the reinit
		 * fails, the NIC driver must be informed.
		 */
		handle->priv = NULL;
		dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
	} else {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		dev_info(dev, "reset done, RoCE client reinit finished.\n");
	}

	return ret;
}

static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
{
	if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
		return 0;

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
	dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
	msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
	__hns_roce_hw_v2_uninit_instance(handle, false);

	return 0;
}

static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
				       enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_DOWN_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_down(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_init(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_uninit(handle);
		break;
	default:
		break;
	}

	return ret;
}

static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
	.init_instance = hns_roce_hw_v2_init_instance,
	.uninit_instance = hns_roce_hw_v2_uninit_instance,
	.reset_notify = hns_roce_hw_v2_reset_notify,
};

static struct hnae3_client hns_roce_hw_v2_client = {
	.name = "hns_roce_hw_v2",
	.type = HNAE3_CLIENT_ROCE,
	.ops = &hns_roce_hw_v2_ops,
};

static int __init hns_roce_hw_v2_init(void)
{
	return hnae3_register_client(&hns_roce_hw_v2_client);
}

static void __exit hns_roce_hw_v2_exit(void)
{
	hnae3_unregister_client(&hns_roce_hw_v2_client);
}

module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");