/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"

enum {
	CMD_RST_PRC_OTHERS,
	CMD_RST_PRC_SUCCESS,
	CMD_RST_PRC_EBUSY,
};
static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
				   struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}
/* mapped-value = 1 + real-value
 * The hns wr opcode real value starts from 0. In order to distinguish
 * between initialized and uninitialized map values, we add 1 to the actual
 * value when defining the mapping, so that validity can be identified by
 * checking whether the mapped value is greater than 0.
 */
#define HR_OPC_MAP(ib_key, hr_key) \
	[IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key

static const u32 hns_roce_op_code[] = {
	HR_OPC_MAP(RDMA_WRITE,			RDMA_WRITE),
	HR_OPC_MAP(RDMA_WRITE_WITH_IMM,		RDMA_WRITE_WITH_IMM),
	HR_OPC_MAP(SEND,			SEND),
	HR_OPC_MAP(SEND_WITH_IMM,		SEND_WITH_IMM),
	HR_OPC_MAP(RDMA_READ,			RDMA_READ),
	HR_OPC_MAP(ATOMIC_CMP_AND_SWP,		ATOM_CMP_AND_SWAP),
	HR_OPC_MAP(ATOMIC_FETCH_AND_ADD,	ATOM_FETCH_AND_ADD),
	HR_OPC_MAP(SEND_WITH_INV,		SEND_WITH_INV),
	HR_OPC_MAP(LOCAL_INV,			LOCAL_INV),
	HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP,	ATOM_MSK_CMP_AND_SWAP),
	HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD,	ATOM_MSK_FETCH_AND_ADD),
	HR_OPC_MAP(REG_MR,			FAST_REG_PMR),
};
static u32 to_hr_opcode(u32 ib_opcode)
{
	if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
		return HNS_ROCE_V2_WQE_OP_MASK;

	return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
					     HNS_ROCE_V2_WQE_OP_MASK;
}
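/*
 * Illustrative example of the mapping above: to_hr_opcode(IB_WR_SEND)
 * finds hns_roce_op_code[IB_WR_SEND] == 1 + HNS_ROCE_V2_WQE_OP_SEND and
 * returns HNS_ROCE_V2_WQE_OP_SEND, while an IB opcode that was never
 * mapped (entry == 0) or lies beyond the table yields
 * HNS_ROCE_V2_WQE_OP_MASK, which callers treat as invalid.
 */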
static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			 const struct ib_reg_wr *wr)
{
	struct hns_roce_wqe_frmr_seg *fseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_mr *mr = to_hr_mr(wr->mr);
	u64 pbl_ba;

	/* use ib_access_flags */
	roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_BIND_EN_S,
		     !!(wr->access & IB_ACCESS_MW_BIND));
	roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_ATOMIC_S,
		     !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
	roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_RR_S,
		     !!(wr->access & IB_ACCESS_REMOTE_READ));
	roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_RW_S,
		     !!(wr->access & IB_ACCESS_REMOTE_WRITE));
	roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_LW_S,
		     !!(wr->access & IB_ACCESS_LOCAL_WRITE));

	/* Data structure reuse may lead to confusion */
	pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
	rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
	rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));

	rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
	rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
	rc_sq_wqe->rkey = cpu_to_le32(wr->key);
	rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);

	fseg->pbl_size = cpu_to_le32(mr->npages);
	roce_set_field(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
		       to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
	roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}
static void set_atomic_seg(const struct ib_send_wr *wr,
			   struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			   unsigned int valid_num_sge)
{
	struct hns_roce_v2_wqe_data_seg *dseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_wqe_atomic_seg *aseg =
		(void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);

	set_data_seg_v2(dseg, wr->sg_list);

	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
		aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
	} else {
		aseg->fetchadd_swap_data =
			cpu_to_le64(atomic_wr(wr)->compare_add);
		aseg->cmp_data = 0;
	}

	roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
		       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
}
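/*
 * Note on the WQE layout implied by the pointer arithmetic above: the
 * base RC send WQE is followed immediately by one data segment (the
 * local buffer that receives the original remote value), which is in
 * turn followed by the atomic segment holding the operands.
 */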
static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
				 const struct ib_send_wr *wr,
				 unsigned int *sge_idx, u32 msg_len)
{
	struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
	unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg);
	unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len;
	unsigned int left_len_in_pg;
	unsigned int idx = *sge_idx;
	unsigned int i = 0;
	unsigned int len;
	void *addr;
	void *dseg;

	if (msg_len > ext_sge_sz) {
		ibdev_err(ibdev,
			  "not enough extended sge space for inline data.\n");
		return -EINVAL;
	}

	dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
	left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
	len = wr->sg_list[0].length;
	addr = (void *)(unsigned long)(wr->sg_list[0].addr);

	/* When copying data to extended sge space, the left length in page may
	 * not be long enough for the current user's sge. So the data should be
	 * split into several parts: one in the first page, and the others in
	 * the subsequent pages.
	 */
	while (1) {
		if (len <= left_len_in_pg) {
			memcpy(dseg, addr, len);

			idx += len / dseg_len;

			i++;
			if (i >= wr->num_sge)
				break;

			/* Advance by the copied length before moving on to
			 * the next user sge.
			 */
			dseg += len;
			left_len_in_pg -= len;
			len = wr->sg_list[i].length;
			addr = (void *)(unsigned long)(wr->sg_list[i].addr);
		} else {
			memcpy(dseg, addr, left_len_in_pg);

			len -= left_len_in_pg;
			addr += left_len_in_pg;
			idx += left_len_in_pg / dseg_len;
			dseg = hns_roce_get_extend_sge(qp,
						idx & (qp->sge.sge_cnt - 1));
			left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
		}
	}

	*sge_idx = idx;

	return 0;
}
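/*
 * The extended sge area is a ring of HNS_HW_PAGE_SIZE pages, so a single
 * user sge may be copied as two chunks: whatever fits before the next
 * hardware page boundary, then the remainder starting at the next page
 * returned by hns_roce_get_extend_sge().
 */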
static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
			   unsigned int *sge_ind, unsigned int cnt)
{
	struct hns_roce_v2_wqe_data_seg *dseg;
	unsigned int idx = *sge_ind;

	while (cnt > 0) {
		dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
		if (likely(sge->length)) {
			set_data_seg_v2(dseg, sge);
			idx++;
			cnt--;
		}
		sge++;
	}

	*sge_ind = idx;
}
static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	int mtu = ib_mtu_enum_to_int(qp->path_mtu);

	if (len > qp->max_inline_data || len > mtu) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
			  len, qp->max_inline_data, mtu);
		return false;
	}

	return true;
}
static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
		      struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
		      unsigned int *sge_idx)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned int curr_idx = *sge_idx;
	void *dseg = rc_sq_wqe;
	unsigned int i;
	int ret;

	if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
		ibdev_err(ibdev, "invalid inline parameters!\n");
		return -EINVAL;
	}

	if (!check_inl_data_len(qp, msg_len))
		return -EINVAL;

	dseg += sizeof(struct hns_roce_v2_rc_send_wqe);

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);

	if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
		roce_set_bit(rc_sq_wqe->byte_20,
			     V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0);

		for (i = 0; i < wr->num_sge; i++) {
			memcpy(dseg, ((void *)wr->sg_list[i].addr),
			       wr->sg_list[i].length);
			dseg += wr->sg_list[i].length;
		}
	} else {
		roce_set_bit(rc_sq_wqe->byte_20,
			     V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 1);

		ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
		if (ret)
			return ret;

		roce_set_field(rc_sq_wqe->byte_16,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
			       curr_idx - *sge_idx);
	}

	*sge_idx = curr_idx;

	return 0;
}
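/*
 * Two inline modes are used above: payloads up to
 * HNS_ROCE_V2_MAX_RC_INL_INN_SZ are copied directly into the WQE after
 * the base header (INL_TYPE 0), while larger payloads are copied into
 * the extended sge space and only referenced from the WQE (INL_TYPE 1).
 */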
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			     unsigned int *sge_ind,
			     unsigned int valid_num_sge)
{
	struct hns_roce_v2_wqe_data_seg *dseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	int j = 0;
	int i;

	roce_set_field(rc_sq_wqe->byte_20,
		       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
		       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
		       (*sge_ind) & (qp->sge.sge_cnt - 1));

	if (wr->send_flags & IB_SEND_INLINE)
		return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);

	if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
		for (i = 0; i < wr->num_sge; i++) {
			if (likely(wr->sg_list[i].length)) {
				set_data_seg_v2(dseg, wr->sg_list + i);
				dseg++;
			}
		}
	} else {
		for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
			if (likely(wr->sg_list[i].length)) {
				set_data_seg_v2(dseg, wr->sg_list + i);
				dseg++;
				j++;
			}
		}

		set_extend_sge(qp, wr->sg_list + i, sge_ind,
			       valid_num_sge - HNS_ROCE_SGE_IN_WQE);
	}

	roce_set_field(rc_sq_wqe->byte_16,
		       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
		       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);

	return 0;
}
static int check_send_valid(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
		     ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_UD)) {
		ibdev_err(ibdev, "not supported QP(0x%x) type!\n",
			  ibqp->qp_type);
		return -EOPNOTSUPP;
	} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
		   hr_qp->state == IB_QPS_INIT ||
		   hr_qp->state == IB_QPS_RTR)) {
		ibdev_err(ibdev, "failed to post WQE, QP state %u!\n",
			  hr_qp->state);
		return -EINVAL;
	} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
		ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
			  hr_dev->state);
		return -EIO;
	}

	return 0;
}
static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
				    unsigned int *sge_len)
{
	unsigned int valid_num = 0;
	unsigned int len = 0;
	int i;

	for (i = 0; i < wr->num_sge; i++) {
		if (likely(wr->sg_list[i].length)) {
			len += wr->sg_list[i].length;
			valid_num++;
		}
	}

	*sge_len = len;
	return valid_num;
}
static __le32 get_immtdata(const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
	default:
		return 0;
	}
}
static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
			 const struct ib_send_wr *wr)
{
	u32 ib_op = wr->opcode;

	if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
		return -EINVAL;

	ud_sq_wqe->immtdata = get_immtdata(wr);

	roce_set_field(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
		       V2_UD_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));

	return 0;
}
static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
		      struct hns_roce_ah *ah)
{
	struct ib_device *ib_dev = ah->ibah.device;
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
		       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, ah->av.udp_sport);

	roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
		       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit);
	roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
		       V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass);
	roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
		       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel);

	if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL))
		return -EINVAL;

	roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M,
		       V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl);

	ud_sq_wqe->sgid_index = ah->av.gid_index;

	memcpy(ud_sq_wqe->dmac, ah->av.mac, ETH_ALEN);
	memcpy(ud_sq_wqe->dgid, ah->av.dgid, GID_LEN_V2);

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return 0;

	roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
		     ah->av.vlan_en);
	roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M,
		       V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id);

	return 0;
}
static inline int set_ud_wqe(struct hns_roce_qp *qp,
			     const struct ib_send_wr *wr,
			     void *wqe, unsigned int *sge_idx,
			     unsigned int owner_bit)
{
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
	unsigned int curr_idx = *sge_idx;
	unsigned int valid_num_sge;
	u32 msg_len = 0;
	int ret;

	valid_num_sge = calc_wr_sge_num(wr, &msg_len);

	ret = set_ud_opcode(ud_sq_wqe, wr);
	if (WARN_ON(ret))
		return ret;

	ud_sq_wqe->msg_len = cpu_to_le32(msg_len);

	roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S,
		     !!(wr->send_flags & IB_SEND_SIGNALED));

	roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_SE_S,
		     !!(wr->send_flags & IB_SEND_SOLICITED));

	roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M,
		       V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn);

	roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
		       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);

	roce_set_field(ud_sq_wqe->byte_20,
		       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
		       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
		       curr_idx & (qp->sge.sge_cnt - 1));

	ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
			  qp->qkey : ud_wr(wr)->remote_qkey);
	roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M,
		       V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn);

	ret = fill_ud_av(ud_sq_wqe, ah);
	if (ret)
		return ret;

	qp->sl = to_hr_ah(ud_wr(wr)->ah)->av.sl;

	set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);

	/*
	 * The pipeline can sequentially post all valid WQEs into WQ buffer,
	 * including new WQEs waiting for the doorbell to update the PI again.
	 * Therefore, the owner bit of WQE MUST be updated after all fields
	 * and extSGEs have been written into DDR instead of cache.
	 */
	if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
		dma_wmb();

	*sge_idx = curr_idx;
	roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S,
		     owner_bit);

	return 0;
}
static int set_rc_opcode(struct hns_roce_dev *hr_dev,
			 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			 const struct ib_send_wr *wr)
{
	u32 ib_op = wr->opcode;
	int ret = 0;

	rc_sq_wqe->immtdata = get_immtdata(wr);

	switch (ib_op) {
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
		rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
		break;
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		break;
	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
		rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
		break;
	case IB_WR_REG_MR:
		if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
			set_frmr_seg(rc_sq_wqe, reg_wr(wr));
		else
			ret = -EOPNOTSUPP;
		break;
	case IB_WR_LOCAL_INV:
		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
		fallthrough;
	case IB_WR_SEND_WITH_INV:
		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
		break;
	default:
		ret = -EINVAL;
	}

	if (unlikely(ret))
		return ret;

	roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
		       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));

	return ret;
}
static inline int set_rc_wqe(struct hns_roce_qp *qp,
			     const struct ib_send_wr *wr,
			     void *wqe, unsigned int *sge_idx,
			     unsigned int owner_bit)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
	unsigned int curr_idx = *sge_idx;
	unsigned int valid_num_sge;
	u32 msg_len = 0;
	int ret;

	valid_num_sge = calc_wr_sge_num(wr, &msg_len);

	rc_sq_wqe->msg_len = cpu_to_le32(msg_len);

	ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
	if (WARN_ON(ret))
		return ret;

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
		     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
		     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
		     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
	    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
	else if (wr->opcode != IB_WR_REG_MR)
		ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
					&curr_idx, valid_num_sge);

	/*
	 * The pipeline can sequentially post all valid WQEs into WQ buffer,
	 * including new WQEs waiting for the doorbell to update the PI again.
	 * Therefore, the owner bit of WQE MUST be updated after all fields
	 * and extSGEs have been written into DDR instead of cache.
	 */
	if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
		dma_wmb();

	*sge_idx = curr_idx;
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
		     owner_bit);

	return ret;
}
static inline void update_sq_db(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *qp)
{
	/*
	 * Hip08 hardware cannot flush the WQEs in the SQ if the QP state gets
	 * into the error state. Hence, as a workaround to this hardware
	 * limitation, the driver needs to assist in flushing. But the flush
	 * operation uses a mailbox to convey the QP state to the hardware,
	 * which can sleep due to the mutex protection around the mailbox
	 * calls. Hence, use the deferred flush for now.
	 */
	if (unlikely(qp->state == IB_QPS_ERR)) {
		if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
			init_flush_work(hr_dev, qp);
	} else {
		struct hns_roce_v2_db sq_db = {};

		roce_set_field(sq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
			       qp->doorbell_qpn);
		roce_set_field(sq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
			       HNS_ROCE_V2_SQ_DB);

		/* indicates data on new BAR, 0 : SQ doorbell, 1 : DWQE */
		roce_set_bit(sq_db.byte_4, V2_DB_FLAG_S, 0);
		roce_set_field(sq_db.parameter, V2_DB_PRODUCER_IDX_M,
			       V2_DB_PRODUCER_IDX_S, qp->sq.head);
		roce_set_field(sq_db.parameter, V2_DB_SL_M, V2_DB_SL_S,
			       qp->sl);

		hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
	}
}
static inline void update_rq_db(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *qp)
{
	/*
	 * Hip08 hardware cannot flush the WQEs in the RQ if the QP state gets
	 * into the error state. Hence, as a workaround to this hardware
	 * limitation, the driver needs to assist in flushing. But the flush
	 * operation uses a mailbox to convey the QP state to the hardware,
	 * which can sleep due to the mutex protection around the mailbox
	 * calls. Hence, use the deferred flush for now.
	 */
	if (unlikely(qp->state == IB_QPS_ERR)) {
		if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
			init_flush_work(hr_dev, qp);
	} else {
		if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
			*qp->rdb.db_record =
					qp->rq.head & V2_DB_PRODUCER_IDX_M;
		} else {
			struct hns_roce_v2_db rq_db = {};

			roce_set_field(rq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
				       qp->qpn);
			roce_set_field(rq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
				       HNS_ROCE_V2_RQ_DB);
			roce_set_field(rq_db.parameter, V2_DB_PRODUCER_IDX_M,
				       V2_DB_PRODUCER_IDX_S, qp->rq.head);

			hns_roce_write64(hr_dev, (__le32 *)&rq_db,
					 qp->rq.db_reg);
		}
	}
}
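/*
 * Two doorbell styles appear above: a record doorbell just stores the
 * producer index in a memory location read by hardware, while a register
 * doorbell builds a hns_roce_v2_db and writes it to the doorbell
 * register through hns_roce_write64().
 */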
static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
			      u64 __iomem *dest)
{
#define HNS_ROCE_WRITE_TIMES 8
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	int i;

	if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
		for (i = 0; i < HNS_ROCE_WRITE_TIMES; i++)
			writeq_relaxed(*(val + i), dest + i);
}
static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
		       void *wqe)
{
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;

	/* All kinds of DirectWQE have the same header field layout */
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FLAG_S, 1);
	roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M,
		       V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl);
	roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M,
		       V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, qp->sl >> 2);
	roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M,
		       V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head);

	hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
}
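/*
 * DirectWQE bypasses the normal doorbell: instead of writing only the
 * producer index, the entire WQE is written through the doorbell BAR
 * with hns_roce_write512(), sparing hardware a DMA read of the WQE. It
 * is only used for a single WQE posted to an otherwise empty SQ (see
 * hns_roce_v2_post_send() below).
 */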
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	unsigned long flags = 0;
	unsigned int owner_bit;
	unsigned int sge_idx;
	unsigned int wqe_idx;
	void *wqe = NULL;
	u32 nreq;
	int ret;

	spin_lock_irqsave(&qp->sq.lock, flags);

	ret = check_send_valid(hr_dev, qp);
	if (unlikely(ret)) {
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	sge_idx = qp->next_sge;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
				  wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = hns_roce_get_send_wqe(qp, wqe_idx);
		qp->sq.wrid[wqe_idx] = wr->wr_id;
		owner_bit =
		       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);

		/* Corresponding to the QP type, wqe process separately */
		if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
			ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
		else if (ibqp->qp_type == IB_QPT_RC)
			ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);

		if (unlikely(ret)) {
			*bad_wr = wr;
			goto out;
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;
		qp->next_sge = sge_idx;

		if (nreq == 1 && qp->sq.head == qp->sq.tail + 1 &&
		    (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
			write_dwqe(hr_dev, qp, wqe);
		else
			update_sq_db(hr_dev, qp);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}
static int check_recv_valid(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
		     ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_UD)) {
		ibdev_err(ibdev, "unsupported qp type, qp_type = %d.\n",
			  ibqp->qp_type);
		return -EOPNOTSUPP;
	}

	if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
		return -EIO;

	if (hr_qp->state == IB_QPS_RESET)
		return -EINVAL;

	return 0;
}
static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
				 u32 max_sge, bool rsv)
{
	struct hns_roce_v2_wqe_data_seg *dseg = wqe;
	u32 i, cnt;

	for (i = 0, cnt = 0; i < wr->num_sge; i++) {
		/* Skip zero-length sge */
		if (!wr->sg_list[i].length)
			continue;
		set_data_seg_v2(dseg + cnt, wr->sg_list + i);
		cnt++;
	}

	/* Fill a reserved sge to make hw stop reading remaining segments */
	if (rsv) {
		dseg[cnt].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
		dseg[cnt].addr = 0;
		dseg[cnt].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
	} else {
		/* Clear remaining segments to make ROCEE ignore sges */
		if (cnt < max_sge)
			memset(dseg + cnt, 0,
			       (max_sge - cnt) * HNS_ROCE_SGE_SIZE);
	}
}
static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
			u32 wqe_idx, u32 max_sge)
{
	struct hns_roce_rinl_sge *sge_list;
	void *wqe = NULL;
	u32 i;

	wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
	fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);

	/* rq support inline data */
	if (hr_qp->rq_inl_buf.wqe_cnt) {
		sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
		hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge;
		for (i = 0; i < wr->num_sge; i++) {
			sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
			sge_list[i].len = wr->sg_list[i].length;
		}
	}
}
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 wqe_idx, nreq, max_sge;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&hr_qp->rq.lock, flags);

	ret = check_recv_valid(hr_dev, hr_qp);
	if (unlikely(ret)) {
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
						  hr_qp->ibqp.recv_cq))) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > max_sge)) {
			ibdev_err(ibdev, "num_sge = %d > max_sge = %u.\n",
				  wr->num_sge, max_sge);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
		fill_rq_wqe(hr_qp, wr, wqe_idx, max_sge);
		hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;

		update_rq_db(hr_dev, hr_qp);
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}
static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)
{
	return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
}

static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n)
{
	return hns_roce_buf_offset(idx_que->mtr.kmem,
				   n << idx_que->entry_shift);
}

static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, u32 wqe_index)
{
	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
	srq->idx_que.tail++;

	spin_unlock(&srq->lock);
}
static int hns_roce_srqwq_overflow(struct hns_roce_srq *srq)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;

	return idx_que->head - idx_que->tail >= srq->wqe_cnt;
}
static int check_post_srq_valid(struct hns_roce_srq *srq, u32 max_sge,
				const struct ib_recv_wr *wr)
{
	struct ib_device *ib_dev = srq->ibsrq.device;

	if (unlikely(wr->num_sge > max_sge)) {
		ibdev_err(ib_dev,
			  "failed to check sge, wr->num_sge = %d, max_sge = %u.\n",
			  wr->num_sge, max_sge);
		return -EINVAL;
	}

	if (unlikely(hns_roce_srqwq_overflow(srq))) {
		ibdev_err(ib_dev,
			  "failed to check srqwq status, srqwq is full.\n");
		return -ENOMEM;
	}

	return 0;
}
static int get_srq_wqe_idx(struct hns_roce_srq *srq, u32 *wqe_idx)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	u32 pos;

	pos = find_first_zero_bit(idx_que->bitmap, srq->wqe_cnt);
	if (unlikely(pos == srq->wqe_cnt))
		return -ENOSPC;

	bitmap_set(idx_que->bitmap, pos, 1);
	*wqe_idx = pos;
	return 0;
}
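/*
 * SRQ WQEs can complete out of order, so free slots are tracked with a
 * bitmap: get_srq_wqe_idx() claims the first clear bit and
 * hns_roce_free_srq_wqe() clears it again on completion, while the index
 * queue written by fill_wqe_idx() below tells hardware which slot each
 * posted WR occupies.
 */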
static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	unsigned int head;
	__le32 *buf;

	head = idx_que->head & (srq->wqe_cnt - 1);

	buf = get_idx_buf(idx_que, head);
	*buf = cpu_to_le32(wqe_idx);

	idx_que->head++;
}
static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
				     const struct ib_recv_wr *wr,
				     const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_v2_db srq_db;
	unsigned long flags;
	int ret = 0;
	u32 max_sge;
	u32 wqe_idx;
	void *wqe;
	u32 nreq;

	spin_lock_irqsave(&srq->lock, flags);

	max_sge = srq->max_gs - srq->rsv_sge;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ret = check_post_srq_valid(srq, max_sge, wr);
		if (ret) {
			*bad_wr = wr;
			break;
		}

		ret = get_srq_wqe_idx(srq, &wqe_idx);
		if (unlikely(ret)) {
			*bad_wr = wr;
			break;
		}

		wqe = get_srq_wqe_buf(srq, wqe_idx);
		fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
		fill_wqe_idx(srq, wqe_idx);
		srq->wrid[wqe_idx] = wr->wr_id;
	}

	if (likely(nreq)) {
		roce_set_field(srq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
			       srq->srqn);
		roce_set_field(srq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
			       HNS_ROCE_V2_SRQ_DB);
		roce_set_field(srq_db.parameter, V2_DB_PRODUCER_IDX_M,
			       V2_DB_PRODUCER_IDX_S, srq->idx_que.head);

		hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg);
	}

	spin_unlock_irqrestore(&srq->lock, flags);

	return ret;
}
static u32 hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
				      unsigned long instance_stage,
				      unsigned long reset_stage)
{
	/* When hardware reset has been completed once or more, we should stop
	 * sending mailbox&cmq&doorbell to hardware. If now in .init_instance()
	 * function, we should exit with error. If now at HNAE3_INIT_CLIENT
	 * stage of soft reset process, we should exit with error, and then
	 * HNAE3_INIT_CLIENT related process can roll back the operation, such
	 * as notifying hardware to free resources; the HNAE3_INIT_CLIENT
	 * related process will exit with error to notify the NIC driver to
	 * reschedule the soft reset process once again.
	 */
	hr_dev->is_reset = true;
	hr_dev->dis_db = true;

	if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}
static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
					unsigned long instance_stage,
					unsigned long reset_stage)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	/* When hardware reset is detected, we should stop sending
	 * mailbox&cmq&doorbell to hardware. If now in .init_instance()
	 * function, we should exit with error. If now at HNAE3_INIT_CLIENT
	 * stage of soft reset process, we should exit with error, and then
	 * HNAE3_INIT_CLIENT related process can roll back the operation, such
	 * as notifying hardware to free resources; the HNAE3_INIT_CLIENT
	 * related process will exit with error to notify the NIC driver to
	 * reschedule the soft reset process once again.
	 */
	hr_dev->dis_db = true;
	if (!ops->get_hw_reset_stat(handle))
		hr_dev->is_reset = true;

	if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}
static u32 hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	/* When software reset is detected at .init_instance() function, we
	 * should stop sending mailbox&cmq&doorbell to hardware, and exit
	 * with error.
	 */
	hr_dev->dis_db = true;
	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
		hr_dev->is_reset = true;

	return CMD_RST_PRC_EBUSY;
}
static u32 check_aedev_reset_status(struct hns_roce_dev *hr_dev,
				    struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long instance_stage; /* the current instance stage */
	unsigned long reset_stage; /* the current reset stage */
	unsigned long reset_cnt;
	bool sw_resetting;
	bool hw_resetting;

	/* Get information about reset from NIC driver or RoCE driver itself,
	 * the meaning of the following variables from NIC driver are
	 * described as below:
	 * reset_cnt -- The count value of completed hardware reset.
	 * hw_resetting -- Whether hardware device is resetting now.
	 * sw_resetting -- Whether NIC's software reset process is running now.
	 */
	instance_stage = handle->rinfo.instance_state;
	reset_stage = handle->rinfo.reset_state;
	reset_cnt = ops->ae_dev_reset_cnt(handle);
	if (reset_cnt != hr_dev->reset_cnt)
		return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
						  reset_stage);

	hw_resetting = ops->get_cmdq_stat(handle);
	if (hw_resetting)
		return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
						    reset_stage);

	sw_resetting = ops->ae_dev_resetting(handle);
	if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
		return hns_roce_v2_cmd_sw_resetting(hr_dev);

	return CMD_RST_PRC_OTHERS;
}
static bool check_device_is_in_reset(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle))
		return true;

	if (ops->get_hw_reset_stat(handle))
		return true;

	if (ops->ae_dev_resetting(handle))
		return true;

	return false;
}
static bool v2_chk_mbox_is_avail(struct hns_roce_dev *hr_dev, bool *busy)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	u32 status;

	if (hr_dev->is_reset)
		status = CMD_RST_PRC_SUCCESS;
	else
		status = check_aedev_reset_status(hr_dev, priv->handle);

	*busy = (status == CMD_RST_PRC_EBUSY);

	return status == CMD_RST_PRC_OTHERS;
}
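/*
 * Summary of the reset checks above: a completed hardware reset, an
 * in-progress hardware reset and an in-progress software reset are each
 * mapped to a CMD_RST_PRC_* code, and v2_chk_mbox_is_avail() folds that
 * into "mailbox usable" (CMD_RST_PRC_OTHERS), "retry later"
 * (CMD_RST_PRC_EBUSY) or "report success without sending"
 * (CMD_RST_PRC_SUCCESS).
 */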
static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
					     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;

		dev_err_ratelimited(hr_dev->dev,
				    "failed to map cmq desc addr.\n");
		return -ENOMEM;
	}

	return 0;
}
static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
			 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
			 DMA_BIDIRECTIONAL);

	ring->desc_dma_addr = 0;
	kfree(ring->desc);
}
static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
					    &priv->cmq.csq : &priv->cmq.crq;

	ring->flag = ring_type;
	ring->head = 0;

	return hns_roce_alloc_cmq_desc(hr_dev, ring);
}
static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
					    &priv->cmq.csq : &priv->cmq.crq;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == TYPE_CSQ) {
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
			   (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);

		/* Make sure to write tail first and then head */
		roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
		roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
	} else {
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
			   (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
		roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
	}
}
static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	int ret;

	/* Setup the queue entries for command queue */
	priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
	priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;

	/* Setup the lock for command queue */
	spin_lock_init(&priv->cmq.csq.lock);
	spin_lock_init(&priv->cmq.crq.lock);

	/* Setup Tx write back timeout */
	priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

	/* Init CSQ */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
	if (ret) {
		dev_err_ratelimited(hr_dev->dev,
				    "failed to init CSQ, ret = %d.\n", ret);
		return ret;
	}

	/* Init CRQ */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
	if (ret) {
		dev_err_ratelimited(hr_dev->dev,
				    "failed to init CRQ, ret = %d.\n", ret);
		goto err_crq;
	}

	/* Init CSQ REG */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);

	/* Init CRQ REG */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);

	return 0;

err_crq:
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);

	return ret;
}
static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
}
static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
					  enum hns_roce_opcode_type opcode,
					  bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag =
		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
	u32 tail = roce_read(hr_dev, ROCEE_TX_CMQ_TAIL_REG);
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	return tail == priv->cmq.csq.head;
}
static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cmq_desc *desc, int num)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	u32 timeout = 0;
	u16 desc_ret;
	u32 tail;
	int ret;
	int i;

	spin_lock_bh(&csq->lock);

	tail = csq->head;

	for (i = 0; i < num; i++) {
		csq->desc[csq->head++] = desc[i];
		if (csq->head == csq->desc_num)
			csq->head = 0;
	}

	/* Write to hardware */
	roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, csq->head);

	/* If the command is sync, wait for the firmware to write back,
	 * if multi descriptors to be sent, use the first one to check.
	 */
	if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
		do {
			if (hns_roce_cmq_csq_done(hr_dev))
				break;
			udelay(1);
		} while (++timeout < priv->cmq.tx_timeout);
	}

	if (hns_roce_cmq_csq_done(hr_dev)) {
		for (ret = 0, i = 0; i < num; i++) {
			/* check the result of hardware write back */
			desc[i] = csq->desc[tail++];
			if (tail == csq->desc_num)
				tail = 0;

			desc_ret = le16_to_cpu(desc[i].retval);
			if (likely(desc_ret == CMD_EXEC_SUCCESS))
				continue;

			dev_err_ratelimited(hr_dev->dev,
					    "Cmdq IO error, opcode = %x, return = %x\n",
					    desc->opcode, desc_ret);
			ret = -EIO;
		}
	} else {
		/* FW/HW reset or incorrect number of desc */
		tail = roce_read(hr_dev, ROCEE_TX_CMQ_TAIL_REG);
		dev_warn(hr_dev->dev, "CMDQ move tail from %d to %d\n",
			 csq->head, tail);
		csq->head = tail;

		ret = -EAGAIN;
	}

	spin_unlock_bh(&csq->lock);

	return ret;
}
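/*
 * CSQ protocol implied above: the driver copies descriptors into the
 * ring and advances the head register, firmware advances the tail
 * register as it consumes them, and hns_roce_cmq_csq_done() simply
 * compares the two. On timeout the head is rolled back to the hardware
 * tail so the ring indices stay consistent.
 */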
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			     struct hns_roce_cmq_desc *desc, int num)
{
	bool busy;
	int ret;

	if (!v2_chk_mbox_is_avail(hr_dev, &busy))
		return busy ? -EBUSY : 0;

	ret = __hns_roce_cmq_send(hr_dev, desc, num);
	if (ret) {
		if (!v2_chk_mbox_is_avail(hr_dev, &busy))
			return busy ? -EBUSY : 0;
	}

	return ret;
}
static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
			       dma_addr_t base_addr, u16 op)
{
	struct hns_roce_cmd_mailbox *mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	int ret;

	if (IS_ERR(mbox))
		return PTR_ERR(mbox);

	ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, obj, 0, op,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	hns_roce_free_cmd_mailbox(hr_dev, mbox);

	return ret;
}
static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_version *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_version *)desc.data;
	hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
	hr_dev->vendor_id = hr_dev->pci_dev->vendor;

	return 0;
}
static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
					struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long end;

	hr_dev->dis_db = true;

	dev_warn(hr_dev->dev,
		 "Func clear is pending, device in resetting state.\n");
	end = HNS_ROCE_V2_HW_RST_TIMEOUT;
	while (end) {
		if (!ops->get_hw_reset_stat(handle)) {
			hr_dev->is_reset = true;
			dev_info(hr_dev->dev,
				 "Func clear success after reset.\n");
			return;
		}
		msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
		end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
	}

	dev_warn(hr_dev->dev, "Func clear failed.\n");
}
static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
					struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long end;

	hr_dev->dis_db = true;

	dev_warn(hr_dev->dev,
		 "Func clear is pending, device in resetting state.\n");
	end = HNS_ROCE_V2_HW_RST_TIMEOUT;
	while (end) {
		if (ops->ae_dev_reset_cnt(handle) !=
		    hr_dev->reset_cnt) {
			hr_dev->is_reset = true;
			dev_info(hr_dev->dev,
				 "Func clear success after sw reset\n");
			return;
		}
		msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
		end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
	}

	dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
}
static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
				       int flag)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
		hr_dev->dis_db = true;
		hr_dev->is_reset = true;
		dev_info(hr_dev->dev, "Func clear success after reset.\n");
		return;
	}

	if (ops->get_hw_reset_stat(handle)) {
		func_clr_hw_resetting_state(hr_dev, handle);
		return;
	}

	if (ops->ae_dev_resetting(handle) &&
	    handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) {
		func_clr_sw_resetting_state(hr_dev, handle);
		return;
	}

	if (retval && !flag)
		dev_warn(hr_dev->dev,
			 "Func clear read failed, ret = %d.\n", retval);

	dev_warn(hr_dev->dev, "Func clear failed.\n");
}
static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
{
	bool fclr_write_fail_flag = false;
	struct hns_roce_func_clear *resp;
	struct hns_roce_cmq_desc desc;
	unsigned long end;
	int ret = 0;

	if (check_device_is_in_reset(hr_dev))
		goto out;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
	resp = (struct hns_roce_func_clear *)desc.data;
	resp->rst_funcid_en = cpu_to_le32(vf_id);

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		fclr_write_fail_flag = true;
		dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
			ret);
		goto out;
	}

	msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
	end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
	while (end) {
		if (check_device_is_in_reset(hr_dev))
			goto out;
		msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
		end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;

		hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
					      true);

		resp->rst_funcid_en = cpu_to_le32(vf_id);
		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret)
			continue;

		if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
			if (vf_id == 0)
				hr_dev->is_reset = true;
			return;
		}
	}

out:
	hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag);
}
static void hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
{
	enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cmq_req *req_a;

	req_a = (struct hns_roce_cmq_req *)desc[0].data;
	hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
	desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
	hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id);
	hns_roce_cmq_send(hr_dev, desc, 2);
}
static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
	int i;

	for (i = hr_dev->func_num - 1; i >= 0; i--) {
		__hns_roce_function_clear(hr_dev, i);
		if (i != 0)
			hns_roce_free_vf_resource(hr_dev, i);
	}
}
static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_fw_info *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_fw_info *)desc.data;
	hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));

	return 0;
}
static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	int ret;

	if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) {
		hr_dev->func_num = 1;
		return 0;
	}

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_FUNC_INFO,
				      true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		hr_dev->func_num = 1;
		return ret;
	}

	hr_dev->func_num = le32_to_cpu(desc.func_info.own_func_num);
	hr_dev->cong_algo_tmpl_id = le32_to_cpu(desc.func_info.own_mac_id);

	return 0;
}
static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
				      false);

	hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, 0x3e8);
	hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
	struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
	struct hns_roce_caps *caps = &hr_dev->caps;
	enum hns_roce_opcode_type opcode;
	u32 func_num;
	int ret;

	if (is_vf) {
		opcode = HNS_ROCE_OPC_QUERY_VF_RES;
		func_num = 1;
	} else {
		opcode = HNS_ROCE_OPC_QUERY_PF_RES;
		func_num = hr_dev->func_num;
	}

	hns_roce_cmq_setup_basic_desc(&desc[0], opcode, true);
	desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	hns_roce_cmq_setup_basic_desc(&desc[1], opcode, true);

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
	if (ret)
		return ret;

	caps->qpc_bt_num = hr_reg_read(r_a, FUNC_RES_A_QPC_BT_NUM) / func_num;
	caps->srqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_SRQC_BT_NUM) / func_num;
	caps->cqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_CQC_BT_NUM) / func_num;
	caps->mpt_bt_num = hr_reg_read(r_a, FUNC_RES_A_MPT_BT_NUM) / func_num;
	caps->eqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_EQC_BT_NUM) / func_num;
	caps->smac_bt_num = hr_reg_read(r_b, FUNC_RES_B_SMAC_NUM) / func_num;
	caps->sgid_bt_num = hr_reg_read(r_b, FUNC_RES_B_SGID_NUM) / func_num;
	caps->sccc_bt_num = hr_reg_read(r_b, FUNC_RES_B_SCCC_BT_NUM) / func_num;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		caps->sl_num = hr_reg_read(r_b, FUNC_RES_V_QID_NUM) / func_num;
		caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_V_GMV_BT_NUM) /
				   func_num;
	} else {
		caps->sl_num = hr_reg_read(r_b, FUNC_RES_B_QID_NUM) / func_num;
		caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_B_GMV_BT_NUM) /
				   func_num;
	}

	return 0;
}
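/*
 * PF and VFs draw from the same hardware pools, so the queried totals
 * are divided by func_num to get an even per-function share; a VF
 * queries with func_num = 1 and receives its own quota directly.
 */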
static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
	return load_func_res_caps(hr_dev, false);
}

static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
{
	return load_func_res_caps(hr_dev, true);
}
static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	struct hns_roce_caps *caps = &hr_dev->caps;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
				      true);

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	caps->qpc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_QPC_ITEM_NUM);
	caps->cqc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_CQC_ITEM_NUM);

	return 0;
}
static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
					  u32 vf_id)
{
	struct hns_roce_vf_switch *swt;
	struct hns_roce_cmq_desc desc;
	int ret;

	swt = (struct hns_roce_vf_switch *)desc.data;
	hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
	swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
	roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M,
		       VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	desc.flag =
		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
	desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev)
{
	u32 vf_id;
	int ret;

	for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
		ret = __hns_roce_set_vf_switch_param(hr_dev, vf_id);
		if (ret)
			return ret;
	}

	return 0;
}
static int __hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
	struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
	enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
	struct hns_roce_caps *caps = &hr_dev->caps;

	hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
	desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);

	hr_reg_write(r_a, FUNC_RES_A_VF_ID, vf_id);

	hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num);
	hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num);
	hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num);
	hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num);
	hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num);

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num);
		hr_reg_write(r_b, FUNC_RES_B_GMV_BT_IDX,
			     vf_id * caps->gmv_bt_num);
	} else {
		hr_reg_write(r_b, FUNC_RES_B_SGID_NUM, caps->sgid_bt_num);
		hr_reg_write(r_b, FUNC_RES_B_SGID_IDX,
			     vf_id * caps->sgid_bt_num);
		hr_reg_write(r_b, FUNC_RES_B_SMAC_NUM, caps->smac_bt_num);
		hr_reg_write(r_b, FUNC_RES_B_SMAC_IDX,
			     vf_id * caps->smac_bt_num);
	}

	return hns_roce_cmq_send(hr_dev, desc, 2);
}
static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
	u32 vf_id;
	int ret;

	for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
		ret = __hns_roce_alloc_vf_resource(hr_dev, vf_id);
		if (ret)
			return ret;
	}

	return 0;
}
static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	struct hns_roce_caps *caps = &hr_dev->caps;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);

	hr_reg_write(req, CFG_BT_ATTR_QPC_BA_PGSZ,
		     caps->qpc_ba_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_QPC_BUF_PGSZ,
		     caps->qpc_buf_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_QPC_HOPNUM,
		     to_hr_hem_hopnum(caps->qpc_hop_num, caps->num_qps));

	hr_reg_write(req, CFG_BT_ATTR_SRQC_BA_PGSZ,
		     caps->srqc_ba_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_SRQC_BUF_PGSZ,
		     caps->srqc_buf_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_SRQC_HOPNUM,
		     to_hr_hem_hopnum(caps->srqc_hop_num, caps->num_srqs));

	hr_reg_write(req, CFG_BT_ATTR_CQC_BA_PGSZ,
		     caps->cqc_ba_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_CQC_BUF_PGSZ,
		     caps->cqc_buf_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_CQC_HOPNUM,
		     to_hr_hem_hopnum(caps->cqc_hop_num, caps->num_cqs));

	hr_reg_write(req, CFG_BT_ATTR_MPT_BA_PGSZ,
		     caps->mpt_ba_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_MPT_BUF_PGSZ,
		     caps->mpt_buf_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_MPT_HOPNUM,
		     to_hr_hem_hopnum(caps->mpt_hop_num, caps->num_mtpts));

	hr_reg_write(req, CFG_BT_ATTR_SCCC_BA_PGSZ,
		     caps->sccc_ba_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_SCCC_BUF_PGSZ,
		     caps->sccc_buf_pg_sz + PG_SHIFT_OFFSET);
	hr_reg_write(req, CFG_BT_ATTR_SCCC_HOPNUM,
		     to_hr_hem_hopnum(caps->sccc_hop_num, caps->num_qps));

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
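/*
 * Each context table is described to hardware by three values: the
 * base-address page size and the buffer page size (both programmed as
 * pg_sz + PG_SHIFT_OFFSET, i.e. an absolute page shift) and the hop
 * number, which is how many levels of base-address tables are walked
 * to reach an entry.
 */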
static void set_default_caps(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_caps *caps = &hr_dev->caps;

	caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
	caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
	caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
	caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
	caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
	caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
	caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
	caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
	caps->num_uars = HNS_ROCE_V2_UAR_NUM;
	caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
	caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
	caps->num_comp_vectors =
			min_t(u32, caps->eqc_bt_num - 1,
			      (u32)priv->handle->rinfo.num_vectors - 2);
	caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
	caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
	caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
	caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
	caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
	caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
	caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
	caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
	caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
	caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
	caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
	caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
	caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ;
	caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
	caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
	caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ;
	caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
	caps->reserved_lkey = 0;
	caps->reserved_pds = 0;
	caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
	caps->reserved_mrws = 1;
	caps->reserved_uars = 0;
	caps->reserved_cqs = 0;
	caps->reserved_srqs = 0;
	caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;

	caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
	caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
	caps->wqe_sq_hop_num = HNS_ROCE_SQWQE_HOP_NUM;
	caps->wqe_sge_hop_num = HNS_ROCE_EXT_SGE_HOP_NUM;
	caps->wqe_rq_hop_num = HNS_ROCE_RQWQE_HOP_NUM;
	caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
	caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM;
	caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM;
	caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
	caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;

	caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
		      HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
		      HNS_ROCE_CAP_FLAG_CQ_RECORD_DB |
		      HNS_ROCE_CAP_FLAG_QP_RECORD_DB;

	caps->pkey_table_len[0] = 1;
	caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
	caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = 0;
	caps->max_mtu = IB_MTU_4096;

	caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
	caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;

	caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
		       HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
		       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL | HNS_ROCE_CAP_FLAG_XRC;

	caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
	caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
	caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
	caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
	caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
	caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;

	caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
		caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
		caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
		caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
		caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
		caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
		caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
							  caps->gmv_entry_sz);
		caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
		caps->gid_table_len[0] = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
							     caps->gmv_entry_sz);
		caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INL_EXT;
	} else {
		caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
		caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
		caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
		caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
		caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
		caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
		caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
	}
}
2015 static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
2016 u32 *buf_page_size, u32 *bt_page_size, u32 hem_type)
2019 u64 bt_chunk_size = PAGE_SIZE;
2020 u64 buf_chunk_size = PAGE_SIZE;
2021 u64 obj_per_chunk_default = buf_chunk_size / obj_size;
2023 *buf_page_size = 0;
2024 *bt_page_size = 0;
2026 switch (hop_num) {
2027 case 3:
2028 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2029 (bt_chunk_size / BA_BYTE_LEN) *
2030 (bt_chunk_size / BA_BYTE_LEN) *
2031 obj_per_chunk_default;
2032 break;
2033 case 2:
2034 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2035 (bt_chunk_size / BA_BYTE_LEN) *
2036 obj_per_chunk_default;
2037 break;
2038 case 1:
2039 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2040 obj_per_chunk_default;
2041 break;
2042 case HNS_ROCE_HOP_NUM_0:
2043 obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
2044 break;
2045 default:
2046 pr_err("table %u does not support hop_num = %u!\n", hem_type,
2047 hop_num);
2048 return;
2049 }
2051 if (hem_type >= HEM_TYPE_MTT)
2052 *bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
2053 else
2054 *buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
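/*
 * Worked example for calc_pg_sz() (illustrative numbers only, assuming
 * 4 KB pages and BA_BYTE_LEN == 8): with obj_size = 512 and two BA levels
 * (case 2), one chunk covers ctx_bt_num * 512 * 512 * 8 objects; for
 * ctx_bt_num = 1 that is 2097152, so 1M objects need
 * DIV_ROUND_UP(1M, 2097152) = 1 chunk and ilog2(1) = 0. The results are
 * shifts on top of PAGE_SHIFT, i.e. a page size of PAGE_SIZE << pg_sz,
 * not a byte count.
 */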
2057 static void set_hem_page_size(struct hns_roce_dev *hr_dev)
2059 struct hns_roce_caps *caps = &hr_dev->caps;
2062 caps->eqe_ba_pg_sz = 0;
2063 caps->eqe_buf_pg_sz = 0;
2066 caps->tsq_buf_pg_sz = 0;
2069 caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
2070 caps->pbl_buf_pg_sz = 0;
2071 calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
2072 caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
2076 caps->qpc_timer_ba_pg_sz = 0;
2077 caps->qpc_timer_buf_pg_sz = 0;
2078 caps->mtt_ba_pg_sz = 0;
2079 caps->mtt_buf_pg_sz = 0;
2080 calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
2081 caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
2084 if (caps->flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
2085 calc_pg_sz(caps->num_qps, caps->sccc_sz, caps->sccc_hop_num,
2086 caps->sccc_bt_num, &caps->sccc_buf_pg_sz,
2087 &caps->sccc_ba_pg_sz, HEM_TYPE_SCCC);
2090 calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
2091 caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
2093 calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num,
2094 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
2096 if (caps->cqc_timer_entry_sz)
2097 calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
2098 caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
2099 &caps->cqc_timer_buf_pg_sz,
2100 &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);
2103 if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) {
2104 calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz,
2105 caps->srqc_hop_num, caps->srqc_bt_num,
2106 &caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz,
2108 calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
2109 caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
2110 &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
2111 calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz,
2112 caps->idx_hop_num, 1, &caps->idx_buf_pg_sz,
2113 &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
2117 caps->gmv_ba_pg_sz = 0;
2118 caps->gmv_buf_pg_sz = 0;
2121 static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
2123 struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
2124 struct hns_roce_caps *caps = &hr_dev->caps;
2125 struct hns_roce_query_pf_caps_a *resp_a;
2126 struct hns_roce_query_pf_caps_b *resp_b;
2127 struct hns_roce_query_pf_caps_c *resp_c;
2128 struct hns_roce_query_pf_caps_d *resp_d;
2129 struct hns_roce_query_pf_caps_e *resp_e;
2135 for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
2136 hns_roce_cmq_setup_basic_desc(&desc[i],
2137 HNS_ROCE_OPC_QUERY_PF_CAPS_NUM,
2138 true);
2139 if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
2140 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2142 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2145 ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
2146 if (ret)
2147 return ret;
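/*
 * Chaining sketch: every descriptor except the last carries
 * HNS_ROCE_CMD_FLAG_NEXT, so the firmware treats desc[0..4] as one
 * multi-descriptor query and fills resp_a through resp_e in a single
 * round trip.
 */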
2149 resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
2150 resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
2151 resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
2152 resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
2153 resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
2155 caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
2156 caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
2157 caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
2158 caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
2159 caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
2160 caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
2161 caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
2162 caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer);
2163 caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
2164 caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
2165 caps->num_aeq_vectors = resp_a->num_aeq_vectors;
2166 caps->num_other_vectors = resp_a->num_other_vectors;
2167 caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
2168 caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
2169 caps->max_srq_desc_sz = resp_a->max_srq_desc_sz;
2170 caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
2172 caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
2173 caps->irrl_entry_sz = resp_b->irrl_entry_sz;
2174 caps->trrl_entry_sz = resp_b->trrl_entry_sz;
2175 caps->cqc_entry_sz = resp_b->cqc_entry_sz;
2176 caps->srqc_entry_sz = resp_b->srqc_entry_sz;
2177 caps->idx_entry_sz = resp_b->idx_entry_sz;
2178 caps->sccc_sz = resp_b->sccc_sz;
2179 caps->max_mtu = resp_b->max_mtu;
2180 caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
2181 caps->min_cqes = resp_b->min_cqes;
2182 caps->min_wqes = resp_b->min_wqes;
2183 caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
2184 caps->pkey_table_len[0] = resp_b->pkey_table_len;
2185 caps->phy_num_uars = resp_b->phy_num_uars;
2186 ctx_hop_num = resp_b->ctx_hop_num;
2187 pbl_hop_num = resp_b->pbl_hop_num;
2189 caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds,
2190 V2_QUERY_PF_CAPS_C_NUM_PDS_M,
2191 V2_QUERY_PF_CAPS_C_NUM_PDS_S);
2192 caps->flags = roce_get_field(resp_c->cap_flags_num_pds,
2193 V2_QUERY_PF_CAPS_C_CAP_FLAGS_M,
2194 V2_QUERY_PF_CAPS_C_CAP_FLAGS_S);
2195 caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
2196 HNS_ROCE_CAP_FLAGS_EX_SHIFT;
2198 caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs,
2199 V2_QUERY_PF_CAPS_C_NUM_CQS_M,
2200 V2_QUERY_PF_CAPS_C_NUM_CQS_S);
2201 caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs,
2202 V2_QUERY_PF_CAPS_C_MAX_GID_M,
2203 V2_QUERY_PF_CAPS_C_MAX_GID_S);
2205 caps->gid_table_len[0] /= hr_dev->func_num;
2207 caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
2208 V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
2209 V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
2210 caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws,
2211 V2_QUERY_PF_CAPS_C_NUM_MRWS_M,
2212 V2_QUERY_PF_CAPS_C_NUM_MRWS_S);
2213 caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps,
2214 V2_QUERY_PF_CAPS_C_NUM_QPS_M,
2215 V2_QUERY_PF_CAPS_C_NUM_QPS_S);
2216 caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps,
2217 V2_QUERY_PF_CAPS_C_MAX_ORD_M,
2218 V2_QUERY_PF_CAPS_C_MAX_ORD_S);
2219 caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
2220 caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
2221 caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs,
2222 V2_QUERY_PF_CAPS_D_NUM_SRQS_M,
2223 V2_QUERY_PF_CAPS_D_NUM_SRQS_S);
2224 caps->cong_type = roce_get_field(resp_d->wq_hop_num_max_srqs,
2225 V2_QUERY_PF_CAPS_D_CONG_TYPE_M,
2226 V2_QUERY_PF_CAPS_D_CONG_TYPE_S);
2227 caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
2229 caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth,
2230 V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M,
2231 V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S);
2232 caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth,
2233 V2_QUERY_PF_CAPS_D_NUM_CEQS_M,
2234 V2_QUERY_PF_CAPS_D_NUM_CEQS_S);
2236 caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth,
2237 V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M,
2238 V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S);
2239 caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
2240 V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M,
2241 V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S);
2242 caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
2243 V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M,
2244 V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S);
2245 caps->reserved_pds = roce_get_field(resp_d->num_uars_rsv_pds,
2246 V2_QUERY_PF_CAPS_D_RSV_PDS_M,
2247 V2_QUERY_PF_CAPS_D_RSV_PDS_S);
2248 caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds,
2249 V2_QUERY_PF_CAPS_D_NUM_UARS_M,
2250 V2_QUERY_PF_CAPS_D_NUM_UARS_S);
2251 caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps,
2252 V2_QUERY_PF_CAPS_D_RSV_QPS_M,
2253 V2_QUERY_PF_CAPS_D_RSV_QPS_S);
2254 caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps,
2255 V2_QUERY_PF_CAPS_D_RSV_UARS_M,
2256 V2_QUERY_PF_CAPS_D_RSV_UARS_S);
2257 caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
2258 V2_QUERY_PF_CAPS_E_RSV_MRWS_M,
2259 V2_QUERY_PF_CAPS_E_RSV_MRWS_S);
2260 caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
2261 V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M,
2262 V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S);
2263 caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs,
2264 V2_QUERY_PF_CAPS_E_RSV_CQS_M,
2265 V2_QUERY_PF_CAPS_E_RSV_CQS_S);
2266 caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs,
2267 V2_QUERY_PF_CAPS_E_RSV_SRQS_M,
2268 V2_QUERY_PF_CAPS_E_RSV_SRQS_S);
2269 caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey,
2270 V2_QUERY_PF_CAPS_E_RSV_LKEYS_M,
2271 V2_QUERY_PF_CAPS_E_RSV_LKEYS_S);
2272 caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
2273 caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
2274 caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
2275 caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
2277 caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
2278 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
2279 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
2280 caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
2281 caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
2282 caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
2283 caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
2284 caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
2285 caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
2286 caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
2288 caps->qpc_hop_num = ctx_hop_num;
2289 caps->srqc_hop_num = ctx_hop_num;
2290 caps->cqc_hop_num = ctx_hop_num;
2291 caps->mpt_hop_num = ctx_hop_num;
2292 caps->mtt_hop_num = pbl_hop_num;
2293 caps->cqe_hop_num = pbl_hop_num;
2294 caps->srqwqe_hop_num = pbl_hop_num;
2295 caps->idx_hop_num = pbl_hop_num;
2296 caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
2297 V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M,
2298 V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S);
2299 caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
2300 V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M,
2301 V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S);
2302 caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
2303 V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M,
2304 V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S);
2306 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
2307 caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
2308 caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
2309 caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
2310 caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
2311 caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
2312 caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
2313 caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
2314 caps->gmv_entry_sz);
2315 caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
2316 caps->gid_table_len[0] = caps->gmv_bt_num *
2317 (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
2320 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2321 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2326 static int config_hem_entry_size(struct hns_roce_dev *hr_dev, u32 type, u32 val)
2328 struct hns_roce_cmq_desc desc;
2329 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
2331 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
2332 false);
2334 hr_reg_write(req, CFG_HEM_ENTRY_SIZE_TYPE, type);
2335 hr_reg_write(req, CFG_HEM_ENTRY_SIZE_VALUE, val);
2337 return hns_roce_cmq_send(hr_dev, &desc, 1);
2340 static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
2342 struct hns_roce_caps *caps = &hr_dev->caps;
2345 if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
2346 return 0;
2348 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
2349 caps->qpc_sz);
2350 if (ret) {
2351 dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
2352 return ret;
2353 }
2355 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_SCCC_SIZE,
2356 caps->sccc_sz);
2357 if (ret)
2358 dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);
2360 return ret;
2363 static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev)
2367 hr_dev->vendor_part_id = hr_dev->pci_dev->device;
2368 hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
2369 hr_dev->func_num = 1;
2371 ret = hns_roce_query_vf_resource(hr_dev);
2372 if (ret) {
2373 dev_err(hr_dev->dev,
2374 "failed to query VF resource, ret = %d.\n", ret);
2375 return ret;
2376 }
2378 set_default_caps(hr_dev);
2379 set_hem_page_size(hr_dev);
2381 ret = hns_roce_v2_set_bt(hr_dev);
2382 if (ret) {
2383 dev_err(hr_dev->dev,
2384 "failed to configure VF bt attribute, ret = %d.\n",
2385 ret);
2386 return ret;
2387 }
2389 return 0;
2392 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
2394 struct hns_roce_caps *caps = &hr_dev->caps;
2397 ret = hns_roce_cmq_query_hw_info(hr_dev);
2398 if (ret) {
2399 dev_err(hr_dev->dev, "failed to query hardware version, ret = %d.\n",
2400 ret);
2401 return ret;
2402 }
2404 ret = hns_roce_query_fw_ver(hr_dev);
2405 if (ret) {
2406 dev_err(hr_dev->dev, "failed to query firmware version, ret = %d.\n",
2407 ret);
2408 return ret;
2409 }
2411 if (hr_dev->is_vf)
2412 return hns_roce_v2_vf_profile(hr_dev);
2414 ret = hns_roce_query_func_info(hr_dev);
2415 if (ret) {
2416 dev_err(hr_dev->dev, "failed to query function info, ret = %d.\n",
2417 ret);
2418 return ret;
2419 }
2421 ret = hns_roce_config_global_param(hr_dev);
2422 if (ret) {
2423 dev_err(hr_dev->dev, "failed to configure global param, ret = %d.\n",
2424 ret);
2425 return ret;
2426 }
2428 /* Get the resource owned by each PF */
2429 ret = hns_roce_query_pf_resource(hr_dev);
2430 if (ret) {
2431 dev_err(hr_dev->dev, "failed to query pf resource, ret = %d.\n",
2432 ret);
2433 return ret;
2434 }
2436 ret = hns_roce_query_pf_timer_resource(hr_dev);
2437 if (ret) {
2438 dev_err(hr_dev->dev,
2439 "failed to query pf timer resource, ret = %d.\n", ret);
2440 return ret;
2441 }
2443 ret = hns_roce_set_vf_switch_param(hr_dev);
2444 if (ret) {
2445 dev_err(hr_dev->dev,
2446 "failed to set function switch param, ret = %d.\n",
2447 ret);
2448 return ret;
2449 }
2451 hr_dev->vendor_part_id = hr_dev->pci_dev->device;
2452 hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
2454 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
2455 caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
2457 ret = hns_roce_query_pf_caps(hr_dev);
2458 if (ret)
2459 set_default_caps(hr_dev);
2461 ret = hns_roce_alloc_vf_resource(hr_dev);
2462 if (ret) {
2463 dev_err(hr_dev->dev, "failed to allocate vf resource, ret = %d.\n",
2464 ret);
2465 return ret;
2466 }
2468 set_hem_page_size(hr_dev);
2469 ret = hns_roce_v2_set_bt(hr_dev);
2470 if (ret) {
2471 dev_err(hr_dev->dev,
2472 "failed to configure bt attribute, ret = %d.\n", ret);
2473 return ret;
2474 }
2476 /* Configure the size of QPC, SCCC, etc. */
2477 ret = hns_roce_config_entry_size(hr_dev);
2479 return ret;
2482 static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
2483 enum hns_roce_link_table_type type)
2485 struct hns_roce_cmq_desc desc[2];
2486 struct hns_roce_cfg_llm_a *req_a =
2487 (struct hns_roce_cfg_llm_a *)desc[0].data;
2488 struct hns_roce_cfg_llm_b *req_b =
2489 (struct hns_roce_cfg_llm_b *)desc[1].data;
2490 struct hns_roce_v2_priv *priv = hr_dev->priv;
2491 struct hns_roce_link_table *link_tbl;
2492 struct hns_roce_link_table_entry *entry;
2493 enum hns_roce_opcode_type opcode;
2494 u32 page_num;
2496 switch (type) {
2497 case TSQ_LINK_TABLE:
2498 link_tbl = &priv->tsq;
2499 opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
2500 break;
2501 case TPQ_LINK_TABLE:
2502 link_tbl = &priv->tpq;
2503 opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
2504 break;
2505 default:
2506 return -EINVAL;
2507 }
2509 page_num = link_tbl->npages;
2510 entry = link_tbl->table.buf;
2512 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
2513 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2515 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
2517 req_a->base_addr_l = cpu_to_le32(link_tbl->table.map & 0xffffffff);
2518 req_a->base_addr_h = cpu_to_le32(link_tbl->table.map >> 32);
2519 roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_DEPTH_M,
2520 CFG_LLM_QUE_DEPTH_S, link_tbl->npages);
2521 roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_PGSZ_M,
2522 CFG_LLM_QUE_PGSZ_S, link_tbl->pg_sz);
2523 roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_INIT_EN_M,
2524 CFG_LLM_INIT_EN_S, 1);
2525 req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
2526 req_a->head_ba_h_nxtptr = cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
2527 roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M, CFG_LLM_HEAD_PTR_S,
2528 0);
2530 req_b->tail_ba_l = cpu_to_le32(entry[page_num - 1].blk_ba0);
2531 roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
2532 CFG_LLM_TAIL_BA_H_S,
2533 entry[page_num - 1].blk_ba1_nxt_ptr &
2534 HNS_ROCE_LINK_TABLE_BA1_M);
2535 roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M, CFG_LLM_TAIL_PTR_S,
2536 (entry[page_num - 2].blk_ba1_nxt_ptr &
2537 HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
2538 HNS_ROCE_LINK_TABLE_NXT_PTR_S);
2540 return hns_roce_cmq_send(hr_dev, desc, 2);
2543 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
2544 enum hns_roce_link_table_type type)
2546 struct hns_roce_v2_priv *priv = hr_dev->priv;
2547 struct hns_roce_link_table *link_tbl;
2548 struct hns_roce_link_table_entry *entry;
2549 struct device *dev = hr_dev->dev;
2560 case TSQ_LINK_TABLE:
2561 link_tbl = &priv->tsq;
2562 buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
2563 pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
2564 pg_num_b = hr_dev->caps.sl_num * 4 + 2;
2565 break;
2566 case TPQ_LINK_TABLE:
2567 link_tbl = &priv->tpq;
2568 buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
2569 pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
2570 pg_num_b = 2 * 4 * func_num + 2;
2571 break;
2572 default:
2573 return -EINVAL;
2574 }
2576 pg_num = max(pg_num_a, pg_num_b);
2577 size = pg_num * sizeof(struct hns_roce_link_table_entry);
2579 link_tbl->table.buf = dma_alloc_coherent(dev, size,
2580 &link_tbl->table.map,
2582 if (!link_tbl->table.buf)
2585 link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
2587 if (!link_tbl->pg_list)
2588 goto err_kcalloc_failed;
2590 entry = link_tbl->table.buf;
2591 for (i = 0; i < pg_num; ++i) {
2592 link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
2593 &t, GFP_KERNEL);
2594 if (!link_tbl->pg_list[i].buf)
2595 goto err_alloc_buf_failed;
2597 link_tbl->pg_list[i].map = t;
2599 entry[i].blk_ba0 = (u32)(t >> 12);
2600 entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44);
2602 if (i < (pg_num - 1))
2603 entry[i].blk_ba1_nxt_ptr |=
2604 (i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S;
2606 link_tbl->npages = pg_num;
2607 link_tbl->pg_sz = buf_chk_sz;
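/*
 * Entry layout sketch (derived from the shifts above): for a page at DMA
 * address t, blk_ba0 holds t >> 12 and the low bits of blk_ba1_nxt_ptr
 * hold t >> 44; the bits above HNS_ROCE_LINK_TABLE_NXT_PTR_S chain to the
 * next page, so entry 0 of a three-page table points to 1, entry 1 to 2,
 * and the last entry carries no next pointer.
 */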
2609 return hns_roce_config_link_table(hr_dev, type);
2611 err_alloc_buf_failed:
2612 for (i -= 1; i >= 0; i--)
2613 dma_free_coherent(dev, buf_chk_sz,
2614 link_tbl->pg_list[i].buf,
2615 link_tbl->pg_list[i].map);
2616 kfree(link_tbl->pg_list);
2618 err_kcalloc_failed:
2619 dma_free_coherent(dev, size, link_tbl->table.buf,
2620 link_tbl->table.map);
2622 return -ENOMEM;
2626 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
2627 struct hns_roce_link_table *link_tbl)
2629 struct device *dev = hr_dev->dev;
2633 size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
2635 for (i = 0; i < link_tbl->npages; ++i)
2636 if (link_tbl->pg_list[i].buf)
2637 dma_free_coherent(dev, link_tbl->pg_sz,
2638 link_tbl->pg_list[i].buf,
2639 link_tbl->pg_list[i].map);
2640 kfree(link_tbl->pg_list);
2642 dma_free_coherent(dev, size, link_tbl->table.buf,
2643 link_tbl->table.map);
2646 static void free_dip_list(struct hns_roce_dev *hr_dev)
2648 struct hns_roce_dip *hr_dip;
2649 struct hns_roce_dip *tmp;
2650 unsigned long flags;
2652 spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
2654 list_for_each_entry_safe(hr_dip, tmp, &hr_dev->dip_list, node) {
2655 list_del(&hr_dip->node);
2656 kfree(hr_dip);
2657 }
2659 spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
2662 static int get_hem_table(struct hns_roce_dev *hr_dev)
2664 unsigned int qpc_count;
2665 unsigned int cqc_count;
2666 unsigned int gmv_count;
2670 /* Alloc memory for source address table buffer space chunk */
2671 for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
2672 gmv_count++) {
2673 ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
2674 if (ret)
2675 goto err_gmv_failed;
2676 }
2678 if (hr_dev->is_vf)
2679 return 0;
2681 /* Alloc memory for QPC Timer buffer space chunk */
2682 for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
2683 qpc_count++) {
2684 ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
2685 qpc_count);
2686 if (ret) {
2687 dev_err(hr_dev->dev, "QPC Timer get failed\n");
2688 goto err_qpc_timer_failed;
2689 }
2690 }
2692 /* Alloc memory for CQC Timer buffer space chunk */
2693 for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
2694 cqc_count++) {
2695 ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
2696 cqc_count);
2697 if (ret) {
2698 dev_err(hr_dev->dev, "CQC Timer get failed\n");
2699 goto err_cqc_timer_failed;
2700 }
2701 }
2703 return 0;
2705 err_cqc_timer_failed:
2706 for (i = 0; i < cqc_count; i++)
2707 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2709 err_qpc_timer_failed:
2710 for (i = 0; i < qpc_count; i++)
2711 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2713 err_gmv_failed:
2714 for (i = 0; i < gmv_count; i++)
2715 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
2717 return ret;
2720 static void put_hem_table(struct hns_roce_dev *hr_dev)
2724 for (i = 0; i < hr_dev->caps.gmv_entry_num; i++)
2725 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
2730 for (i = 0; i < hr_dev->caps.qpc_timer_bt_num; i++)
2731 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2733 for (i = 0; i < hr_dev->caps.cqc_timer_bt_num; i++)
2734 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2737 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
2739 struct hns_roce_v2_priv *priv = hr_dev->priv;
2742 ret = get_hem_table(hr_dev);
2743 if (ret)
2744 return ret;
2746 if (hr_dev->is_vf)
2747 return 0;
2749 /* TSQ includes SQ doorbell and ack doorbell */
2750 ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
2752 dev_err(hr_dev->dev, "failed to init TSQ, ret = %d.\n", ret);
2753 goto err_tsq_init_failed;
2756 ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
2758 dev_err(hr_dev->dev, "failed to init TPQ, ret = %d.\n", ret);
2759 goto err_tpq_init_failed;
2760 }
2762 return 0;
2764 err_tpq_init_failed:
2765 hns_roce_free_link_table(hr_dev, &priv->tsq);
2767 err_tsq_init_failed:
2768 put_hem_table(hr_dev);
2770 return ret;
2773 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
2775 struct hns_roce_v2_priv *priv = hr_dev->priv;
2777 hns_roce_function_clear(hr_dev);
2779 if (!hr_dev->is_vf) {
2780 hns_roce_free_link_table(hr_dev, &priv->tpq);
2781 hns_roce_free_link_table(hr_dev, &priv->tsq);
2784 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
2785 free_dip_list(hr_dev);
2788 static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
2789 u64 out_param, u32 in_modifier, u8 op_modifier,
2790 u16 op, u16 token, int event)
2792 struct hns_roce_cmq_desc desc;
2793 struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
2795 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
2797 mb->in_param_l = cpu_to_le32(in_param);
2798 mb->in_param_h = cpu_to_le32(in_param >> 32);
2799 mb->out_param_l = cpu_to_le32(out_param);
2800 mb->out_param_h = cpu_to_le32(out_param >> 32);
2801 mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
2802 mb->token_event_en = cpu_to_le32(event << 16 | token);
2804 return hns_roce_cmq_send(hr_dev, &desc, 1);
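/*
 * Packing sketch with hypothetical values: cmd_tag = (in_modifier << 8) |
 * op, so op = 0x0002 with in_modifier = 0x10 gives cmd_tag = 0x1002;
 * token_event_en = (event << 16) | token likewise keeps the token in the
 * low 16 bits.
 */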
2807 static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout,
2808 u8 *complete_status)
2810 struct hns_roce_mbox_status *mb_st;
2811 struct hns_roce_cmq_desc desc;
2817 mb_st = (struct hns_roce_mbox_status *)desc.data;
2818 end = msecs_to_jiffies(timeout) + jiffies;
2819 while (v2_chk_mbox_is_avail(hr_dev, &busy)) {
2820 status = 0;
2821 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST,
2822 true);
2823 ret = __hns_roce_cmq_send(hr_dev, &desc, 1);
2824 if (!ret) {
2825 status = le32_to_cpu(mb_st->mb_status_hw_run);
2826 /* No pending message exists in ROCEE mbox. */
2827 if (!(status & MB_ST_HW_RUN_M))
2828 break;
2829 } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
2830 break;
2831 }
2833 if (time_after(jiffies, end)) {
2834 dev_err_ratelimited(hr_dev->dev,
2835 "failed to wait mbox status 0x%x\n",
2836 status);
2837 return -ETIMEDOUT;
2838 }
2842 }
2844 if (!ret) {
2845 *complete_status = (u8)(status & MB_ST_COMPLETE_M);
2846 } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
2847 /* Ignore all errors if the mbox is unavailable. */
2848 ret = 0;
2849 *complete_status = MB_ST_COMPLETE_M;
2850 }
2852 return ret;
2855 static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
2856 u64 out_param, u32 in_modifier, u8 op_modifier,
2857 u16 op, u16 token, int event)
2862 /* Wait for the mbox to be idle */
2863 ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS,
2864 &status);
2865 if (unlikely(ret)) {
2866 dev_err_ratelimited(hr_dev->dev,
2867 "failed to check post mbox status = 0x%x, ret = %d.\n",
2868 status, ret);
2869 return ret;
2870 }
2872 /* Post new message to mbox */
2873 ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
2874 op_modifier, op, token, event);
2875 if (ret)
2876 dev_err_ratelimited(hr_dev->dev,
2877 "failed to post mailbox, ret = %d.\n", ret);
2879 return ret;
2882 static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev, unsigned int timeout)
2887 ret = v2_wait_mbox_complete(hr_dev, timeout, &status);
2888 if (!ret) {
2889 if (status != MB_ST_COMPLETE_SUCC)
2890 return -EBUSY;
2891 } else {
2892 dev_err_ratelimited(hr_dev->dev,
2893 "failed to check mbox status = 0x%x, ret = %d.\n",
2894 status, ret);
2895 }
2897 return ret;
2900 static void copy_gid(void *dest, const union ib_gid *gid)
2902 #define GID_SIZE 4
2903 const union ib_gid *src = gid;
2904 __le32 (*p)[GID_SIZE] = dest;
2910 for (i = 0; i < GID_SIZE; i++)
2911 (*p)[i] = cpu_to_le32(*(u32 *)&src->raw[i * sizeof(u32)]);
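/*
 * copy_gid() stores the 16-byte GID as four little-endian 32-bit words,
 * matching the __le32 vf_sgid_* fields of the SGID/GMV table commands
 * below while leaving the raw GID byte order untouched.
 */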
2914 static int config_sgid_table(struct hns_roce_dev *hr_dev,
2915 int gid_index, const union ib_gid *gid,
2916 enum hns_roce_sgid_type sgid_type)
2918 struct hns_roce_cmq_desc desc;
2919 struct hns_roce_cfg_sgid_tb *sgid_tb =
2920 (struct hns_roce_cfg_sgid_tb *)desc.data;
2922 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
2924 roce_set_field(sgid_tb->table_idx_rsv, CFG_SGID_TB_TABLE_IDX_M,
2925 CFG_SGID_TB_TABLE_IDX_S, gid_index);
2926 roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M,
2927 CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
2929 copy_gid(&sgid_tb->vf_sgid_l, gid);
2931 return hns_roce_cmq_send(hr_dev, &desc, 1);
2934 static int config_gmv_table(struct hns_roce_dev *hr_dev,
2935 int gid_index, const union ib_gid *gid,
2936 enum hns_roce_sgid_type sgid_type,
2937 const struct ib_gid_attr *attr)
2939 struct hns_roce_cmq_desc desc[2];
2940 struct hns_roce_cfg_gmv_tb_a *tb_a =
2941 (struct hns_roce_cfg_gmv_tb_a *)desc[0].data;
2942 struct hns_roce_cfg_gmv_tb_b *tb_b =
2943 (struct hns_roce_cfg_gmv_tb_b *)desc[1].data;
2945 u16 vlan_id = VLAN_CFI_MASK;
2946 u8 mac[ETH_ALEN] = {};
2950 ret = rdma_read_gid_l2_fields(attr, &vlan_id, mac);
2951 if (ret)
2952 return ret;
2955 hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_CFG_GMV_TBL, false);
2956 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2958 hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_CFG_GMV_TBL, false);
2960 copy_gid(&tb_a->vf_sgid_l, gid);
2962 roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_SGID_TYPE_M,
2963 CFG_GMV_TB_VF_SGID_TYPE_S, sgid_type);
2964 roce_set_bit(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_EN_S,
2965 vlan_id < VLAN_CFI_MASK);
2966 roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_ID_M,
2967 CFG_GMV_TB_VF_VLAN_ID_S, vlan_id);
2969 tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac);
2970 roce_set_field(tb_b->vf_smac_h, CFG_GMV_TB_SMAC_H_M,
2971 CFG_GMV_TB_SMAC_H_S, *(u16 *)&mac[4]);
2973 roce_set_field(tb_b->table_idx_rsv, CFG_GMV_TB_SGID_IDX_M,
2974 CFG_GMV_TB_SGID_IDX_S, gid_index);
2976 return hns_roce_cmq_send(hr_dev, desc, 2);
2979 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port,
2980 int gid_index, const union ib_gid *gid,
2981 const struct ib_gid_attr *attr)
2983 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
2987 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
2988 if (ipv6_addr_v4mapped((void *)gid))
2989 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
2990 else
2991 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
2992 } else if (attr->gid_type == IB_GID_TYPE_ROCE) {
2993 sgid_type = GID_TYPE_FLAG_ROCE_V1;
2997 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
2998 ret = config_gmv_table(hr_dev, gid_index, gid, sgid_type, attr);
3000 ret = config_sgid_table(hr_dev, gid_index, gid, sgid_type);
3002 if (ret)
3003 ibdev_err(&hr_dev->ib_dev, "failed to set gid, ret = %d!\n",
3004 ret);
3006 return ret;
3009 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
3010 u8 *addr)
3012 struct hns_roce_cmq_desc desc;
3013 struct hns_roce_cfg_smac_tb *smac_tb =
3014 (struct hns_roce_cfg_smac_tb *)desc.data;
3018 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
3020 reg_smac_l = *(u32 *)(&addr[0]);
3021 reg_smac_h = *(u16 *)(&addr[4]);
3023 roce_set_field(smac_tb->tb_idx_rsv, CFG_SMAC_TB_IDX_M,
3024 CFG_SMAC_TB_IDX_S, phy_port);
3025 roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M,
3026 CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
3027 smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
3029 return hns_roce_cmq_send(hr_dev, &desc, 1);
3032 static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
3033 struct hns_roce_v2_mpt_entry *mpt_entry,
3034 struct hns_roce_mr *mr)
3036 u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
3037 struct ib_device *ibdev = &hr_dev->ib_dev;
3041 count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
3042 ARRAY_SIZE(pages), &pbl_ba);
3043 if (count < 1) {
3044 ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
3045 count);
3046 return -ENOBUFS;
3047 }
3049 /* Aligned to the hardware address access unit */
3050 for (i = 0; i < count; i++)
3051 pages[i] >>= 6;
3053 mpt_entry->pbl_size = cpu_to_le32(mr->npages);
3054 mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
3055 roce_set_field(mpt_entry->byte_48_mode_ba,
3056 V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
3057 upper_32_bits(pbl_ba >> 3));
3059 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
3060 roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
3061 V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
3063 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
3064 roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
3065 V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
3066 roce_set_field(mpt_entry->byte_64_buf_pa1,
3067 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
3068 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
3069 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
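/*
 * Address packing sketch: pbl_ba is programmed right-shifted by 3, i.e.
 * in 8-byte units, with the low 32 bits in pbl_ba_l and the remainder in
 * the byte_48 high field; the per-page addresses are shifted by 6
 * (64-byte units) to match the hardware access unit noted above.
 */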
3074 static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
3075 void *mb_buf, struct hns_roce_mr *mr,
3076 unsigned long mtpt_idx)
3078 struct hns_roce_v2_mpt_entry *mpt_entry;
3082 memset(mpt_entry, 0, sizeof(*mpt_entry));
3084 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
3085 hr_reg_write(mpt_entry, MPT_PD, mr->pd);
3086 hr_reg_enable(mpt_entry, MPT_L_INV_EN);
3088 hr_reg_write(mpt_entry, MPT_BIND_EN,
3089 !!(mr->access & IB_ACCESS_MW_BIND));
3090 hr_reg_write(mpt_entry, MPT_ATOMIC_EN,
3091 !!(mr->access & IB_ACCESS_REMOTE_ATOMIC));
3092 hr_reg_write(mpt_entry, MPT_RR_EN,
3093 !!(mr->access & IB_ACCESS_REMOTE_READ));
3094 hr_reg_write(mpt_entry, MPT_RW_EN,
3095 !!(mr->access & IB_ACCESS_REMOTE_WRITE));
3096 hr_reg_write(mpt_entry, MPT_LW_EN,
3097 !!((mr->access & IB_ACCESS_LOCAL_WRITE)));
3099 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
3100 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
3101 mpt_entry->lkey = cpu_to_le32(mr->key);
3102 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
3103 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
3105 if (mr->type != MR_TYPE_MR)
3106 hr_reg_enable(mpt_entry, MPT_PA);
3108 if (mr->type == MR_TYPE_DMA)
3109 return 0;
3111 if (mr->pbl_hop_num != HNS_ROCE_HOP_NUM_0)
3112 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num);
3114 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
3115 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
3116 hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD);
3118 ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
3120 return ret;
3123 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
3124 struct hns_roce_mr *mr, int flags,
3125 void *mb_buf)
3127 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
3128 u32 mr_access_flags = mr->access;
3131 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
3132 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
3134 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
3135 V2_MPT_BYTE_4_PD_S, mr->pd);
3137 if (flags & IB_MR_REREG_ACCESS) {
3138 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
3139 V2_MPT_BYTE_8_BIND_EN_S,
3140 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
3141 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
3142 V2_MPT_BYTE_8_ATOMIC_EN_S,
3143 mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
3144 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
3145 mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
3146 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
3147 mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
3148 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
3149 mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
3152 if (flags & IB_MR_REREG_TRANS) {
3153 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
3154 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
3155 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
3156 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
3158 ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
3159 }
3161 return ret;
3164 static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
3165 void *mb_buf, struct hns_roce_mr *mr)
3167 struct ib_device *ibdev = &hr_dev->ib_dev;
3168 struct hns_roce_v2_mpt_entry *mpt_entry;
3169 dma_addr_t pbl_ba = 0;
3172 memset(mpt_entry, 0, sizeof(*mpt_entry));
3174 if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
3175 ibdev_err(ibdev, "failed to find frmr mtr.\n");
3176 return -ENOBUFS;
3177 }
3179 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
3180 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
3181 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
3182 V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
3183 roce_set_field(mpt_entry->byte_4_pd_hop_st,
3184 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
3185 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
3186 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
3187 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
3188 V2_MPT_BYTE_4_PD_S, mr->pd);
3190 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
3191 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
3192 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
3194 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
3195 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
3196 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
3197 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
3199 mpt_entry->pbl_size = cpu_to_le32(mr->npages);
3201 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3));
3202 roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
3203 V2_MPT_BYTE_48_PBL_BA_H_S,
3204 upper_32_bits(pbl_ba >> 3));
3206 roce_set_field(mpt_entry->byte_64_buf_pa1,
3207 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
3208 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
3209 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
3214 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
3216 struct hns_roce_v2_mpt_entry *mpt_entry;
3219 memset(mpt_entry, 0, sizeof(*mpt_entry));
3221 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
3222 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
3223 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
3224 V2_MPT_BYTE_4_PD_S, mw->pdn);
3225 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
3226 V2_MPT_BYTE_4_PBL_HOP_NUM_S,
3227 mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
3228 mw->pbl_hop_num);
3229 roce_set_field(mpt_entry->byte_4_pd_hop_st,
3230 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
3231 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
3232 mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
3234 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
3235 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
3236 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);
3238 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
3239 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
3240 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
3241 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
3242 mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
3244 roce_set_field(mpt_entry->byte_64_buf_pa1,
3245 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
3246 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
3247 mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
3249 mpt_entry->lkey = cpu_to_le32(mw->rkey);
3251 return 0;
3254 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
3256 return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
3259 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
3261 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
3263 /* Get a CQE when its owner bit is the inverse of the MSB of cons_idx */
3264 return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
3265 !!(n & hr_cq->cq_depth)) ? cqe : NULL;
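/*
 * Owner-bit sketch: with cq_depth = 256, consumer indexes 0..255 accept a
 * CQE only when its owner bit is 1 and indexes 256..511 only when it is
 * 0, so a stale CQE left over from the previous lap of the ring is never
 * reported as valid.
 */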
3268 static inline void update_cq_db(struct hns_roce_dev *hr_dev,
3269 struct hns_roce_cq *hr_cq)
3271 if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) {
3272 *hr_cq->set_ci_db = hr_cq->cons_index & V2_CQ_DB_CONS_IDX_M;
3274 struct hns_roce_v2_db cq_db = {};
3276 roce_set_field(cq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
3277 hr_cq->cqn);
3278 roce_set_field(cq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
3279 HNS_ROCE_V2_CQ_DB);
3280 roce_set_field(cq_db.parameter, V2_CQ_DB_CONS_IDX_M,
3281 V2_CQ_DB_CONS_IDX_S, hr_cq->cons_index);
3282 roce_set_field(cq_db.parameter, V2_CQ_DB_CMD_SN_M,
3283 V2_CQ_DB_CMD_SN_S, 1);
3285 hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
3289 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
3290 struct hns_roce_srq *srq)
3292 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3293 struct hns_roce_v2_cqe *cqe, *dest;
3299 for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
3300 ++prod_index) {
3301 if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
3302 break;
3303 }
3306 * Now iterate backwards through the CQ, removing the CQ entries
3307 * that match our QP by overwriting them with the subsequent entries.
3309 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
3310 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
3311 if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
3312 V2_CQE_BYTE_16_LCL_QPN_S) &
3313 HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
3314 if (srq &&
3315 roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
3316 wqe_index = roce_get_field(cqe->byte_4,
3317 V2_CQE_BYTE_4_WQE_INDX_M,
3318 V2_CQE_BYTE_4_WQE_INDX_S);
3319 hns_roce_free_srq_wqe(srq, wqe_index);
3320 }
3321 ++nfreed;
3322 } else if (nfreed) {
3323 dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
3324 hr_cq->ib_cq.cqe);
3325 owner_bit = roce_get_bit(dest->byte_4,
3326 V2_CQE_BYTE_4_OWNER_S);
3327 memcpy(dest, cqe, sizeof(*cqe));
3328 roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
3329 owner_bit);
3330 }
3331 }
3333 if (nfreed) {
3334 hr_cq->cons_index += nfreed;
3335 update_cq_db(hr_dev, hr_cq);
3336 }
3339 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
3340 struct hns_roce_srq *srq)
3342 spin_lock_irq(&hr_cq->lock);
3343 __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
3344 spin_unlock_irq(&hr_cq->lock);
3347 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
3348 struct hns_roce_cq *hr_cq, void *mb_buf,
3349 u64 *mtts, dma_addr_t dma_handle)
3351 struct hns_roce_v2_cq_context *cq_context;
3353 cq_context = mb_buf;
3354 memset(cq_context, 0, sizeof(*cq_context));
3356 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
3357 V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
3358 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
3359 V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
3360 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
3361 V2_CQC_BYTE_4_SHIFT_S, ilog2(hr_cq->cq_depth));
3362 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
3363 V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);
3365 roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
3366 V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
3368 roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQE_SIZE_M,
3369 V2_CQC_BYTE_8_CQE_SIZE_S, hr_cq->cqe_size ==
3370 HNS_ROCE_V3_CQE_SIZE ? 1 : 0);
3372 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
3373 hr_reg_enable(cq_context, CQC_STASH);
3375 cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
3377 roce_set_field(cq_context->byte_16_hop_addr,
3378 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
3379 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
3380 upper_32_bits(to_hr_hw_page_addr(mtts[0])));
3381 roce_set_field(cq_context->byte_16_hop_addr,
3382 V2_CQC_BYTE_16_CQE_HOP_NUM_M,
3383 V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
3384 HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
3386 cq_context->cqe_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
3387 roce_set_field(cq_context->byte_24_pgsz_addr,
3388 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
3389 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
3390 upper_32_bits(to_hr_hw_page_addr(mtts[1])));
3391 roce_set_field(cq_context->byte_24_pgsz_addr,
3392 V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
3393 V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
3394 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
3395 roce_set_field(cq_context->byte_24_pgsz_addr,
3396 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
3397 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
3398 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
3400 cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);
3402 roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
3403 V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
3405 roce_set_bit(cq_context->byte_44_db_record,
3406 V2_CQC_BYTE_44_DB_RECORD_EN_S,
3407 (hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB) ? 1 : 0);
3409 roce_set_field(cq_context->byte_44_db_record,
3410 V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
3411 V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
3412 ((u32)hr_cq->db.dma) >> 1);
3413 cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32);
3415 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3416 V2_CQC_BYTE_56_CQ_MAX_CNT_M,
3417 V2_CQC_BYTE_56_CQ_MAX_CNT_S,
3418 HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
3419 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3420 V2_CQC_BYTE_56_CQ_PERIOD_M,
3421 V2_CQC_BYTE_56_CQ_PERIOD_S,
3422 HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
3425 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
3426 enum ib_cq_notify_flags flags)
3428 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3429 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3430 struct hns_roce_v2_db cq_db = {};
3434 * flags = 0, then notify_flag : next
3435 * flags = 1, then notify_flag : solicited
3437 notify_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
3438 V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
3440 roce_set_field(cq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S, hr_cq->cqn);
3441 roce_set_field(cq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
3442 HNS_ROCE_V2_CQ_DB_NOTIFY);
3443 roce_set_field(cq_db.parameter, V2_CQ_DB_CONS_IDX_M,
3444 V2_CQ_DB_CONS_IDX_S, hr_cq->cons_index);
3445 roce_set_field(cq_db.parameter, V2_CQ_DB_CMD_SN_M,
3446 V2_CQ_DB_CMD_SN_S, hr_cq->arm_sn);
3447 roce_set_bit(cq_db.parameter, V2_CQ_DB_NOTIFY_TYPE_S, notify_flag);
3449 hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
3454 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
3455 struct hns_roce_qp *qp,
3458 struct hns_roce_rinl_sge *sge_list;
3459 u32 wr_num, wr_cnt, sge_num;
3460 u32 sge_cnt, data_len, size;
3463 wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
3464 V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
3465 wr_cnt = wr_num & (qp->rq.wqe_cnt - 1);
3467 sge_list = qp->rq_inl_buf.wqe_list[wr_cnt].sg_list;
3468 sge_num = qp->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
3469 wqe_buf = hns_roce_get_recv_wqe(qp, wr_cnt);
3470 data_len = wc->byte_len;
3472 for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
3473 size = min(sge_list[sge_cnt].len, data_len);
3474 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
3480 if (unlikely(data_len)) {
3481 wc->status = IB_WC_LOC_LEN_ERR;
3488 static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
3489 int num_entries, struct ib_wc *wc)
3494 left = wq->head - wq->tail;
3495 if (left == 0)
3496 return 0;
3498 left = min_t(unsigned int, (unsigned int)num_entries, left);
3499 while (npolled < left) {
3500 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3501 wc->status = IB_WC_WR_FLUSH_ERR;
3502 wc->vendor_err = 0;
3503 wc->qp = &hr_qp->ibqp;
3505 wq->tail++;
3506 wc++;
3507 npolled++;
3508 }
3510 return npolled;
3513 static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
3514 struct ib_wc *wc)
3516 struct hns_roce_qp *hr_qp;
3517 int npolled = 0;
3519 list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
3520 npolled += sw_comp(hr_qp, &hr_qp->sq,
3521 num_entries - npolled, wc + npolled);
3522 if (npolled >= num_entries)
3523 goto out;
3524 }
3526 list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
3527 npolled += sw_comp(hr_qp, &hr_qp->rq,
3528 num_entries - npolled, wc + npolled);
3529 if (npolled >= num_entries)
3530 goto out;
3531 }
3533 out:
3534 return npolled;
3537 static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
3538 struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
3541 static const struct {
3543 enum ib_wc_status wc_status;
3545 { HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
3546 { HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
3547 { HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
3548 { HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
3549 { HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
3550 { HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
3551 { HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
3552 { HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
3553 { HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
3554 { HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
3555 { HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
3556 { HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
3557 IB_WC_RETRY_EXC_ERR },
3558 { HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
3559 { HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
3560 { HNS_ROCE_CQE_V2_GENERAL_ERR, IB_WC_GENERAL_ERR}
3563 u32 cqe_status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
3564 V2_CQE_BYTE_4_STATUS_S);
3567 wc->status = IB_WC_GENERAL_ERR;
3568 for (i = 0; i < ARRAY_SIZE(map); i++)
3569 if (cqe_status == map[i].cqe_status) {
3570 wc->status = map[i].wc_status;
3571 break;
3572 }
3574 if (likely(wc->status == IB_WC_SUCCESS ||
3575 wc->status == IB_WC_WR_FLUSH_ERR))
3578 ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
3579 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
3580 cq->cqe_size, false);
3583 * For hns ROCEE, GENERAL_ERR is an error type that is not defined in
3584 * the standard protocol; the driver must ignore it and need not set
3585 * the QP to an error state.
3587 if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
3588 return;
3591 * HIP08 hardware cannot flush the WQEs in the SQ/RQ when the QP enters
3592 * an error state, so as a workaround to this hardware limitation the
3593 * driver needs to assist in flushing. The flushing operation uses a
3594 * mailbox to convey the QP state to the hardware, and the mailbox call
3595 * can sleep because of the mutex protecting it. Hence the flush is
3596 * deferred to a work item: once a WC error is detected, the flushing
3597 * operation is scheduled.
3599 if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
3600 init_flush_work(hr_dev, qp);
3603 static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
3604 struct hns_roce_qp **cur_qp)
3606 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3607 struct hns_roce_qp *hr_qp = *cur_qp;
3610 qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
3611 V2_CQE_BYTE_16_LCL_QPN_S) &
3612 HNS_ROCE_V2_CQE_QPN_MASK;
3614 if (!hr_qp || qpn != hr_qp->qpn) {
3615 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
3616 if (unlikely(!hr_qp)) {
3617 ibdev_err(&hr_dev->ib_dev,
3618 "CQ %06lx with entry for unknown QPN %06x\n",
3619 hr_cq->cqn, qpn);
3620 return -EINVAL;
3621 }
3622 *cur_qp = hr_qp;
3623 }
3625 return 0;
3629 * mapped-value = 1 + real-value
3630 * The IB WC opcode's real value starts from 0. In order to distinguish
3631 * between initialized and uninitialized map values, we add 1 to the
3632 * actual value when defining the mapping, so that validity can be
3633 * identified by checking whether the mapped value is greater than 0.
3635 #define HR_WC_OP_MAP(hr_key, ib_key) \
3636 [HNS_ROCE_V2_WQE_OP_ ## hr_key] = 1 + IB_WC_ ## ib_key
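/*
 * For instance, HR_WC_OP_MAP(SEND, SEND) stores 1 + IB_WC_SEND at index
 * HNS_ROCE_V2_WQE_OP_SEND; to_ib_wc_send_op() undoes the +1 again, and a
 * slot that was never initialized reads 0 and is rejected as invalid.
 */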
3638 static const u32 wc_send_op_map[] = {
3639 HR_WC_OP_MAP(SEND, SEND),
3640 HR_WC_OP_MAP(SEND_WITH_INV, SEND),
3641 HR_WC_OP_MAP(SEND_WITH_IMM, SEND),
3642 HR_WC_OP_MAP(RDMA_READ, RDMA_READ),
3643 HR_WC_OP_MAP(RDMA_WRITE, RDMA_WRITE),
3644 HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE),
3645 HR_WC_OP_MAP(LOCAL_INV, LOCAL_INV),
3646 HR_WC_OP_MAP(ATOM_CMP_AND_SWAP, COMP_SWAP),
3647 HR_WC_OP_MAP(ATOM_FETCH_AND_ADD, FETCH_ADD),
3648 HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP),
3649 HR_WC_OP_MAP(ATOM_MSK_FETCH_AND_ADD, MASKED_FETCH_ADD),
3650 HR_WC_OP_MAP(FAST_REG_PMR, REG_MR),
3651 HR_WC_OP_MAP(BIND_MW, REG_MR),
3654 static int to_ib_wc_send_op(u32 hr_opcode)
3656 if (hr_opcode >= ARRAY_SIZE(wc_send_op_map))
3659 return wc_send_op_map[hr_opcode] ? wc_send_op_map[hr_opcode] - 1 :
3663 static const u32 wc_recv_op_map[] = {
3664 HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, WITH_IMM),
3665 HR_WC_OP_MAP(SEND, RECV),
3666 HR_WC_OP_MAP(SEND_WITH_IMM, WITH_IMM),
3667 HR_WC_OP_MAP(SEND_WITH_INV, RECV),
3670 static int to_ib_wc_recv_op(u32 hr_opcode)
3672 if (hr_opcode >= ARRAY_SIZE(wc_recv_op_map))
3675 return wc_recv_op_map[hr_opcode] ? wc_recv_op_map[hr_opcode] - 1 :
3679 static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
3686 hr_opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
3687 V2_CQE_BYTE_4_OPCODE_S) & 0x1f;
3688 switch (hr_opcode) {
3689 case HNS_ROCE_V2_WQE_OP_RDMA_READ:
3690 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3691 break;
3692 case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
3693 case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
3694 wc->wc_flags |= IB_WC_WITH_IMM;
3695 break;
3696 case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
3697 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3698 break;
3699 case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
3700 case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
3701 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
3702 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
3703 wc->byte_len = 8;
3704 break;
3705 default:
3706 break;
3707 }
3709 ib_opcode = to_ib_wc_send_op(hr_opcode);
3710 if (ib_opcode < 0)
3711 wc->status = IB_WC_GENERAL_ERR;
3712 else
3713 wc->opcode = ib_opcode;
3716 static inline bool is_rq_inl_enabled(struct ib_wc *wc, u32 hr_opcode,
3717 struct hns_roce_v2_cqe *cqe)
3719 return wc->qp->qp_type != IB_QPT_UD &&
3720 wc->qp->qp_type != IB_QPT_GSI &&
3721 (hr_opcode == HNS_ROCE_V2_OPCODE_SEND ||
3722 hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
3723 hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
3724 roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S);
3727 static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
3729 struct hns_roce_qp *qp = to_hr_qp(wc->qp);
3734 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3736 hr_opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
3737 V2_CQE_BYTE_4_OPCODE_S) & 0x1f;
3738 switch (hr_opcode) {
3739 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
3740 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
3741 wc->wc_flags = IB_WC_WITH_IMM;
3742 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immtdata));
3743 break;
3744 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
3745 wc->wc_flags = IB_WC_WITH_INVALIDATE;
3746 wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
3747 break;
3748 default:
3749 wc->wc_flags = 0;
3750 }
3752 ib_opcode = to_ib_wc_recv_op(hr_opcode);
3753 if (ib_opcode < 0)
3754 wc->status = IB_WC_GENERAL_ERR;
3755 else
3756 wc->opcode = ib_opcode;
3758 if (is_rq_inl_enabled(wc, hr_opcode, cqe)) {
3759 ret = hns_roce_handle_recv_inl_wqe(cqe, qp, wc);
3760 if (unlikely(ret))
3761 return ret;
3762 }
3764 wc->sl = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
3765 V2_CQE_BYTE_32_SL_S);
3766 wc->src_qp = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_RMT_QPN_M,
3767 V2_CQE_BYTE_32_RMT_QPN_S);
3769 wc->wc_flags |= roce_get_bit(cqe->byte_32, V2_CQE_BYTE_32_GRH_S) ?
3770 IB_WC_GRH : 0;
3771 wc->port_num = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_PORTN_M,
3772 V2_CQE_BYTE_32_PORTN_S);
3775 if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
3776 wc->vlan_id = roce_get_field(cqe->byte_28, V2_CQE_BYTE_28_VID_M,
3777 V2_CQE_BYTE_28_VID_S);
3778 wc->wc_flags |= IB_WC_WITH_VLAN;
3779 } else {
3780 wc->vlan_id = 0xffff;
3781 }
3783 wc->network_hdr_type = roce_get_field(cqe->byte_28,
3784 V2_CQE_BYTE_28_PORT_TYPE_M,
3785 V2_CQE_BYTE_28_PORT_TYPE_S);
3790 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
3791 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
3793 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3794 struct hns_roce_qp *qp = *cur_qp;
3795 struct hns_roce_srq *srq = NULL;
3796 struct hns_roce_v2_cqe *cqe;
3797 struct hns_roce_wq *wq;
3802 cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
3803 if (!cqe)
3804 return -EAGAIN;
3806 ++hr_cq->cons_index;
3807 /* Memory barrier */
3808 rmb();
3810 ret = get_cur_qp(hr_cq, cqe, &qp);
3811 if (ret)
3812 return ret;
3814 wc->qp = &qp->ibqp;
3815 wc->vendor_err = 0;
3817 wqe_idx = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
3818 V2_CQE_BYTE_4_WQE_INDX_S);
3820 is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
3821 if (is_send) {
3822 wq = &qp->sq;
3824 /* If sq_signal_bits is set, the tail pointer will be updated to
3825 * the WQE corresponding to the current CQE.
3826 */
3827 if (qp->sq_signal_bits)
3828 wq->tail += (wqe_idx - (u16)wq->tail) &
3829 (wq->wqe_cnt - 1);
3831 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3832 ++wq->tail;
3834 fill_send_wc(wc, cqe);
3837 srq = to_hr_srq(qp->ibqp.srq);
3838 wc->wr_id = srq->wrid[wqe_idx];
3839 hns_roce_free_srq_wqe(srq, wqe_idx);
3840 } else {
3841 wq = &qp->rq;
3842 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3846 ret = fill_recv_wc(wc, cqe);
3849 get_cqe_status(hr_dev, qp, hr_cq, cqe, wc);
3850 if (unlikely(wc->status != IB_WC_SUCCESS))
3856 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
3859 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3860 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3861 struct hns_roce_qp *cur_qp = NULL;
3862 unsigned long flags;
3865 spin_lock_irqsave(&hr_cq->lock, flags);
3868 * When the device starts to reset, the state is RST_DOWN. At this time,
3869 * there may still be some valid CQEs in the hardware that are not
3870 * polled. Therefore, it is not allowed to switch to the software mode
3871 * immediately. When the state changes to UNINIT, CQE no longer exists
3872 * in the hardware, and then switch to software mode.
3874 if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
3875 npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
3876 goto out;
3877 }
3879 for (npolled = 0; npolled < num_entries; ++npolled) {
3880 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
3881 break;
3882 }
3884 if (npolled)
3885 update_cq_db(hr_dev, hr_cq);
3887 out:
3888 spin_unlock_irqrestore(&hr_cq->lock, flags);
3890 return npolled;
3893 static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
3894 int step_idx, u16 *mbox_op)
3898 switch (type) {
3899 case HEM_TYPE_QPC:
3900 op = HNS_ROCE_CMD_WRITE_QPC_BT0;
3901 break;
3902 case HEM_TYPE_MTPT:
3903 op = HNS_ROCE_CMD_WRITE_MPT_BT0;
3904 break;
3905 case HEM_TYPE_CQC:
3906 op = HNS_ROCE_CMD_WRITE_CQC_BT0;
3907 break;
3908 case HEM_TYPE_SRQC:
3909 op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
3910 break;
3911 case HEM_TYPE_SCCC:
3912 op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
3913 break;
3914 case HEM_TYPE_QPC_TIMER:
3915 op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
3916 break;
3917 case HEM_TYPE_CQC_TIMER:
3918 op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
3919 break;
3920 default:
3921 dev_warn(hr_dev->dev, "failed to check hem type %u.\n", type);
3922 return -EINVAL;
3923 }
3925 *mbox_op = op + step_idx;
3927 return 0;
3930 static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
3931 dma_addr_t base_addr)
3933 struct hns_roce_cmq_desc desc;
3934 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
3935 u32 idx = obj / (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz);
3936 u64 addr = to_hr_hw_page_addr(base_addr);
3938 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
3940 hr_reg_write(req, CFG_GMV_BT_BA_L, lower_32_bits(addr));
3941 hr_reg_write(req, CFG_GMV_BT_BA_H, upper_32_bits(addr));
3942 hr_reg_write(req, CFG_GMV_BT_IDX, idx);
3944 return hns_roce_cmq_send(hr_dev, &desc, 1);
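/*
 * Index math sketch (assuming HNS_ROCE_V3_GMV_ENTRY_SZ == 32 and a 4 KB
 * HNS_HW_PAGE_SIZE): one BT page holds 4096 / 32 = 128 GMV entries, so
 * programming entry obj = 300 updates BT index 300 / 128 = 2.
 */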
3947 static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj,
3948 dma_addr_t base_addr, u32 hem_type, int step_idx)
3953 if (unlikely(hem_type == HEM_TYPE_GMV))
3954 return config_gmv_ba_to_hw(hr_dev, obj, base_addr);
3956 if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx))
3957 return 0;
3959 ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &op);
3960 if (ret < 0)
3961 return ret;
3963 return config_hem_ba_to_hw(hr_dev, obj, base_addr, op);
3966 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
3967 struct hns_roce_hem_table *table, int obj,
3970 struct hns_roce_hem_iter iter;
3971 struct hns_roce_hem_mhop mhop;
3972 struct hns_roce_hem *hem;
3973 unsigned long mhop_obj = obj;
3982 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3985 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
3986 i = mhop.l0_idx;
3987 j = mhop.l1_idx;
3988 k = mhop.l2_idx;
3989 hop_num = mhop.hop_num;
3990 chunk_ba_num = mhop.bt_chunk_size / 8;
3992 if (hop_num == 2) {
3993 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
3994 k;
3995 l1_idx = i * chunk_ba_num + j;
3996 } else if (hop_num == 1) {
3997 hem_idx = i * chunk_ba_num + j;
3998 } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
3999 hem_idx = i;
4000 }
4002 if (table->type == HEM_TYPE_SCCC)
4003 obj = mhop.l0_idx;
4005 if (check_whether_last_step(hop_num, step_idx)) {
4006 hem = table->hem[hem_idx];
4007 for (hns_roce_hem_first(hem, &iter);
4008 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
4009 bt_ba = hns_roce_hem_addr(&iter);
4010 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type,
4011 step_idx);
4012 }
4013 } else {
4014 if (step_idx == 0)
4015 bt_ba = table->bt_l0_dma_addr[i];
4016 else if (step_idx == 1 && hop_num == 2)
4017 bt_ba = table->bt_l1_dma_addr[l1_idx];
4019 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, step_idx);
4020 }
4022 return ret;
4025 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
4026 struct hns_roce_hem_table *table, int obj,
4029 struct device *dev = hr_dev->dev;
4030 struct hns_roce_cmd_mailbox *mailbox;
4034 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
4037 switch (table->type) {
4038 case HEM_TYPE_QPC:
4039 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
4040 break;
4041 case HEM_TYPE_MTPT:
4042 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
4043 break;
4044 case HEM_TYPE_CQC:
4045 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
4046 break;
4047 case HEM_TYPE_SRQC:
4048 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
4049 break;
4050 case HEM_TYPE_SCCC:
4051 case HEM_TYPE_QPC_TIMER:
4052 case HEM_TYPE_CQC_TIMER:
4053 case HEM_TYPE_GMV:
4054 return 0;
4055 default:
4056 dev_warn(dev, "table %u not to be destroyed by mailbox!\n",
4057 table->type);
4058 return 0;
4059 }
4063 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4064 if (IS_ERR(mailbox))
4065 return PTR_ERR(mailbox);
4067 /* configure the tag and op */
4068 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
4069 HNS_ROCE_CMD_TIMEOUT_MSECS);
4071 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4075 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
4076 struct hns_roce_v2_qp_context *context,
4077 struct hns_roce_v2_qp_context *qpc_mask,
4078 struct hns_roce_qp *hr_qp)
4080 struct hns_roce_cmd_mailbox *mailbox;
4084 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4085 if (IS_ERR(mailbox))
4086 return PTR_ERR(mailbox);
4088 /* The qpc size of HIP08 is only 256B, which is half of HIP09 */
4089 qpc_size = hr_dev->caps.qpc_sz;
4090 memcpy(mailbox->buf, context, qpc_size);
4091 memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size);
4093 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
4094 HNS_ROCE_CMD_MODIFY_QPC,
4095 HNS_ROCE_CMD_TIMEOUT_MSECS);
4097 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
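/*
 * The mailbox buffer sent above holds two back-to-back QPC images, which is
 * why both memcpy() calls use qpc_size as the stride:
 *
 *	+------------+ offset 0
 *	| context    |
 *	+------------+ offset caps.qpc_sz
 *	| qpc_mask   |
 *	+------------+ offset 2 * caps.qpc_sz
 */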
4102 static void set_access_flags(struct hns_roce_qp *hr_qp,
4103 struct hns_roce_v2_qp_context *context,
4104 struct hns_roce_v2_qp_context *qpc_mask,
4105 const struct ib_qp_attr *attr, int attr_mask)
4110 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
4111 attr->max_dest_rd_atomic : hr_qp->resp_depth;
4113 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
4114 attr->qp_access_flags : hr_qp->atomic_rd_en;
4116 if (!dest_rd_atomic)
4117 access_flags &= IB_ACCESS_REMOTE_WRITE;
4119 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
4120 !!(access_flags & IB_ACCESS_REMOTE_READ));
4121 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
4123 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
4124 !!(access_flags & IB_ACCESS_REMOTE_WRITE));
4125 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
4127 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
4128 !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
4129 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
4130 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S,
4131 !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
4132 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, 0);
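	/*
	 * Note on the gating above: with no responder resources
	 * (dest_rd_atomic == 0) remote reads and atomics cannot be served,
	 * so every right except IB_ACCESS_REMOTE_WRITE is masked off before
	 * the RRE/RWE/ATE bits are derived.
	 */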
4135 static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
4136 struct hns_roce_v2_qp_context *context,
4137 struct hns_roce_v2_qp_context *qpc_mask)
4139 roce_set_field(context->byte_4_sqpn_tst,
4140 V2_QPC_BYTE_4_SGE_SHIFT_M, V2_QPC_BYTE_4_SGE_SHIFT_S,
4141 to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
4142 hr_qp->sge.sge_shift));
4144 roce_set_field(context->byte_20_smac_sgid_idx,
4145 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
4146 ilog2(hr_qp->sq.wqe_cnt));
4148 roce_set_field(context->byte_20_smac_sgid_idx,
4149 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
4150 ilog2(hr_qp->rq.wqe_cnt));
4153 static inline int get_cqn(struct ib_cq *ib_cq)
4155 return ib_cq ? to_hr_cq(ib_cq)->cqn : 0;
4158 static inline int get_pdn(struct ib_pd *ib_pd)
4160 return ib_pd ? to_hr_pd(ib_pd)->pdn : 0;
4163 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
4164 const struct ib_qp_attr *attr,
4166 struct hns_roce_v2_qp_context *context,
4167 struct hns_roce_v2_qp_context *qpc_mask)
4169 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4170 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	/*
	 * In the v2 engine, software passes both the context and a context
	 * mask to hardware when modifying a QP. For every field to be
	 * modified, all bits of that field in the context mask must be
	 * cleared to 0; all other mask bits stay 0x1 so that hardware
	 * leaves those fields unchanged.
	 */
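	/*
	 * A sketch of that pattern for a single field (here "pdn" is just
	 * an illustrative value for the PD field):
	 *
	 *	roce_set_field(context->byte_16_buf_ba_pg_sz,
	 *		       V2_QPC_BYTE_16_PD_M, V2_QPC_BYTE_16_PD_S, pdn);
	 *	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
	 *		       V2_QPC_BYTE_16_PD_M, V2_QPC_BYTE_16_PD_S, 0);
	 */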
4178 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
4179 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(ibqp->qp_type));
4181 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
4182 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
4184 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
4185 V2_QPC_BYTE_16_PD_S, get_pdn(ibqp->pd));
4187 roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
4188 V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
4190 set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
	/* No VLAN: the VLAN ID field must be set to 0xFFF */
4193 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4194 V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
4196 if (ibqp->qp_type == IB_QPT_XRC_TGT) {
4197 context->qkey_xrcd = cpu_to_le32(hr_qp->xrcdn);
4199 roce_set_bit(context->byte_80_rnr_rx_cqn,
4200 V2_QPC_BYTE_80_XRC_QP_TYPE_S, 1);
4203 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
4204 roce_set_bit(context->byte_68_rq_db,
4205 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
4207 roce_set_field(context->byte_68_rq_db,
4208 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
4209 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
4210 ((u32)hr_qp->rdb.dma) >> 1);
4211 context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
4213 if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI)
4214 roce_set_bit(context->byte_76_srqn_op_en,
4215 V2_QPC_BYTE_76_RQIE_S,
4216 !!(hr_dev->caps.flags &
4217 HNS_ROCE_CAP_FLAG_RQ_INLINE));
4219 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
4220 V2_QPC_BYTE_80_RX_CQN_S, get_cqn(ibqp->recv_cq));
4223 roce_set_bit(context->byte_76_srqn_op_en,
4224 V2_QPC_BYTE_76_SRQ_EN_S, 1);
4225 roce_set_field(context->byte_76_srqn_op_en,
4226 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
4227 to_hr_srq(ibqp->srq)->srqn);
4230 roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
4232 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
4233 V2_QPC_BYTE_252_TX_CQN_S, get_cqn(ibqp->send_cq));
4235 if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ)
4238 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
4239 hr_reg_enable(&context->ext, QPCEX_STASH);
4242 static void modify_qp_init_to_init(struct ib_qp *ibqp,
4243 const struct ib_qp_attr *attr, int attr_mask,
4244 struct hns_roce_v2_qp_context *context,
4245 struct hns_roce_v2_qp_context *qpc_mask)
4247 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	/*
	 * In the v2 engine, software passes both the context and a context
	 * mask to hardware when modifying a QP. For every field to be
	 * modified, all bits of that field in the context mask must be
	 * cleared to 0; all other mask bits stay 0x1 so that hardware
	 * leaves those fields unchanged.
	 */
4255 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
4256 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(ibqp->qp_type));
4257 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
4258 V2_QPC_BYTE_4_TST_S, 0);
4260 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
4261 V2_QPC_BYTE_16_PD_S, get_pdn(ibqp->pd));
4263 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
4264 V2_QPC_BYTE_16_PD_S, 0);
4266 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
4267 V2_QPC_BYTE_80_RX_CQN_S, get_cqn(ibqp->recv_cq));
4268 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
4269 V2_QPC_BYTE_80_RX_CQN_S, 0);
4271 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
4272 V2_QPC_BYTE_252_TX_CQN_S, get_cqn(ibqp->send_cq));
4273 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
4274 V2_QPC_BYTE_252_TX_CQN_S, 0);
4277 roce_set_bit(context->byte_76_srqn_op_en,
4278 V2_QPC_BYTE_76_SRQ_EN_S, 1);
4279 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
4280 V2_QPC_BYTE_76_SRQ_EN_S, 0);
4281 roce_set_field(context->byte_76_srqn_op_en,
4282 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
4283 to_hr_srq(ibqp->srq)->srqn);
4284 roce_set_field(qpc_mask->byte_76_srqn_op_en,
4285 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
4288 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
4289 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
4290 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
4291 V2_QPC_BYTE_4_SQPN_S, 0);
4293 if (attr_mask & IB_QP_DEST_QPN) {
4294 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
4295 V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
4296 roce_set_field(qpc_mask->byte_56_dqpn_err,
4297 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
4301 static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
4302 struct hns_roce_qp *hr_qp,
4303 struct hns_roce_v2_qp_context *context,
4304 struct hns_roce_v2_qp_context *qpc_mask)
4306 u64 mtts[MTT_MIN_COUNT] = { 0 };
4310 /* Search qp buf's mtts */
4311 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
4312 MTT_MIN_COUNT, &wqe_sge_ba);
4313 if (hr_qp->rq.wqe_cnt && count < 1) {
4314 ibdev_err(&hr_dev->ib_dev,
4315 "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn);
4319 context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
4320 qpc_mask->wqe_sge_ba = 0;
	/*
	 * In the v2 engine, software passes both the context and a context
	 * mask to hardware when modifying a QP. For every field to be
	 * modified, all bits of that field in the context mask must be
	 * cleared to 0; all other mask bits stay 0x1 so that hardware
	 * leaves those fields unchanged.
	 */
4328 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
4329 V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3));
4330 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
4331 V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
4333 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
4334 V2_QPC_BYTE_12_SQ_HOP_NUM_S,
4335 to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num,
4336 hr_qp->sq.wqe_cnt));
4337 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
4338 V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
4340 roce_set_field(context->byte_20_smac_sgid_idx,
4341 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
4342 V2_QPC_BYTE_20_SGE_HOP_NUM_S,
4343 to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num,
4344 hr_qp->sge.sge_cnt));
4345 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4346 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
4347 V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
4349 roce_set_field(context->byte_20_smac_sgid_idx,
4350 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
4351 V2_QPC_BYTE_20_RQ_HOP_NUM_S,
4352 to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num,
4353 hr_qp->rq.wqe_cnt));
4355 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4356 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
4357 V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
4359 roce_set_field(context->byte_16_buf_ba_pg_sz,
4360 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
4361 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
4362 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
4363 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
4364 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
4365 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
4367 roce_set_field(context->byte_16_buf_ba_pg_sz,
4368 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
4369 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
4370 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
4371 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
4372 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
4373 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
4375 context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
4376 qpc_mask->rq_cur_blk_addr = 0;
4378 roce_set_field(context->byte_92_srq_info,
4379 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
4380 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
4381 upper_32_bits(to_hr_hw_page_addr(mtts[0])));
4382 roce_set_field(qpc_mask->byte_92_srq_info,
4383 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
4384 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
4386 context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
4387 qpc_mask->rq_nxt_blk_addr = 0;
4389 roce_set_field(context->byte_104_rq_sge,
4390 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
4391 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
4392 upper_32_bits(to_hr_hw_page_addr(mtts[1])));
4393 roce_set_field(qpc_mask->byte_104_rq_sge,
4394 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
4395 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
4400 static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
4401 struct hns_roce_qp *hr_qp,
4402 struct hns_roce_v2_qp_context *context,
4403 struct hns_roce_v2_qp_context *qpc_mask)
4405 struct ib_device *ibdev = &hr_dev->ib_dev;
4406 u64 sge_cur_blk = 0;
4410 /* search qp buf's mtts */
4411 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
4413 ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
4417 if (hr_qp->sge.sge_cnt > 0) {
4418 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
4420 &sge_cur_blk, 1, NULL);
4422 ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
	/*
	 * In the v2 engine, software passes both the context and a context
	 * mask to hardware when modifying a QP. For every field to be
	 * modified, all bits of that field in the context mask must be
	 * cleared to 0; all other mask bits stay 0x1 so that hardware
	 * leaves those fields unchanged.
	 */
4434 context->sq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
4435 roce_set_field(context->byte_168_irrl_idx,
4436 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
4437 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
4438 upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4439 qpc_mask->sq_cur_blk_addr = 0;
4440 roce_set_field(qpc_mask->byte_168_irrl_idx,
4441 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
4442 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
4444 context->sq_cur_sge_blk_addr =
4445 cpu_to_le32(to_hr_hw_page_addr(sge_cur_blk));
4446 roce_set_field(context->byte_184_irrl_idx,
4447 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
4448 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
4449 upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
4450 qpc_mask->sq_cur_sge_blk_addr = 0;
4451 roce_set_field(qpc_mask->byte_184_irrl_idx,
4452 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
4453 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
4455 context->rx_sq_cur_blk_addr =
4456 cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
4457 roce_set_field(context->byte_232_irrl_sge,
4458 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
4459 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
4460 upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4461 qpc_mask->rx_sq_cur_blk_addr = 0;
4462 roce_set_field(qpc_mask->byte_232_irrl_sge,
4463 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
4464 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
4469 static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
4470 const struct ib_qp_attr *attr)
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
		return IB_MTU_4096;

	return attr->path_mtu;
4478 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
4479 const struct ib_qp_attr *attr, int attr_mask,
4480 struct hns_roce_v2_qp_context *context,
4481 struct hns_roce_v2_qp_context *qpc_mask)
4483 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4484 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4485 struct ib_device *ibdev = &hr_dev->ib_dev;
4496 ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
4498 ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret);
4502 /* Search IRRL's mtts */
4503 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
4504 hr_qp->qpn, &irrl_ba);
4506 ibdev_err(ibdev, "failed to find qp irrl_table.\n");
4510 /* Search TRRL's mtts */
4511 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
4512 hr_qp->qpn, &trrl_ba);
4514 ibdev_err(ibdev, "failed to find qp trrl_table.\n");
4518 if (attr_mask & IB_QP_ALT_PATH) {
4519 ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n",
4524 roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
4525 V2_QPC_BYTE_132_TRRL_BA_S, trrl_ba >> 4);
4526 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
4527 V2_QPC_BYTE_132_TRRL_BA_S, 0);
4528 context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4));
4529 qpc_mask->trrl_ba = 0;
4530 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
4531 V2_QPC_BYTE_140_TRRL_BA_S,
4532 (u32)(trrl_ba >> (32 + 16 + 4)));
4533 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
4534 V2_QPC_BYTE_140_TRRL_BA_S, 0);
4536 context->irrl_ba = cpu_to_le32(irrl_ba >> 6);
4537 qpc_mask->irrl_ba = 0;
4538 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
4539 V2_QPC_BYTE_208_IRRL_BA_S,
4540 irrl_ba >> (32 + 6));
4541 roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
4542 V2_QPC_BYTE_208_IRRL_BA_S, 0);
4544 roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
4545 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
4547 roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
4548 hr_qp->sq_signal_bits);
4549 roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
4552 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
4554 smac = (u8 *)hr_dev->dev_addr[port];
4555 dmac = (u8 *)attr->ah_attr.roce.dmac;
	/* When the DMAC equals the SMAC or loop_idc is 1, loop the packet back */
4557 if (ether_addr_equal_unaligned(dmac, smac) ||
4558 hr_dev->loop_idc == 0x1) {
4559 roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
4560 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
4563 if (attr_mask & IB_QP_DEST_QPN) {
4564 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
4565 V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
4566 roce_set_field(qpc_mask->byte_56_dqpn_err,
4567 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
	memcpy(&context->dmac, dmac, sizeof(u32));
4571 roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
4572 V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
4574 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
4575 V2_QPC_BYTE_52_DMAC_S, 0);
4577 mtu = get_mtu(ibqp, attr);
4578 hr_qp->path_mtu = mtu;
4580 if (attr_mask & IB_QP_PATH_MTU) {
4581 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
4582 V2_QPC_BYTE_24_MTU_S, mtu);
4583 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
4584 V2_QPC_BYTE_24_MTU_S, 0);
4587 #define MAX_LP_MSG_LEN 65536
4588 /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
4589 lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu));
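	/*
	 * Worked example: with a 4096 B path MTU,
	 * lp_pktn_ini = ilog2(65536 / 4096) = 4, i.e. loopback messages are
	 * segmented into at most 2^4 MTU-sized packets (64 KB).
	 */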
4591 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
4592 V2_QPC_BYTE_56_LP_PKTN_INI_S, lp_pktn_ini);
4593 roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
4594 V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
4596 /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
4597 roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
4598 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, lp_pktn_ini);
4599 roce_set_field(qpc_mask->byte_172_sq_psn,
4600 V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
4601 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
4603 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
4604 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
4605 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
4606 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
4607 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
4608 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
4609 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
4611 context->rq_rnr_timer = 0;
4612 qpc_mask->rq_rnr_timer = 0;
4614 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
4615 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
4616 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
4617 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
	/* RoCEE sends 2^lp_sgen_ini segments at a time */
4620 roce_set_field(context->byte_168_irrl_idx,
4621 V2_QPC_BYTE_168_LP_SGEN_INI_M,
4622 V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
4623 roce_set_field(qpc_mask->byte_168_irrl_idx,
4624 V2_QPC_BYTE_168_LP_SGEN_INI_M,
4625 V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
4630 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
4631 const struct ib_qp_attr *attr, int attr_mask,
4632 struct hns_roce_v2_qp_context *context,
4633 struct hns_roce_v2_qp_context *qpc_mask)
4635 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4636 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4637 struct ib_device *ibdev = &hr_dev->ib_dev;
	/* Alternate path and path migration are not supported */
	if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
		ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x) error.\n", attr_mask);
4646 ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
4648 ibdev_err(ibdev, "failed to config sq buf, ret = %d.\n", ret);
	/*
	 * Clear some fields: since the context was zero-initialized, the
	 * fields themselves need not be written again, but the relevant
	 * fields of the context mask must still be cleared to 0.
	 */
4657 roce_set_field(qpc_mask->byte_232_irrl_sge,
4658 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
4659 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
4661 roce_set_field(qpc_mask->byte_240_irrl_tail,
4662 V2_QPC_BYTE_240_RX_ACK_MSN_M,
4663 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
4665 roce_set_field(qpc_mask->byte_248_ack_psn,
4666 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
4667 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
4668 roce_set_bit(qpc_mask->byte_248_ack_psn,
4669 V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
4670 roce_set_field(qpc_mask->byte_248_ack_psn,
4671 V2_QPC_BYTE_248_IRRL_PSN_M,
4672 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
4674 roce_set_field(qpc_mask->byte_240_irrl_tail,
4675 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
4676 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
4678 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4679 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
4680 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
4682 roce_set_bit(qpc_mask->byte_248_ack_psn,
4683 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
4685 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
4686 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
4688 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4689 V2_QPC_BYTE_212_LSN_S, 0x100);
4690 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4691 V2_QPC_BYTE_212_LSN_S, 0);
4693 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
4694 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
4699 static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
4702 fl = rdma_calc_flow_label(lqpn, rqpn);
4704 return rdma_flow_label_to_udp_sport(fl);
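/*
 * Usage sketch: with fl == 0 the flow label is derived from the QPN pair,
 * so the UDP source port (and therefore ECMP path selection) still varies
 * per connection:
 *
 *	u16 sport = get_udp_sport(0, 0x10, 0x20);
 *
 * rdma_flow_label_to_udp_sport() folds the 20-bit label into the dynamic
 * UDP port range (0xc000 and above).
 */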
4707 static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
4710 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4711 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4712 struct hns_roce_dip *hr_dip;
4713 unsigned long flags;
4716 spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
4718 list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
4719 if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16))
4723 /* If no dgid is found, a new dip and a mapping between dgid and
4724 * dip_idx will be created.
4726 hr_dip = kzalloc(sizeof(*hr_dip), GFP_ATOMIC);
4732 memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4733 hr_dip->dip_idx = *dip_idx = ibqp->qp_num;
4734 list_add_tail(&hr_dip->node, &hr_dev->dip_list);
4737 spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
4747 UNSUPPORT_CONG_LEVEL,
4761 static int check_cong_type(struct ib_qp *ibqp,
4762 struct hns_roce_congestion_algorithm *cong_alg)
4764 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4766 /* different congestion types match different configurations */
4767 switch (hr_dev->caps.cong_type) {
4768 case CONG_TYPE_DCQCN:
4769 cong_alg->alg_sel = CONG_DCQCN;
4770 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
4771 cong_alg->dip_vld = DIP_INVALID;
4773 case CONG_TYPE_LDCP:
4774 cong_alg->alg_sel = CONG_WINDOW;
4775 cong_alg->alg_sub_sel = CONG_LDCP;
4776 cong_alg->dip_vld = DIP_INVALID;
4779 cong_alg->alg_sel = CONG_WINDOW;
4780 cong_alg->alg_sub_sel = CONG_HC3;
4781 cong_alg->dip_vld = DIP_INVALID;
4784 cong_alg->alg_sel = CONG_DCQCN;
4785 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
4786 cong_alg->dip_vld = DIP_VALID;
4789 ibdev_err(&hr_dev->ib_dev,
4790 "error type(%u) for congestion selection.\n",
4791 hr_dev->caps.cong_type);
4798 static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
4799 struct hns_roce_v2_qp_context *context,
4800 struct hns_roce_v2_qp_context *qpc_mask)
4802 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4803 struct hns_roce_congestion_algorithm cong_field;
4804 struct ib_device *ibdev = ibqp->device;
4805 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
4809 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ||
4810 grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE)
4813 ret = check_cong_type(ibqp, &cong_field);
4817 hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
4818 hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
4819 hr_reg_write(qpc_mask, QPC_CONG_ALGO_TMPL_ID, 0);
4820 hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
4821 hr_reg_write(&qpc_mask->ext, QPCEX_CONG_ALG_SEL, 0);
4822 hr_reg_write(&context->ext, QPCEX_CONG_ALG_SUB_SEL,
4823 cong_field.alg_sub_sel);
4824 hr_reg_write(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL, 0);
4825 hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX_VLD, cong_field.dip_vld);
4826 hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD, 0);
4828 /* if dip is disabled, there is no need to set dip idx */
4829 if (cong_field.dip_vld == 0)
4832 ret = get_dip_ctx_idx(ibqp, attr, &dip_idx);
4834 ibdev_err(ibdev, "failed to fill cong field, ret = %d.\n", ret);
4838 hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX, dip_idx);
4839 hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX, 0);
4844 static int hns_roce_v2_set_path(struct ib_qp *ibqp,
4845 const struct ib_qp_attr *attr,
4847 struct hns_roce_v2_qp_context *context,
4848 struct hns_roce_v2_qp_context *qpc_mask)
4850 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4851 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4852 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4853 struct ib_device *ibdev = &hr_dev->ib_dev;
4854 const struct ib_gid_attr *gid_attr = NULL;
4855 int is_roce_protocol;
4856 u16 vlan_id = 0xffff;
4857 bool is_udp = false;
4862 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
4863 hr_port = ib_port - 1;
4864 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
4865 rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
4867 if (is_roce_protocol) {
4868 gid_attr = attr->ah_attr.grh.sgid_attr;
4869 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
4874 is_udp = (gid_attr->gid_type ==
4875 IB_GID_TYPE_ROCE_UDP_ENCAP);
4878 /* Only HIP08 needs to set the vlan_en bits in QPC */
4879 if (vlan_id < VLAN_N_VID &&
4880 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
4881 roce_set_bit(context->byte_76_srqn_op_en,
4882 V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
4883 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
4884 V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
4885 roce_set_bit(context->byte_168_irrl_idx,
4886 V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
4887 roce_set_bit(qpc_mask->byte_168_irrl_idx,
4888 V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
4891 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4892 V2_QPC_BYTE_24_VLAN_ID_S, vlan_id);
4893 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4894 V2_QPC_BYTE_24_VLAN_ID_S, 0);
4896 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
4897 ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n",
4898 grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
4902 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
		ibdev_err(ibdev, "ah attr is not RDMA RoCE type.\n");
4907 roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4908 V2_QPC_BYTE_52_UDPSPN_S,
4909 is_udp ? get_udp_sport(grh->flow_label, ibqp->qp_num,
4910 attr->dest_qp_num) : 0);
4912 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4913 V2_QPC_BYTE_52_UDPSPN_S, 0);
4915 roce_set_field(context->byte_20_smac_sgid_idx,
4916 V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
4919 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4920 V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
4922 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4923 V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
4924 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4925 V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
4927 ret = fill_cong_field(ibqp, attr, context, qpc_mask);
4931 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4932 V2_QPC_BYTE_24_TC_S, get_tclass(&attr->ah_attr.grh));
4933 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4934 V2_QPC_BYTE_24_TC_S, 0);
4936 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4937 V2_QPC_BYTE_28_FL_S, grh->flow_label);
4938 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4939 V2_QPC_BYTE_28_FL_S, 0);
4940 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4941 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
4943 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4944 if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
4946 "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n",
4947 hr_qp->sl, MAX_SERVICE_LEVEL);
4951 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4952 V2_QPC_BYTE_28_SL_S, hr_qp->sl);
4953 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4954 V2_QPC_BYTE_28_SL_S, 0);
4959 static bool check_qp_state(enum ib_qp_state cur_state,
4960 enum ib_qp_state new_state)
4962 static const bool sm[][IB_QPS_ERR + 1] = {
4963 [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
4964 [IB_QPS_INIT] = true },
4965 [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
4966 [IB_QPS_INIT] = true,
4967 [IB_QPS_RTR] = true,
4968 [IB_QPS_ERR] = true },
4969 [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
4970 [IB_QPS_RTS] = true,
4971 [IB_QPS_ERR] = true },
4972 [IB_QPS_RTS] = { [IB_QPS_RESET] = true,
4973 [IB_QPS_RTS] = true,
4974 [IB_QPS_ERR] = true },
4977 [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
4980 return sm[cur_state][new_state];
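/*
 * Example reads of the table above: sm[IB_QPS_INIT][IB_QPS_RTR] is true,
 * so INIT -> RTR is accepted; sm[IB_QPS_RESET][IB_QPS_RTR] was never
 * initialized, so it defaults to false and the transition is rejected.
 */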
4983 static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
4984 const struct ib_qp_attr *attr,
4986 enum ib_qp_state cur_state,
4987 enum ib_qp_state new_state,
4988 struct hns_roce_v2_qp_context *context,
4989 struct hns_roce_v2_qp_context *qpc_mask)
4991 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4994 if (!check_qp_state(cur_state, new_state)) {
4995 ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
4999 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
5000 memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
5001 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
5003 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
5004 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
5006 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
5007 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
5009 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
5010 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
5017 static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
5018 const struct ib_qp_attr *attr,
5020 struct hns_roce_v2_qp_context *context,
5021 struct hns_roce_v2_qp_context *qpc_mask)
5023 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5024 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5027 if (attr_mask & IB_QP_AV) {
5028 ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
5034 if (attr_mask & IB_QP_TIMEOUT) {
5035 if (attr->timeout < 31) {
5036 roce_set_field(context->byte_28_at_fl,
5037 V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
5039 roce_set_field(qpc_mask->byte_28_at_fl,
5040 V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
5043 ibdev_warn(&hr_dev->ib_dev,
5044 "Local ACK timeout shall be 0 to 30.\n");
5048 if (attr_mask & IB_QP_RETRY_CNT) {
5049 roce_set_field(context->byte_212_lsn,
5050 V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
5051 V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
5053 roce_set_field(qpc_mask->byte_212_lsn,
5054 V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
5055 V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
5057 roce_set_field(context->byte_212_lsn,
5058 V2_QPC_BYTE_212_RETRY_CNT_M,
5059 V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
5060 roce_set_field(qpc_mask->byte_212_lsn,
5061 V2_QPC_BYTE_212_RETRY_CNT_M,
5062 V2_QPC_BYTE_212_RETRY_CNT_S, 0);
5065 if (attr_mask & IB_QP_RNR_RETRY) {
5066 roce_set_field(context->byte_244_rnr_rxack,
5067 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
5068 V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
5069 roce_set_field(qpc_mask->byte_244_rnr_rxack,
5070 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
5071 V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
5073 roce_set_field(context->byte_244_rnr_rxack,
5074 V2_QPC_BYTE_244_RNR_CNT_M,
5075 V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
5076 roce_set_field(qpc_mask->byte_244_rnr_rxack,
5077 V2_QPC_BYTE_244_RNR_CNT_M,
5078 V2_QPC_BYTE_244_RNR_CNT_S, 0);
5081 if (attr_mask & IB_QP_SQ_PSN) {
5082 roce_set_field(context->byte_172_sq_psn,
5083 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
5084 V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
5085 roce_set_field(qpc_mask->byte_172_sq_psn,
5086 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
5087 V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
5089 roce_set_field(context->byte_196_sq_psn,
5090 V2_QPC_BYTE_196_SQ_MAX_PSN_M,
5091 V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
5092 roce_set_field(qpc_mask->byte_196_sq_psn,
5093 V2_QPC_BYTE_196_SQ_MAX_PSN_M,
5094 V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
5096 roce_set_field(context->byte_220_retry_psn_msn,
5097 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
5098 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
5099 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
5100 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
5101 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
5103 roce_set_field(context->byte_224_retry_msg,
5104 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
5105 V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
5106 attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
5107 roce_set_field(qpc_mask->byte_224_retry_msg,
5108 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
5109 V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
5111 roce_set_field(context->byte_224_retry_msg,
5112 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
5113 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
5115 roce_set_field(qpc_mask->byte_224_retry_msg,
5116 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
5117 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
5119 roce_set_field(context->byte_244_rnr_rxack,
5120 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
5121 V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
5122 roce_set_field(qpc_mask->byte_244_rnr_rxack,
5123 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
5124 V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
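		/*
		 * A single attr->sq_psn seeds every PSN tracker at once:
		 * the current and maximum send PSNs, the retry PSNs and the
		 * expected ACK PSN, so a freshly modified QP starts from one
		 * consistent view of the send stream.
		 */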
5127 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
5128 attr->max_dest_rd_atomic) {
5129 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
5130 V2_QPC_BYTE_140_RR_MAX_S,
5131 fls(attr->max_dest_rd_atomic - 1));
5132 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
5133 V2_QPC_BYTE_140_RR_MAX_S, 0);
5136 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
5137 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
5138 V2_QPC_BYTE_208_SR_MAX_S,
5139 fls(attr->max_rd_atomic - 1));
5140 roce_set_field(qpc_mask->byte_208_irrl,
5141 V2_QPC_BYTE_208_SR_MAX_M,
5142 V2_QPC_BYTE_208_SR_MAX_S, 0);
5145 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
5146 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
5148 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
5149 roce_set_field(context->byte_80_rnr_rx_cqn,
5150 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
5151 V2_QPC_BYTE_80_MIN_RNR_TIME_S,
5152 attr->min_rnr_timer);
5153 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
5154 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
5155 V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
5158 if (attr_mask & IB_QP_RQ_PSN) {
5159 roce_set_field(context->byte_108_rx_reqepsn,
5160 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
5161 V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
5162 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
5163 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
5164 V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
5166 roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
5167 V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
5168 roce_set_field(qpc_mask->byte_152_raq,
5169 V2_QPC_BYTE_152_RAQ_PSN_M,
5170 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
5173 if (attr_mask & IB_QP_QKEY) {
5174 context->qkey_xrcd = cpu_to_le32(attr->qkey);
5175 qpc_mask->qkey_xrcd = 0;
5176 hr_qp->qkey = attr->qkey;
5182 static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
5183 const struct ib_qp_attr *attr,
5186 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5187 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5189 if (attr_mask & IB_QP_ACCESS_FLAGS)
5190 hr_qp->atomic_rd_en = attr->qp_access_flags;
5192 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
5193 hr_qp->resp_depth = attr->max_dest_rd_atomic;
5194 if (attr_mask & IB_QP_PORT) {
5195 hr_qp->port = attr->port_num - 1;
5196 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
5200 static void clear_qp(struct hns_roce_qp *hr_qp)
5202 struct ib_qp *ibqp = &hr_qp->ibqp;
5205 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
5208 if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq)
5209 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq),
5210 hr_qp->qpn, ibqp->srq ?
5211 to_hr_srq(ibqp->srq) : NULL);
5213 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
5214 *hr_qp->rdb.db_record = 0;
5220 hr_qp->next_sge = 0;
5223 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
5224 const struct ib_qp_attr *attr,
5225 int attr_mask, enum ib_qp_state cur_state,
5226 enum ib_qp_state new_state)
5228 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5229 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5230 struct hns_roce_v2_qp_context ctx[2];
5231 struct hns_roce_v2_qp_context *context = ctx;
5232 struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
5233 struct ib_device *ibdev = &hr_dev->ib_dev;
5234 unsigned long sq_flag = 0;
5235 unsigned long rq_flag = 0;
5238 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
	/*
	 * In the v2 engine, software passes both the context and a context
	 * mask to hardware when modifying a QP. For every field to be
	 * modified, all bits of that field in the context mask must be
	 * cleared to 0; all other mask bits stay 0x1 so that hardware
	 * leaves those fields unchanged.
	 */
5247 memset(context, 0, hr_dev->caps.qpc_sz);
5248 memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
5250 ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
5251 new_state, context, qpc_mask);
	/* When the QP moves to the error state, SQ and RQ WQEs are flushed */
5256 if (new_state == IB_QPS_ERR) {
5257 if (ibqp->qp_type != IB_QPT_XRC_TGT) {
5258 spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
5259 hr_qp->state = IB_QPS_ERR;
5260 roce_set_field(context->byte_160_sq_ci_pi,
5261 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
5262 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
5264 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
5265 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
5266 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
5267 spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
5270 if (!ibqp->srq && ibqp->qp_type != IB_QPT_XRC_INI &&
5271 ibqp->qp_type != IB_QPT_XRC_TGT) {
5272 spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
5273 hr_qp->state = IB_QPS_ERR;
5274 roce_set_field(context->byte_84_rq_ci_pi,
5275 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
5276 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
5278 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
5279 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
5280 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
5281 spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
5285 /* Configure the optional fields */
5286 ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
5291 roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
5292 ((to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC) ||
5293 ibqp->srq) ? 1 : 0);
5294 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
5295 V2_QPC_BYTE_108_INV_CREDIT_S, 0);
	/* Every state migration must update the QP state field */
5298 roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
5299 V2_QPC_BYTE_60_QP_ST_S, new_state);
5300 roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
5301 V2_QPC_BYTE_60_QP_ST_S, 0);
	/* SW passes the context to HW */
5304 ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
5306 ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret);
5310 hr_qp->state = new_state;
5312 hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
5314 if (new_state == IB_QPS_RESET && !ibqp->uobject)
5321 static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
5323 static const enum ib_qp_state map[] = {
5324 [HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
5325 [HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
5326 [HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
5327 [HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
5328 [HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
5329 [HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
5330 [HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
5331 [HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
5334 return (state < ARRAY_SIZE(map)) ? map[state] : -1;
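/*
 * Example: to_ib_qp_st(HNS_ROCE_QP_ST_SQ_DRAINING) yields IB_QPS_SQD, while
 * any hardware state beyond the map yields -1, which the caller treats as
 * an illegal state.
 */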
5337 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
5338 struct hns_roce_qp *hr_qp,
5339 struct hns_roce_v2_qp_context *hr_context)
5341 struct hns_roce_cmd_mailbox *mailbox;
5344 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5345 if (IS_ERR(mailbox))
5346 return PTR_ERR(mailbox);
5348 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
5349 HNS_ROCE_CMD_QUERY_QPC,
5350 HNS_ROCE_CMD_TIMEOUT_MSECS);
5354 memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz);
5357 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5361 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
5363 struct ib_qp_init_attr *qp_init_attr)
5365 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5366 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5367 struct hns_roce_v2_qp_context context = {};
5368 struct ib_device *ibdev = &hr_dev->ib_dev;
5373 memset(qp_attr, 0, sizeof(*qp_attr));
5374 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
5376 mutex_lock(&hr_qp->mutex);
5378 if (hr_qp->state == IB_QPS_RESET) {
5379 qp_attr->qp_state = IB_QPS_RESET;
5384 ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
5386 ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
5391 state = roce_get_field(context.byte_60_qpst_tempid,
5392 V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
5393 tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
5394 if (tmp_qp_state == -1) {
5395 ibdev_err(ibdev, "Illegal ib_qp_state\n");
5399 hr_qp->state = (u8)tmp_qp_state;
5400 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
5401 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context.byte_24_mtu_tc,
5402 V2_QPC_BYTE_24_MTU_M,
5403 V2_QPC_BYTE_24_MTU_S);
5404 qp_attr->path_mig_state = IB_MIG_ARMED;
5405 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
5406 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
5407 qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);
5409 qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
5410 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
5411 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
5412 qp_attr->sq_psn = (u32)roce_get_field(context.byte_172_sq_psn,
5413 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
5414 V2_QPC_BYTE_172_SQ_CUR_PSN_S);
5415 qp_attr->dest_qp_num = (u8)roce_get_field(context.byte_56_dqpn_err,
5416 V2_QPC_BYTE_56_DQPN_M,
5417 V2_QPC_BYTE_56_DQPN_S);
5418 qp_attr->qp_access_flags = ((roce_get_bit(context.byte_76_srqn_op_en,
5419 V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) |
5420 ((roce_get_bit(context.byte_76_srqn_op_en,
5421 V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) |
5422 ((roce_get_bit(context.byte_76_srqn_op_en,
5423 V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
5425 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
5426 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
5427 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
5428 struct ib_global_route *grh =
5429 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
5431 rdma_ah_set_sl(&qp_attr->ah_attr,
5432 roce_get_field(context.byte_28_at_fl,
5433 V2_QPC_BYTE_28_SL_M,
5434 V2_QPC_BYTE_28_SL_S));
5435 grh->flow_label = roce_get_field(context.byte_28_at_fl,
5436 V2_QPC_BYTE_28_FL_M,
5437 V2_QPC_BYTE_28_FL_S);
5438 grh->sgid_index = roce_get_field(context.byte_20_smac_sgid_idx,
5439 V2_QPC_BYTE_20_SGID_IDX_M,
5440 V2_QPC_BYTE_20_SGID_IDX_S);
5441 grh->hop_limit = roce_get_field(context.byte_24_mtu_tc,
5442 V2_QPC_BYTE_24_HOP_LIMIT_M,
5443 V2_QPC_BYTE_24_HOP_LIMIT_S);
5444 grh->traffic_class = roce_get_field(context.byte_24_mtu_tc,
5445 V2_QPC_BYTE_24_TC_M,
5446 V2_QPC_BYTE_24_TC_S);
5448 memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
5451 qp_attr->port_num = hr_qp->port + 1;
5452 qp_attr->sq_draining = 0;
5453 qp_attr->max_rd_atomic = 1 << roce_get_field(context.byte_208_irrl,
5454 V2_QPC_BYTE_208_SR_MAX_M,
5455 V2_QPC_BYTE_208_SR_MAX_S);
5456 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq,
5457 V2_QPC_BYTE_140_RR_MAX_M,
5458 V2_QPC_BYTE_140_RR_MAX_S);
5460 qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
5461 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
5462 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
5463 qp_attr->timeout = (u8)roce_get_field(context.byte_28_at_fl,
5464 V2_QPC_BYTE_28_AT_M,
5465 V2_QPC_BYTE_28_AT_S);
5466 qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
5467 V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
5468 V2_QPC_BYTE_212_RETRY_NUM_INIT_S);
5469 qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
5470 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
5471 V2_QPC_BYTE_244_RNR_NUM_INIT_S);
5474 qp_attr->cur_qp_state = qp_attr->qp_state;
5475 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
5476 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
5477 qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
5479 if (!ibqp->uobject) {
5480 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
5481 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
5483 qp_attr->cap.max_send_wr = 0;
5484 qp_attr->cap.max_send_sge = 0;
5487 qp_init_attr->cap = qp_attr->cap;
5488 qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
5491 mutex_unlock(&hr_qp->mutex);
5495 static inline int modify_qp_is_ok(struct hns_roce_qp *hr_qp)
5497 return ((hr_qp->ibqp.qp_type == IB_QPT_RC ||
5498 hr_qp->ibqp.qp_type == IB_QPT_UD ||
5499 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
5500 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
5501 hr_qp->state != IB_QPS_RESET);
5504 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
5505 struct hns_roce_qp *hr_qp,
5506 struct ib_udata *udata)
5508 struct ib_device *ibdev = &hr_dev->ib_dev;
5509 struct hns_roce_cq *send_cq, *recv_cq;
5510 unsigned long flags;
5513 if (modify_qp_is_ok(hr_qp)) {
5514 /* Modify qp to reset before destroying qp */
5515 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
5516 hr_qp->state, IB_QPS_RESET);
5519 "failed to modify QP to RST, ret = %d.\n",
5523 send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
5524 recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
5526 spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
5527 hns_roce_lock_cqs(send_cq, recv_cq);
5531 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
5533 to_hr_srq(hr_qp->ibqp.srq) :
5536 if (send_cq && send_cq != recv_cq)
5537 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
5541 hns_roce_qp_remove(hr_dev, hr_qp);
5543 hns_roce_unlock_cqs(send_cq, recv_cq);
5544 spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
5549 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
5551 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5552 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5555 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
5557 ibdev_err(&hr_dev->ib_dev,
5558 "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n",
5561 hns_roce_qp_destroy(hr_dev, hr_qp, udata);
5566 static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
5567 struct hns_roce_qp *hr_qp)
5569 struct ib_device *ibdev = &hr_dev->ib_dev;
5570 struct hns_roce_sccc_clr_done *resp;
5571 struct hns_roce_sccc_clr *clr;
5572 struct hns_roce_cmq_desc desc;
5575 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
5578 mutex_lock(&hr_dev->qp_table.scc_mutex);
5580 /* set scc ctx clear done flag */
5581 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
5582 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5584 ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d.\n", ret);
5588 /* clear scc context */
5589 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
5590 clr = (struct hns_roce_sccc_clr *)desc.data;
5591 clr->qpn = cpu_to_le32(hr_qp->qpn);
5592 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5594 ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d.\n", ret);
	/* query whether the SCC context clear is done */
5599 resp = (struct hns_roce_sccc_clr_done *)desc.data;
5600 for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
5601 hns_roce_cmq_setup_basic_desc(&desc,
5602 HNS_ROCE_OPC_QUERY_SCCC, true);
5603 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5605 ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n",
	ibdev_err(ibdev, "Query SCC clr done flag timed out.\n");
5620 mutex_unlock(&hr_dev->qp_table.scc_mutex);
5624 #define DMA_IDX_SHIFT 3
5625 #define DMA_WQE_SHIFT 3
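/*
 * Both shifts are 3 because the hardware expects these base addresses in
 * units of 8 bytes: a DMA address is right-shifted by 3 before its low and
 * high halves are written into the SRQ context below.
 */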
5627 static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
5628 struct hns_roce_srq_context *ctx)
5630 struct hns_roce_idx_que *idx_que = &srq->idx_que;
5631 struct ib_device *ibdev = srq->ibsrq.device;
5632 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
5633 u64 mtts_idx[MTT_MIN_COUNT] = {};
5634 dma_addr_t dma_handle_idx = 0;
5637 /* Get physical address of idx que buf */
5638 ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
5639 ARRAY_SIZE(mtts_idx), &dma_handle_idx);
5641 ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
5646 hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
5647 to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));
5649 hr_reg_write(ctx, SRQC_IDX_BT_BA_L, dma_handle_idx >> DMA_IDX_SHIFT);
5650 hr_reg_write(ctx, SRQC_IDX_BT_BA_H,
5651 upper_32_bits(dma_handle_idx >> DMA_IDX_SHIFT));
5653 hr_reg_write(ctx, SRQC_IDX_BA_PG_SZ,
5654 to_hr_hw_page_shift(idx_que->mtr.hem_cfg.ba_pg_shift));
5655 hr_reg_write(ctx, SRQC_IDX_BUF_PG_SZ,
5656 to_hr_hw_page_shift(idx_que->mtr.hem_cfg.buf_pg_shift));
5658 hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_L,
5659 to_hr_hw_page_addr(mtts_idx[0]));
5660 hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_H,
5661 upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
5663 hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_L,
5664 to_hr_hw_page_addr(mtts_idx[1]));
5665 hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_H,
5666 upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
5671 static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
5673 struct ib_device *ibdev = srq->ibsrq.device;
5674 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
5675 struct hns_roce_srq_context *ctx = mb_buf;
5676 u64 mtts_wqe[MTT_MIN_COUNT] = {};
5677 dma_addr_t dma_handle_wqe = 0;
5680 memset(ctx, 0, sizeof(*ctx));
5682 /* Get the physical address of srq buf */
5683 ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
5684 ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
5686 ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
5691 hr_reg_write(ctx, SRQC_SRQ_ST, 1);
5692 hr_reg_write(ctx, SRQC_SRQ_TYPE,
5693 !!(srq->ibsrq.srq_type == IB_SRQT_XRC));
5694 hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn);
5695 hr_reg_write(ctx, SRQC_SRQN, srq->srqn);
5696 hr_reg_write(ctx, SRQC_XRCD, srq->xrcdn);
5697 hr_reg_write(ctx, SRQC_XRC_CQN, srq->cqn);
5698 hr_reg_write(ctx, SRQC_SHIFT, ilog2(srq->wqe_cnt));
5699 hr_reg_write(ctx, SRQC_RQWS,
5700 srq->max_gs <= 0 ? 0 : fls(srq->max_gs - 1));
5702 hr_reg_write(ctx, SRQC_WQE_HOP_NUM,
5703 to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
5706 hr_reg_write(ctx, SRQC_WQE_BT_BA_L, dma_handle_wqe >> DMA_WQE_SHIFT);
5707 hr_reg_write(ctx, SRQC_WQE_BT_BA_H,
5708 upper_32_bits(dma_handle_wqe >> DMA_WQE_SHIFT));
5710 hr_reg_write(ctx, SRQC_WQE_BA_PG_SZ,
5711 to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
5712 hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ,
5713 to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
5715 return hns_roce_v2_write_srqc_index_queue(srq, ctx);
5718 static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
5719 struct ib_srq_attr *srq_attr,
5720 enum ib_srq_attr_mask srq_attr_mask,
5721 struct ib_udata *udata)
5723 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5724 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5725 struct hns_roce_srq_context *srq_context;
5726 struct hns_roce_srq_context *srqc_mask;
5727 struct hns_roce_cmd_mailbox *mailbox;
5730 /* Resizing SRQs is not supported yet */
5731 if (srq_attr_mask & IB_SRQ_MAX_WR)
5734 if (srq_attr_mask & IB_SRQ_LIMIT) {
5735 if (srq_attr->srq_limit > srq->wqe_cnt)
5738 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5739 if (IS_ERR(mailbox))
5740 return PTR_ERR(mailbox);
5742 srq_context = mailbox->buf;
5743 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
5745 memset(srqc_mask, 0xff, sizeof(*srqc_mask));
5747 roce_set_field(srq_context->byte_8_limit_wl,
5748 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5749 SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
5750 roce_set_field(srqc_mask->byte_8_limit_wl,
5751 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5752 SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
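		/*
		 * Usage note (per the usual IB SRQ semantics): once armed
		 * this way, hardware raises an asynchronous
		 * HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH event when the count
		 * of outstanding SRQ WQEs drops below srq_attr->srq_limit.
		 */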
5754 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
5755 HNS_ROCE_CMD_MODIFY_SRQC,
5756 HNS_ROCE_CMD_TIMEOUT_MSECS);
5757 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5759 ibdev_err(&hr_dev->ib_dev,
5760 "failed to handle cmd of modifying SRQ, ret = %d.\n",
5769 static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
5771 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5772 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5773 struct hns_roce_srq_context *srq_context;
5774 struct hns_roce_cmd_mailbox *mailbox;
5778 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5779 if (IS_ERR(mailbox))
5780 return PTR_ERR(mailbox);
5782 srq_context = mailbox->buf;
5783 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
5784 HNS_ROCE_CMD_QUERY_SRQC,
5785 HNS_ROCE_CMD_TIMEOUT_MSECS);
5787 ibdev_err(&hr_dev->ib_dev,
5788 "failed to process cmd of querying SRQ, ret = %d.\n",
5793 limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
5794 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5795 SRQC_BYTE_8_SRQ_LIMIT_WL_S);
5797 attr->srq_limit = limit_wl;
5798 attr->max_wr = srq->wqe_cnt;
5799 attr->max_sge = srq->max_gs - srq->rsv_sge;
5802 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5806 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
5808 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
5809 struct hns_roce_v2_cq_context *cq_context;
5810 struct hns_roce_cq *hr_cq = to_hr_cq(cq);
5811 struct hns_roce_v2_cq_context *cqc_mask;
5812 struct hns_roce_cmd_mailbox *mailbox;
5815 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5816 if (IS_ERR(mailbox))
5817 return PTR_ERR(mailbox);
5819 cq_context = mailbox->buf;
5820 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
5822 memset(cqc_mask, 0xff, sizeof(*cqc_mask));
5824 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
5825 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
5827 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
5828 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
5830 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
5831 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
5833 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
5834 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
5837 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
5838 HNS_ROCE_CMD_MODIFY_CQC,
5839 HNS_ROCE_CMD_TIMEOUT_MSECS);
5840 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5842 ibdev_err(&hr_dev->ib_dev,
5843 "failed to process cmd when modifying CQ, ret = %d.\n",
5849 static void hns_roce_irq_work_handle(struct work_struct *work)
5851 struct hns_roce_work *irq_work =
5852 container_of(work, struct hns_roce_work, work);
5853 struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
5855 switch (irq_work->event_type) {
5856 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		ibdev_info(ibdev, "Path migration succeeded.\n");
5859 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5860 ibdev_warn(ibdev, "Path migration failed.\n");
5862 case HNS_ROCE_EVENT_TYPE_COMM_EST:
5864 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5865 ibdev_warn(ibdev, "Send queue drained.\n");
5867 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		ibdev_err(ibdev, "Local work queue 0x%x catastrophic error, sub_event type is: %d\n",
5869 irq_work->queue_num, irq_work->sub_type);
5871 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5872 ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
5873 irq_work->queue_num);
5875 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5876 ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
5877 irq_work->queue_num, irq_work->sub_type);
5879 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		ibdev_warn(ibdev, "SRQ limit reached.\n");
5882 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		ibdev_warn(ibdev, "SRQ last wqe reached.\n");
5885 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		ibdev_err(ibdev, "SRQ catastrophic error.\n");
5888 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5889 ibdev_err(ibdev, "CQ 0x%x access err.\n", irq_work->queue_num);
5891 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5892 ibdev_warn(ibdev, "CQ 0x%x overflow\n", irq_work->queue_num);
5894 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5895 ibdev_warn(ibdev, "DB overflow.\n");
5897 case HNS_ROCE_EVENT_TYPE_FLR:
5898 ibdev_warn(ibdev, "Function level reset.\n");
5900 case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
5901 ibdev_err(ibdev, "xrc domain violation error.\n");
5903 case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
5904 ibdev_err(ibdev, "invalid xrceth error.\n");
5913 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
5914 struct hns_roce_eq *eq, u32 queue_num)
5916 struct hns_roce_work *irq_work;
	irq_work = kzalloc(sizeof(*irq_work), GFP_ATOMIC);
	if (!irq_work)
		return;

	INIT_WORK(&irq_work->work, hns_roce_irq_work_handle);
5923 irq_work->hr_dev = hr_dev;
5924 irq_work->event_type = eq->event_type;
5925 irq_work->sub_type = eq->sub_type;
5926 irq_work->queue_num = queue_num;
5927 queue_work(hr_dev->irq_workq, &(irq_work->work));
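/* Ring the EQ doorbell: report the current consumer index to hardware
 * and re-arm the queue according to its arming state.
 */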
static void update_eq_db(struct hns_roce_eq *eq)
{
	struct hns_roce_dev *hr_dev = eq->hr_dev;
	struct hns_roce_v2_db eq_db = {};

	if (eq->type_flag == HNS_ROCE_AEQ) {
		roce_set_field(eq_db.byte_4, V2_EQ_DB_CMD_M, V2_EQ_DB_CMD_S,
			       eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
			       HNS_ROCE_EQ_DB_CMD_AEQ :
			       HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
	} else {
		roce_set_field(eq_db.byte_4, V2_EQ_DB_TAG_M, V2_EQ_DB_TAG_S,
			       eq->eqn);

		roce_set_field(eq_db.byte_4, V2_EQ_DB_CMD_M, V2_EQ_DB_CMD_S,
			       eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
			       HNS_ROCE_EQ_DB_CMD_CEQ :
			       HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
	}

	roce_set_field(eq_db.parameter, V2_EQ_DB_CONS_IDX_M,
		       V2_EQ_DB_CONS_IDX_S, eq->cons_index);

	hns_roce_write64(hr_dev, (__le32 *)&eq_db, eq->db_reg);
}

static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe;

	aeqe = hns_roce_buf_offset(eq->mtr.kmem,
				   (eq->cons_index & (eq->entries - 1)) *
				   eq->eqe_size);

	return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}

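/* An AEQE is owned by software when its owner bit differs from the
 * "pass" bit derived from the consumer index, which toggles every time
 * the queue wraps; the loop below polls entries until that test fails.
 */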
static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
	int aeqe_found = 0;
	int event_type;
	u32 queue_num;
	int sub_type;

	while (aeqe) {
		/* Make sure we read AEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		event_type = roce_get_field(aeqe->asyn,
					    HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
					    HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
		sub_type = roce_get_field(aeqe->asyn,
					  HNS_ROCE_V2_AEQE_SUB_TYPE_M,
					  HNS_ROCE_V2_AEQE_SUB_TYPE_S);
		queue_num = roce_get_field(aeqe->event.queue_event.num,
					   HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
					   HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);

		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
		case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
			hns_roce_qp_event(hr_dev, queue_num, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			hns_roce_srq_event(hr_dev, queue_num, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
			hns_roce_cq_event(hr_dev, queue_num, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			hns_roce_cmd_event(hr_dev,
					le16_to_cpu(aeqe->event.cmd.token),
					aeqe->event.cmd.status,
					le64_to_cpu(aeqe->event.cmd.out_param));
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
		case HNS_ROCE_EVENT_TYPE_FLR:
			break;
		default:
			dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
				event_type, eq->eqn, eq->cons_index);
			break;
		}

		eq->event_type = event_type;
		eq->sub_type = sub_type;
		++eq->cons_index;
		aeqe_found = 1;

		hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);

		aeqe = next_aeqe_sw_v2(eq);
	}

	update_eq_db(eq);

	return aeqe_found;
}

static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;

	ceqe = hns_roce_buf_offset(eq->mtr.kmem,
				   (eq->cons_index & (eq->entries - 1)) *
				   eq->eqe_size);

	return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
		(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}

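/* Poll the CEQ and dispatch a completion notification for each reported
 * CQN; the owner-bit test mirrors the AEQ case above.
 */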
static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
	int ceqe_found = 0;
	u32 cqn;

	while (ceqe) {
		/* Make sure we read CEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		cqn = roce_get_field(ceqe->comp, HNS_ROCE_V2_CEQE_COMP_CQN_M,
				     HNS_ROCE_V2_CEQE_COMP_CQN_S);

		hns_roce_cq_completion(hr_dev, cqn);

		++eq->cons_index;
		ceqe_found = 1;

		ceqe = next_ceqe_sw_v2(eq);
	}

	update_eq_db(eq);

	return ceqe_found;
}

static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
{
	struct hns_roce_eq *eq = eq_ptr;
	struct hns_roce_dev *hr_dev = eq->hr_dev;
	int int_work;

	if (eq->type_flag == HNS_ROCE_CEQ)
		/* Completion event interrupt */
		int_work = hns_roce_v2_ceq_int(hr_dev, eq);
	else
		/* Asynchronous event interrupt */
		int_work = hns_roce_v2_aeq_int(hr_dev, eq);

	return IRQ_RETVAL(int_work);
}

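/* Handler for the abnormal interrupt vector (AEQ overflow and RAS). An
 * AEQ overflow cannot be recovered here, so after clearing the status
 * bit a function-level reset is requested through the hnae3 framework.
 */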
static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
{
	struct hns_roce_dev *hr_dev = dev_id;
	struct device *dev = hr_dev->dev;
	int int_work = 0;
	u32 int_st;
	u32 int_en;

	/* Abnormal interrupt */
	int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
	int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);

	if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
		struct pci_dev *pdev = hr_dev->pci_dev;
		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
		const struct hnae3_ae_ops *ops = ae_dev->ops;

		dev_err(dev, "AEQ overflow!\n");

		int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);

		/* Set reset level for reset_event() */
		if (ops->set_default_reset_request)
			ops->set_default_reset_request(ae_dev,
						       HNAE3_FUNC_RESET);
		if (ops->reset_event)
			ops->reset_event(pdev, NULL);

		int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = 1;
	} else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_RAS_INT_S)) {
		dev_err(dev, "RAS interrupt!\n");

		int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_RAS_INT_S;
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);

		int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = 1;
	} else {
		dev_err(dev, "There is no abnormal irq found!\n");
	}

	return IRQ_RETVAL(int_work);
}

static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
					int eq_num, u32 enable_flag)
{
	int i;

	for (i = 0; i < eq_num; i++)
		roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
			   i * EQ_REG_OFFSET, enable_flag);

	roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, enable_flag);
	roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
}

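/* EQNs are allocated CEQs-first, so an eqn below the number of
 * completion vectors addresses a CEQ context; anything above it
 * addresses an AEQ context.
 */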
static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
{
	struct device *dev = hr_dev->dev;
	int ret;

	if (eqn < hr_dev->caps.num_comp_vectors)
		ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
					0, HNS_ROCE_CMD_DESTROY_CEQC,
					HNS_ROCE_CMD_TIMEOUT_MSECS);
	else
		ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
					0, HNS_ROCE_CMD_DESTROY_AEQC,
					HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
}

static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	hns_roce_mtr_destroy(hr_dev, &eq->mtr);
}

static void init_eq_config(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	eq->db_reg = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
	eq->cons_index = 0;
	eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
	eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
	eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
	eq->shift = ilog2((unsigned int)eq->entries);
}

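/* Build the EQ context (EQC) in the mailbox buffer: state, hop number,
 * coalescing parameters and the buffer addresses located through the
 * MTR, split into the low/mid/high fields the hardware expects.
 */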
static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
		      void *mb_buf)
{
	u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
	struct hns_roce_eq_context *eqc;
	u64 bt_ba = 0;
	int count;

	eqc = mb_buf;
	memset(eqc, 0, sizeof(struct hns_roce_eq_context));

	init_eq_config(hr_dev, eq);

	/* if not multi-hop, the eqe buffer uses only one trunk */
	count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
				  &bt_ba);
	if (count < 1) {
		dev_err(hr_dev->dev, "failed to find EQE mtr\n");
		return -ENOBUFS;
	}

	hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
	hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
	hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
	hr_reg_write(eqc, EQC_COALESCE, eq->coalesce);
	hr_reg_write(eqc, EQC_ARM_ST, eq->arm_st);
	hr_reg_write(eqc, EQC_EQN, eq->eqn);
	hr_reg_write(eqc, EQC_EQE_CNT, HNS_ROCE_EQ_INIT_EQE_CNT);
	hr_reg_write(eqc, EQC_EQE_BA_PG_SZ,
		     to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
	hr_reg_write(eqc, EQC_EQE_BUF_PG_SZ,
		     to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
	hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
	hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);
	hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
	hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
	hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
	hr_reg_write(eqc, EQC_EQE_BA_H, bt_ba >> 35);
	hr_reg_write(eqc, EQC_SHIFT, eq->shift);
	hr_reg_write(eqc, EQC_MSI_INDX, HNS_ROCE_EQ_INIT_MSI_IDX);
	hr_reg_write(eqc, EQC_CUR_EQE_BA_L, eqe_ba[0] >> 12);
	hr_reg_write(eqc, EQC_CUR_EQE_BA_M, eqe_ba[0] >> 28);
	hr_reg_write(eqc, EQC_CUR_EQE_BA_H, eqe_ba[0] >> 60);
	hr_reg_write(eqc, EQC_EQ_CONS_INDX, HNS_ROCE_EQ_INIT_CONS_IDX);
	hr_reg_write(eqc, EQC_NEX_EQE_BA_L, eqe_ba[1] >> 12);
	hr_reg_write(eqc, EQC_NEX_EQE_BA_H, eqe_ba[1] >> 44);
	hr_reg_write(eqc, EQC_EQE_SIZE,
		     !!(eq->eqe_size == HNS_ROCE_V3_EQE_SIZE));

	return 0;
}

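/* Allocate the EQE buffer as a single MTR region; a hop number of 0
 * means the buffer is addressed directly, without multi-hop page
 * tables.
 */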
static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
		eq->hop_num = 0;
	else
		eq->hop_num = hr_dev->caps.eqe_hop_num;

	buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = eq->entries * eq->eqe_size;
	buf_attr.region[0].hopnum = eq->hop_num;
	buf_attr.region_count = 1;

	err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
				  hr_dev->caps.eqe_ba_pg_sz +
				  HNS_HW_PAGE_SHIFT, NULL, 0);
	if (err)
		dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);

	return err;
}

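/* Create one EQ: allocate its buffer, build the context in a mailbox
 * and post the CREATE_CEQC/CREATE_AEQC command; on failure, the buffer
 * and the mailbox are released in reverse order.
 */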
static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
				 struct hns_roce_eq *eq,
				 unsigned int eq_cmd)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR_OR_NULL(mailbox))
		return -ENOMEM;

	ret = alloc_eq_buf(hr_dev, eq);
	if (ret)
		goto free_cmd_mbox;

	ret = config_eqc(hr_dev, eq, mailbox->buf);
	if (ret)
		goto err_cmd_mbox;

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
				eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret) {
		dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
		goto err_cmd_mbox;
	}

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_cmd_mbox:
	free_eq_buf(hr_dev, eq);

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

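/* Request all vectors in the fixed order abnormal + AEQ + CEQ. Note
 * that irq_names[] is laid out abn/aeq/ceq while eq_table->eq[] stores
 * the CEQs first, hence the index arithmetic when picking the name for
 * a given EQ.
 */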
static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
				  int comp_num, int aeq_num, int other_num)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int i, j;
	int ret;

	for (i = 0; i < irq_num; i++) {
		hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
					       GFP_KERNEL);
		if (!hr_dev->irq_names[i]) {
			ret = -ENOMEM;
			goto err_kzalloc_failed;
		}
	}

	/* irq contains: abnormal + AEQ + CEQ */
	for (j = 0; j < other_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-abn-%d", j);

	for (j = other_num; j < (other_num + aeq_num); j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-aeq-%d", j - other_num);

	for (j = (other_num + aeq_num); j < irq_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-ceq-%d", j - other_num - aeq_num);

	for (j = 0; j < irq_num; j++) {
		if (j < other_num)
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v2_msix_interrupt_abn,
					  0, hr_dev->irq_names[j], hr_dev);
		else if (j < (other_num + comp_num))
			ret = request_irq(eq_table->eq[j - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[j + aeq_num],
					  &eq_table->eq[j - other_num]);
		else
			ret = request_irq(eq_table->eq[j - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[j - comp_num],
					  &eq_table->eq[j - other_num]);
		if (ret) {
			dev_err(hr_dev->dev, "Request irq error!\n");
			goto err_request_failed;
		}
	}

	return 0;

err_request_failed:
	for (j -= 1; j >= 0; j--)
		if (j < other_num)
			free_irq(hr_dev->irq[j], hr_dev);
		else
			free_irq(eq_table->eq[j - other_num].irq,
				 &eq_table->eq[j - other_num]);

err_kzalloc_failed:
	for (i -= 1; i >= 0; i--)
		kfree(hr_dev->irq_names[i]);

	return ret;
}

static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
{
	int irq_num;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;

	for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
		free_irq(hr_dev->irq[i], hr_dev);

	for (i = 0; i < eq_num; i++)
		free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);

	for (i = 0; i < irq_num; i++)
		kfree(hr_dev->irq_names[i]);
}

static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_eq *eq;
	unsigned int eq_cmd;
	int irq_num;
	int eq_num;
	int other_num;
	int comp_num;
	int aeq_num;
	int i;
	int ret;

	other_num = hr_dev->caps.num_other_vectors;
	comp_num = hr_dev->caps.num_comp_vectors;
	aeq_num = hr_dev->caps.num_aeq_vectors;

	eq_num = comp_num + aeq_num;
	irq_num = eq_num + other_num;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	/* create eq */
	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		if (i < comp_num) {
			/* CEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->eqe_size = hr_dev->caps.ceqe_size;
			eq->irq = hr_dev->irq[i + other_num + aeq_num];
			eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
		} else {
			/* AEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->eqe_size = hr_dev->caps.aeqe_size;
			eq->irq = hr_dev->irq[i - comp_num + other_num];
			eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
		}

		ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
		if (ret) {
			dev_err(dev, "eq create failed.\n");
			goto err_create_eq_fail;
		}
	}

	/* enable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);

	ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num,
				     aeq_num, other_num);
	if (ret) {
		dev_err(dev, "Request irq failed.\n");
		goto err_request_irq_fail;
	}

	hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
	if (!hr_dev->irq_workq) {
		dev_err(dev, "Create irq workqueue failed!\n");
		ret = -ENOMEM;
		goto err_create_wq_fail;
	}

	return 0;

err_create_wq_fail:
	__hns_roce_free_irq(hr_dev);

err_request_irq_fail:
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);

err_create_eq_fail:
	for (i -= 1; i >= 0; i--)
		free_eq_buf(hr_dev, &eq_table->eq[i]);
	kfree(eq_table->eq);

	return ret;
}

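/* Tear down in the reverse order of init: mask the interrupts, free the
 * vectors, destroy the work queue, then destroy each EQ context before
 * freeing its buffer.
 */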
static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;

	/* Disable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);

	__hns_roce_free_irq(hr_dev);
	destroy_workqueue(hr_dev->irq_workq);

	for (i = 0; i < eq_num; i++) {
		hns_roce_v2_destroy_eqc(hr_dev, i);

		free_eq_buf(hr_dev, &eq_table->eq[i]);
	}

	kfree(eq_table->eq);
}

static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
	.query_cqc_info = hns_roce_v2_query_cqc_info,
};

static const struct ib_device_ops hns_roce_v2_dev_ops = {
	.destroy_qp = hns_roce_v2_destroy_qp,
	.modify_cq = hns_roce_v2_modify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.post_recv = hns_roce_v2_post_recv,
	.post_send = hns_roce_v2_post_send,
	.query_qp = hns_roce_v2_query_qp,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
};

static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
	.modify_srq = hns_roce_v2_modify_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
	.query_srq = hns_roce_v2_query_srq,
};

static const struct hns_roce_hw hns_roce_hw_v2 = {
	.cmq_init = hns_roce_v2_cmq_init,
	.cmq_exit = hns_roce_v2_cmq_exit,
	.hw_profile = hns_roce_v2_profile,
	.hw_init = hns_roce_v2_init,
	.hw_exit = hns_roce_v2_exit,
	.post_mbox = v2_post_mbox,
	.poll_mbox_done = v2_poll_mbox_done,
	.chk_mbox_avail = v2_chk_mbox_is_avail,
	.set_gid = hns_roce_v2_set_gid,
	.set_mac = hns_roce_v2_set_mac,
	.write_mtpt = hns_roce_v2_write_mtpt,
	.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
	.frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
	.mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
	.write_cqc = hns_roce_v2_write_cqc,
	.set_hem = hns_roce_v2_set_hem,
	.clear_hem = hns_roce_v2_clear_hem,
	.modify_qp = hns_roce_v2_modify_qp,
	.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
	.init_eq = hns_roce_v2_init_eq_table,
	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
	.write_srqc = hns_roce_v2_write_srqc,
	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};

static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);

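/* Populate the RoCE device from the hnae3 handle shared with the NIC
 * driver: register/memory bases, the netdev backing port 0, and the
 * MSI-X vectors carved out of the NIC's vector range.
 */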
static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
				   struct hnae3_handle *handle)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	const struct pci_device_id *id;
	int i;

	hr_dev->pci_dev = handle->pdev;
	id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
	hr_dev->is_vf = id->driver_data;
	hr_dev->dev = &handle->pdev->dev;
	hr_dev->hw = &hns_roce_hw_v2;
	hr_dev->dfx = &hns_roce_dfx_hw_v2;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	hr_dev->odb_offset = hr_dev->sdb_offset;

	/* Get info from NIC driver. */
	hr_dev->reg_base = handle->rinfo.roce_io_base;
	hr_dev->mem_base = handle->rinfo.roce_mem_base;
	hr_dev->caps.num_ports = 1;
	hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
	hr_dev->iboe.phy_port[0] = 0;

	addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
			    hr_dev->iboe.netdevs[0]->dev_addr);

	for (i = 0; i < handle->rinfo.num_vectors; i++)
		hr_dev->irq[i] = pci_irq_vector(handle->pdev,
						i + handle->rinfo.base_vector);

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;

	hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
	priv->handle = handle;
}

static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;
	int ret;

	hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
	if (!hr_dev)
		return -ENOMEM;

	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
	if (!hr_dev->priv) {
		ret = -ENOMEM;
		goto error_failed_kzalloc;
	}

	hns_roce_hw_v2_get_cfg(hr_dev, handle);

	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
		goto error_failed_get_cfg;
	}

	handle->priv = hr_dev;

	return 0;

error_failed_get_cfg:
	kfree(hr_dev->priv);

error_failed_kzalloc:
	ib_dealloc_device(&hr_dev->ib_dev);

	return ret;
}

static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					     bool reset)
{
	struct hns_roce_dev *hr_dev = handle->priv;

	if (!hr_dev)
		return;

	handle->priv = NULL;

	hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
	hns_roce_handle_device_err(hr_dev);

	hns_roce_exit(hr_dev);
	kfree(hr_dev->priv);
	ib_dealloc_device(&hr_dev->ib_dev);
}

static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	const struct pci_device_id *id;
	struct device *dev = &handle->pdev->dev;
	int ret;

	handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;

	if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		goto reset_chk_err;
	}

	id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
	if (!id)
		return 0;

	if (id->driver_data && handle->pdev->revision < PCI_REVISION_ID_HIP09)
		return 0;

	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
		if (ops->ae_dev_resetting(handle) ||
		    ops->get_hw_reset_stat(handle))
			goto reset_chk_err;
		else
			return ret;
	}

	handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;

	return 0;

reset_chk_err:
	dev_err(dev, "Device is busy in resetting state.\n"
		     "Please retry later.\n");

	return -EBUSY;
}

static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					   bool reset)
{
	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
		return;

	handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;

	__hns_roce_hw_v2_uninit_instance(handle, reset);

	handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
}

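/* HNAE3_DOWN_CLIENT notification: the NIC driver is about to reset the
 * function, so mark the device as being reset and disable doorbells. If
 * the instance never finished init, record that the remaining reset
 * notifications may return immediately.
 */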
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;

	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
		set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
	clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);

	hr_dev = handle->priv;
	if (!hr_dev)
		return 0;

	hr_dev->is_reset = true;
	hr_dev->active = false;
	hr_dev->dis_db = true;

	hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;

	return 0;
}

static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
	struct device *dev = &handle->pdev->dev;
	int ret;

	if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
			       &handle->rinfo.state)) {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;

	dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		/* When the reset notify type is HNAE3_INIT_CLIENT, the RoCE
		 * engine is reinitialized in this callback. If the reinit
		 * fails, the NIC driver must be informed, so clear
		 * handle->priv.
		 */
		handle->priv = NULL;
		dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
	} else {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		dev_info(dev, "Reset done, RoCE client reinit finished.\n");
	}

	return ret;
}

static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
{
	if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
		return 0;

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
	dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
	msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
	__hns_roce_hw_v2_uninit_instance(handle, false);

	return 0;
}

static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
				       enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_DOWN_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_down(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_init(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_uninit(handle);
		break;
	default:
		break;
	}

	return ret;
}

static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
	.init_instance = hns_roce_hw_v2_init_instance,
	.uninit_instance = hns_roce_hw_v2_uninit_instance,
	.reset_notify = hns_roce_hw_v2_reset_notify,
};

static struct hnae3_client hns_roce_hw_v2_client = {
	.name = "hns_roce_hw_v2",
	.type = HNAE3_CLIENT_ROCE,
	.ops = &hns_roce_hw_v2_ops,
};

static int __init hns_roce_hw_v2_init(void)
{
	return hnae3_register_client(&hns_roce_hw_v2_client);
}

static void __exit hns_roce_hw_v2_exit(void)
{
	hnae3_unregister_client(&hns_roce_hw_v2_client);
}

module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");