// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "defs.h"
#include "user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
			       u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid));
	}
}

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
				     struct ib_sge *sge, u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8, 0);
	}
}

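/* Note: as the two helpers above show, GEN_1 fragments carry no
 * per-fragment valid bit, so an unused fragment is simply zeroed, while
 * GEN_2 and later encode VALID in every fragment and therefore pad even
 * fragment counts with a NULL fragment (see the "if not an odd number"
 * blocks in the post routines below).
 */
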
/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static int irdma_nop_1(struct irdma_qp_uk *qp)
{
	u64 hdr;
	__le64 *wqe;
	u32 wqe_idx;
	bool signaled = false;

	if (!qp->sq_ring.head)
		return -EINVAL;

	wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;
	qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	/* make sure WQE is written before valid bit is set */
	dma_wmb();

	set_64bit_val(wqe, 24, hdr);

	return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe index
 */
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
	struct irdma_qp_quanta *sq;
	u32 wqe_idx;

	if (!(qp_wqe_idx & 0x7F)) {
		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
		sq = qp->sq_base + wqe_idx;
		if (wqe_idx)
			memset(sq, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
		else
			memset(sq, qp->swqe_polarity ? 0xFF : 0, 0x1000);
	}
}

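/* Illustrative note: a stale WQE is one whose valid bits were written on
 * a previous lap of the ring. Pre-filling the chunk 128 entries ahead
 * with the inverse of the polarity it will be consumed under (the
 * inverted pattern when the chunk is reached on this lap, the current
 * pattern when it wraps to index 0 and polarity flips) keeps hardware
 * from mistaking leftover contents for a posted WQE.
 */
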
/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	/* valid bit is written and loads completed before reading shadow */
	mb();

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, 0, &temp);

	hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
	sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (sw_sq_head != qp->initial_ring.head) {
		if (qp->push_dropped) {
			writel(qp->qp_id, qp->wqe_alloc_db);
			qp->push_dropped = false;
		} else if (sw_sq_head != hw_sq_tail) {
			if (sw_sq_head > qp->initial_ring.head) {
				if (hw_sq_tail >= qp->initial_ring.head &&
				    hw_sq_tail < sw_sq_head)
					writel(qp->qp_id, qp->wqe_alloc_db);
			} else {
				if (hw_sq_tail >= qp->initial_ring.head ||
				    hw_sq_tail < sw_sq_head)
					writel(qp->qp_id, qp->wqe_alloc_db);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}

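/* Illustrative example of the coalescing logic above: if WQEs 10..19
 * were posted since the last doorbell (initial_ring.head == 10,
 * sw_sq_head == 20) and the shadow area reports hw_sq_tail == 15, the
 * hardware stopped inside the newly posted region, so the doorbell must
 * be rung again. The second comparison handles a posted region that
 * wraps past the end of the ring.
 */
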
/**
 * irdma_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
	set_32bit_val(qp->push_db, 0,
		      FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
	qp->initial_ring.head = qp->sq_ring.head;
	qp->push_mode = true;
	qp->push_dropped = false;
}

void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
		       u32 wqe_idx, bool post_sq)
{
	__le64 *push;

	if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
		    IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
	    !qp->push_mode) {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	} else {
		push = (__le64 *)((uintptr_t)qp->push_wqe +
				  (wqe_idx & 0x7) * 0x20);
		memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
		irdma_qp_ring_push_db(qp, wqe_idx);
	}
}

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: size of WR in quanta
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 quanta, u32 total_size,
				   struct irdma_post_sq_info *info)
{
	__le64 *wqe;
	__le64 *wqe_0 = NULL;
	u32 nop_wqe_idx;
	u16 avail_quanta;
	u16 i;

	avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
		       (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
		       qp->uk_attrs->max_hw_sq_chunk);
	if (quanta <= avail_quanta) {
		/* WR fits in current chunk */
		if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;
	} else {
		/* Need to pad with NOP */
		if (quanta + avail_quanta >
			IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;

		nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
		for (i = 0; i < avail_quanta; i++) {
			irdma_nop_1(qp);
			IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
		}
		if (qp->push_db && info->push_wqe)
			irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
					  avail_quanta, nop_wqe_idx, true);
	}

	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;

	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);

	wqe = qp->sq_base[*wqe_idx].elem;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
		wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
		wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 0 : 1));
	}
	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;

	return wqe;
}

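/* Typical caller flow (see irdma_uk_rdma_write below): reserve quanta
 * with this routine, fill fragments or inline data into the returned
 * WQE, then dma_wmb() and write the header to qword offset 24 last,
 * since that qword carries the valid bit handing the WQE to hardware.
 */
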
/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
	__le64 *wqe;
	int ret_code;

	if (IRDMA_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;

	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is number of 32 byte quanta in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

	return wqe;
}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
			bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	read_fence |= info->read_fence;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_lo_sges + 1;
	else
		frag_cnt = op_info->num_lo_sges;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0,
					    op_info->lo_sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_lo_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		       bool inv_stag, bool post_sq)
{
	struct irdma_rdma_read *op_info;
	int ret_code;
	u32 i, byte_off, total_size = 0;
	bool local_fence = false;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u32 wqe_idx;
	u16 quanta;
	u64 hdr;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_read;
	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	addl_frag_cnt = op_info->num_lo_sges > 1 ?
			(op_info->num_lo_sges - 1) : 0;
	local_fence |= info->local_fence;

	qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
				    qp->swqe_polarity);
	for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
	    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE,
			 (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		  bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 i, wqe_idx, total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].length;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_sges + 1;
	else
		frag_cnt = op_info->num_sges;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0,
					    frag_cnt ? op_info->sg_list : NULL,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting bind wqe values
 */
static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
					struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: compatibility parameter
 */
static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
					 u32 num_sges, u8 polarity)
{
	u32 quanta_bytes_remaining = 16;
	int i;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
		u32 sge_len = sge_list[i].length;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				/* Remaining inline bytes reside after hdr */
				wqe += 16;
				quanta_bytes_remaining = 32;
			}
		}
	}
}

/**
 * irdma_inline_data_size_to_quanta_gen_1 - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
{
	return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}

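/* Worked example: on GEN_1, quantum 0 holds 16 inline bytes and the
 * second quantum holds 32 more, so a 40-byte payload needs 2 quanta
 * (16 + 32 = 48 bytes, the GEN_1 inline maximum; compare the GEN_1
 * inline_data <= 48 case in irdma_get_wqe_shift() below).
 */
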
/**
 * irdma_set_mw_bind_wqe - set mw bind in wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting wqe values
 */
static void irdma_set_mw_bind_wqe(__le64 *wqe,
				  struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: polarity of wqe valid bit
 */
static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
				   u32 num_sges, u8 polarity)
{
	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
	u32 quanta_bytes_remaining = 8;
	bool first_quanta = true;
	int i;

	wqe += 8;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
		u32 sge_len = sge_list[i].length;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				quanta_bytes_remaining = 31;

				/* Remaining inline bytes reside after hdr */
				if (first_quanta) {
					first_quanta = false;
					wqe += 16;
				} else {
					*wqe = inline_valid;
					wqe++;
				}
			}
		}
	}

	if (!first_quanta && quanta_bytes_remaining < 31)
		*(wqe + quanta_bytes_remaining) = inline_valid;
}

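/* Layout recap for the GEN_2 copy above: quantum 0 carries 8 inline
 * bytes (after the initial 8-byte skip), the next 16 bytes belong to
 * the WQE header area, and every later quantum carries 31 data bytes
 * plus a trailing valid byte. The final fixup places the valid byte of
 * a partially filled quantum at quanta_bytes_remaining past the write
 * pointer, i.e. in the quantum's last byte.
 */
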
/**
 * irdma_inline_data_size_to_quanta - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size)
{
	if (data_size <= 8)
		return IRDMA_QP_WQE_MIN_QUANTA;
	else if (data_size <= 39)
		return 2;
	else if (data_size <= 70)
		return 3;
	else if (data_size <= 101)
		return 4;
	else if (data_size <= 132)
		return 5;
	else if (data_size <= 163)
		return 6;
	else if (data_size <= 194)
		return 7;
	else
		return 8;
}

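/* Worked example: 8 bytes fit in the first quantum and each additional
 * quantum adds 31 bytes, giving the thresholds 8, 39, 70, 101, ... above;
 * a 50-byte payload therefore needs 3 quanta (39 < 50 <= 70).
 */
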
/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u64 hdr = 0;
	u32 wqe_idx;
	bool read_fence = false;
	u32 i, total_size = 0;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.rdma_write;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));

	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
					op_info->num_lo_sges,
					qp->swqe_polarity);
	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
			 struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool read_fence = false;
	u32 i, total_size = 0;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.send;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].length;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));

	read_fence |= info->read_fence;
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
					op_info->num_sges, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
				   struct irdma_post_sq_info *info,
				   bool post_sq)
{
	__le64 *wqe;
	struct irdma_inv_local_stag *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence = false;
	struct ib_sge sge = {};

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	sge.lkey = op_info->target_stag;
	qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);

	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
				  post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
int irdma_uk_post_receive(struct irdma_qp_uk *qp,
			  struct irdma_post_rq_info *info)
{
	u32 wqe_idx, i, byte_off;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u64 hdr;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return -EINVAL;

	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return -ENOMEM;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
	qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
				    qp->rwqe_polarity);

	for (i = 1, byte_off = 32; i < info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
					    qp->rwqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
	    info->num_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->rwqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16, 0);
	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	return 0;
}

/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
	cq->cq_base = cq_base;
	cq->cq_size = cq_size;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized cq buffers
 * @cq: hw cq
 * @cq_cnt: the count of the resized cq buffers
 */
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_next;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);

	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	sw_cq_sel += cq_cnt;

	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);

	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);
}

/**
 * irdma_uk_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				      enum irdma_cmpl_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);
	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_seq_num++;
	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next_se |= 1;
	if (cq_notify == IRDMA_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);

	dma_wmb(); /* make sure shadow area is updated before ringing doorbell */

	writel(cq->cq_id, cq->cqe_alloc_db);
}

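/* Arming semantics, as encoded above: ARM_NEXT_SE is always set, so the
 * CQ reports the next solicited event; ARM_NEXT is additionally set for
 * IRDMA_CQ_COMPL_EVENT so that any next completion raises an event. The
 * arm sequence number is bumped on each request.
 */
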
/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
			  struct irdma_cq_poll_info *info)
{
	u64 comp_ctx, qword0, qword2, qword3;
	__le64 *cqe;
	struct irdma_qp_uk *qp;
	struct irdma_ring *pring = NULL;
	u32 wqe_idx;
	int ret_code;
	bool move_cq_head = true;
	u8 polarity;
	bool ext_valid;
	__le64 *ext_cqe;

	if (cq->avoid_mem_cflct)
		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
	else
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
	if (polarity != cq->polarity)
		return -ENOENT;

	/* Ensure CQE contents are read after valid bit is checked */
	dma_rmb();

	ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
	if (ext_valid) {
		u64 qword6, qword7;
		u32 peek_head;

		if (cq->avoid_mem_cflct) {
			ext_cqe = (__le64 *)((u8 *)cqe + 32);
			get_64bit_val(ext_cqe, 24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
		} else {
			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
			ext_cqe = cq->cq_base[peek_head].buf;
			get_64bit_val(ext_cqe, 24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
			if (!peek_head)
				polarity ^= 1;
		}
		if (polarity != cq->polarity)
			return -ENOENT;

		/* Ensure ext CQE contents are read after ext valid bit is checked */
		dma_rmb();

		info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
		if (info->imm_valid) {
			u64 qword4;

			get_64bit_val(ext_cqe, 0, &qword4);
			info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
		}
		info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
		info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
		if (info->ud_smac_valid || info->ud_vlan_valid) {
			get_64bit_val(ext_cqe, 16, &qword6);
			if (info->ud_vlan_valid)
				info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
			if (info->ud_smac_valid) {
				info->ud_smac[5] = qword6 & 0xFF;
				info->ud_smac[4] = (qword6 >> 8) & 0xFF;
				info->ud_smac[3] = (qword6 >> 16) & 0xFF;
				info->ud_smac[2] = (qword6 >> 24) & 0xFF;
				info->ud_smac[1] = (qword6 >> 32) & 0xFF;
				info->ud_smac[0] = (qword6 >> 40) & 0xFF;
			}
		}
	} else {
		info->imm_valid = false;
		info->ud_smac_valid = false;
		info->ud_vlan_valid = false;
	}

	info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
	info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
	if (info->error) {
		info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
		info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
		if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
			info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
			/* Set the min error to standard flush error code for remaining cqes */
			if (info->minor_err != FLUSH_GENERAL_ERR) {
				qword3 &= ~IRDMA_CQ_MINERR;
				qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
				set_64bit_val(cqe, 24, qword3);
			}
		} else {
			info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
		}
	} else {
		info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
	info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

	get_64bit_val(cqe, 8, &comp_ctx);

	info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
	qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
	if (!qp || qp->destroy_pending) {
		ret_code = -EFAULT;
		goto exit;
	}
	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
	info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
	info->op_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);

	if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
		u32 array_idx;

		array_idx = wqe_idx / qp->rq_wqe_size_multiplier;

		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
		    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
				ret_code = -ENOENT;
				goto exit;
			}

			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
		}

		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

		if (qword3 & IRDMACQ_STAG) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
		} else {
			info->stag_invalid_set = false;
		}
		IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
			qp->rq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
				qp->rq_flush_complete = true;
			else
				move_cq_head = false;
		}
		pring = &qp->rq_ring;
	} else { /* q_type is IRDMA_CQE_QTYPE_SQ */
		if (qp->first_sq_wq) {
			if (wqe_idx + 1 >= qp->conn_wqes)
				qp->first_sq_wq = false;

			if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
				IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
				IRDMA_RING_MOVE_TAIL(cq->cq_ring);
				set_64bit_val(cq->shadow_area, 0,
					      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
				memset(info, 0,
				       sizeof(struct irdma_cq_poll_info));
				return irdma_uk_cq_poll_cmpl(cq, info);
			}
		}
		/* cease posting push mode on push drop */
		if (info->push_dropped) {
			qp->push_mode = false;
			qp->push_dropped = true;
		}
		if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			if (!info->comp_status)
				info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
			info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
			IRDMA_RING_SET_TAIL(qp->sq_ring,
					    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
		} else {
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
				ret_code = -ENOENT;
				goto exit;
			}

			do {
				__le64 *sw_wqe;
				u64 wqe_qword;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, 24,
					      &wqe_qword);
				info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
							      wqe_qword);
				IRDMA_RING_SET_TAIL(qp->sq_ring,
						    tail + qp->sq_wrtrk_array[tail].quanta);
				if (info->op_type != IRDMAQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);
			if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
			    info->minor_err == FLUSH_PROT_ERR)
				info->minor_err = FLUSH_MW_BIND_ERR;
			qp->sq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
				qp->sq_flush_complete = true;
		}
		pring = &qp->sq_ring;
	}

	ret_code = 0;

exit:
	if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
		if (pring && IRDMA_RING_MORE_WORK(*pring))
			move_cq_head = false;

	if (move_cq_head) {
		IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
		if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
			cq->polarity ^= 1;

		if (ext_valid && !cq->avoid_mem_cflct) {
			IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
			if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
				cq->polarity ^= 1;
		}

		IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		if (!cq->avoid_mem_cflct && ext_valid)
			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		set_64bit_val(cq->shadow_area, 0,
			      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
	} else {
		qword3 &= ~IRDMA_CQ_WQEIDX;
		qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
		set_64bit_val(cqe, 24, qword3);
	}

	return ret_code;
}

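/* Note on the else-branch above: when a flushed completion leaves work
 * on the ring, the CQE is left in place and its WQEIDX field is
 * rewritten to the ring tail, so the next poll resumes draining the
 * remaining flushed WRs from that point instead of re-reporting the
 * same index.
 */
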
/**
 * irdma_qp_round_up - return round up qp wq depth
 * @wqdepth: wq depth in quanta to round up
 */
static int irdma_qp_round_up(u32 wqdepth)
{
	int scount = 1;

	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;

	return ++wqdepth;
}

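/* Worked example: irdma_qp_round_up(200) decrements to 199 (0xC7), the
 * shift-or cascade smears the top bit down to give 255 (0xFF), and the
 * final increment yields 256, the next power of two >= 200.
 */
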
/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs
 * and inline data size. For 1 SGE or inline data <= 8, shift = 0 (wqe
 * size of 32 bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1
 * (wqe size of 64 bytes). For 4-7 SGEs or inline data <= 101, shift = 2
 * (wqe size of 128 bytes). Otherwise, shift = 3 (wqe size of 256 bytes).
 */
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift)
{
	*shift = 0;
	if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
		if (sge > 1 || inline_data > 8) {
			if (sge < 4 && inline_data <= 39)
				*shift = 1;
			else if (sge < 8 && inline_data <= 101)
				*shift = 2;
			else
				*shift = 3;
		}
	} else if (sge > 1 || inline_data > 16) {
		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
	}
}

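/* Worked example: on GEN_2 hardware, sge = 4 with no inline data takes
 * the (sge < 8) branch, so *shift = 2 and the WQE size becomes
 * 32 << 2 = 128 bytes.
 */
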
/**
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
		      u32 *sqdepth)
{
	*sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);

	if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
		*sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
		return -EINVAL;

	return 0;
}

/**
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
		      u32 *rqdepth)
{
	*rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);

	if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
		*rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
		return -EINVAL;

	return 0;
}

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
	.iw_copy_inline_data = irdma_copy_inline_data,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
	.iw_set_fragment = irdma_set_fragment,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
	.iw_set_fragment = irdma_set_fragment_gen_1,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete
 * connection
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
					struct irdma_qp_uk_init_info *info)
{
	u16 move_cnt = 1;

	if (!info->legacy_mode &&
	    (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))
		move_cnt = 3;

	qp->conn_wqes = move_cnt;
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

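/* The reserved slots are consumed by connection establishment before the
 * first application WR: 3 WQEs with the RTS_AE feature in non-legacy
 * mode, otherwise 1. irdma_uk_cq_poll_cmpl() uses first_sq_wq and
 * conn_wqes to recognize and discard their completions.
 */
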
/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode. The size of
 * the WQE depends on the maximum number of fragments allowed, and the
 * size of the WQE times the number of WQEs should equal the amount of
 * memory allocated for the SQ and RQ.
 */
int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
	int ret_code = 0;
	u32 sq_ring_size;
	u8 sqshift, rqshift;

	qp->uk_attrs = info->uk_attrs;
	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
		return -EINVAL;

	irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
				    info->max_inline_data, &sqshift);
		if (info->abi_ver > 4)
			rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	} else {
		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
				    info->max_inline_data, &sqshift);
	}
	qp->qp_caps = info->qp_caps;
	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;

	qp->rq_wrid_array = info->rq_wrid_array;
	qp->wqe_alloc_db = info->wqe_alloc_db;
	qp->qp_id = info->qp_id;
	qp->sq_size = info->sq_size;
	qp->push_mode = false;
	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << sqshift;
	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
	if (info->first_sq_wq) {
		irdma_setup_connection_wqes(qp, info);
		qp->swqe_polarity = 1;
		qp->first_sq_wq = true;
	} else {
		qp->swqe_polarity = 0;
	}
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;
	qp->rq_size = info->rq_size;
	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
	qp->max_inline_data = info->max_inline_data;
	qp->rq_wqe_size = rqshift;
	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
	qp->rq_wqe_size_multiplier = 1 << rqshift;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
	else
		qp->wqe_ops = iw_wqe_uk_ops;

	return ret_code;
}

/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
void irdma_uk_cq_init(struct irdma_cq_uk *cq,
		      struct irdma_cq_uk_init_info *info)
{
	cq->cq_base = info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_db = info->cqe_alloc_db;
	cq->cq_ack_db = info->cq_ack_db;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{
	__le64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
	do {
		if (cq->avoid_mem_cflct)
			cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
		else
			cqe = cq->cq_base[cq_head].buf;
		get_64bit_val(cqe, 24, &qword3);
		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

		if (polarity != temp)
			break;

		get_64bit_val(cqe, 8, &comp_ctx);
		if ((void *)(unsigned long)comp_ctx == q)
			set_64bit_val(cqe, 8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
}

/**
 * irdma_nop - post a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: signaled for completion
 * @post_sq: ring doorbell
 */
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	u32 wqe_idx;
	struct irdma_post_sq_info info = {};

	info.push_wqe = false;
	info.wr_id = wr_id;
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, &info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @quanta: quanta for frag_cnt
 */
int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
	if (frag_cnt > 15) /* 15 fragments only when immediate data is present */
		return -EINVAL;

	/* the first quantum holds the header plus one fragment; each
	 * additional quantum holds two 16-byte fragments
	 */
	if (frag_cnt <= 1)
		*quanta = IRDMA_QP_WQE_MIN_QUANTA;
	else
		*quanta = (frag_cnt / 2) + 1;

	return 0;
}

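/* Worked example: quantum 0 holds the header plus one fragment and each
 * additional quantum holds two 16-byte fragments, so frag_cnt = 5 needs
 * 1 + ceil(4 / 2) = 3 quanta.
 */
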
/**
 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size in bytes given frag_cnt
 */
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
	if (frag_cnt > 14)
		return -EINVAL;

	/* smallest power-of-two WQE size (>= 32 bytes) that holds the
	 * 32-byte first quantum plus the remaining 16-byte fragments
	 */
	if (frag_cnt <= 1)
		*wqe_size = 32;
	else if (frag_cnt <= 3)
		*wqe_size = 64;
	else if (frag_cnt <= 7)
		*wqe_size = 128;
	else
		*wqe_size = 256;

	return 0;
}