// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

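/* The responder is implemented as a state machine driven from
 * rxe_responder() at the bottom of this file; this table only feeds
 * the debug trace printed by rxe_dbg_qp() at each state transition.
 */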
static char *resp_state_name[] = {
	[RESPST_NONE] = "NONE",
	[RESPST_GET_REQ] = "GET_REQ",
	[RESPST_CHK_PSN] = "CHK_PSN",
	[RESPST_CHK_OP_SEQ] = "CHK_OP_SEQ",
	[RESPST_CHK_OP_VALID] = "CHK_OP_VALID",
	[RESPST_CHK_RESOURCE] = "CHK_RESOURCE",
	[RESPST_CHK_LENGTH] = "CHK_LENGTH",
	[RESPST_CHK_RKEY] = "CHK_RKEY",
	[RESPST_EXECUTE] = "EXECUTE",
	[RESPST_READ_REPLY] = "READ_REPLY",
	[RESPST_ATOMIC_REPLY] = "ATOMIC_REPLY",
	[RESPST_ATOMIC_WRITE_REPLY] = "ATOMIC_WRITE_REPLY",
	[RESPST_PROCESS_FLUSH] = "PROCESS_FLUSH",
	[RESPST_COMPLETE] = "COMPLETE",
	[RESPST_ACKNOWLEDGE] = "ACKNOWLEDGE",
	[RESPST_CLEANUP] = "CLEANUP",
	[RESPST_DUPLICATE_REQUEST] = "DUPLICATE_REQUEST",
	[RESPST_ERR_MALFORMED_WQE] = "ERR_MALFORMED_WQE",
	[RESPST_ERR_UNSUPPORTED_OPCODE] = "ERR_UNSUPPORTED_OPCODE",
	[RESPST_ERR_MISALIGNED_ATOMIC] = "ERR_MISALIGNED_ATOMIC",
	[RESPST_ERR_PSN_OUT_OF_SEQ] = "ERR_PSN_OUT_OF_SEQ",
	[RESPST_ERR_MISSING_OPCODE_FIRST] = "ERR_MISSING_OPCODE_FIRST",
	[RESPST_ERR_MISSING_OPCODE_LAST_C] = "ERR_MISSING_OPCODE_LAST_C",
	[RESPST_ERR_MISSING_OPCODE_LAST_D1E] = "ERR_MISSING_OPCODE_LAST_D1E",
	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ] = "ERR_TOO_MANY_RDMA_ATM_REQ",
	[RESPST_ERR_RNR] = "ERR_RNR",
	[RESPST_ERR_RKEY_VIOLATION] = "ERR_RKEY_VIOLATION",
	[RESPST_ERR_INVALIDATE_RKEY] = "ERR_INVALIDATE_RKEY_VIOLATION",
	[RESPST_ERR_LENGTH] = "ERR_LENGTH",
	[RESPST_ERR_CQ_OVERFLOW] = "ERR_CQ_OVERFLOW",
	[RESPST_ERROR] = "ERROR",
	[RESPST_DONE] = "DONE",
	[RESPST_EXIT] = "EXIT",
};

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	int must_sched;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	skb_queue_tail(&qp->req_pkts, skb);
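
	/* An RDMA READ request can fan out into a long series of read
	 * response packets, so it is always pushed to the task
	 * (workqueue) context; the same is done when packets are
	 * already queued, which roughly keeps processing in arrival
	 * order. Other lone packets may be handled inline.
	 */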
	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
			(skb_queue_len(&qp->req_pkts) > 1);

	if (must_sched)
		rxe_sched_task(&qp->resp.task);
	else
		rxe_run_task(&qp->resp.task);
}

static inline enum resp_states get_req(struct rxe_qp *qp,
				       struct rxe_pkt_info **pkt_p)
{
	struct sk_buff *skb;

	skb = skb_peek(&qp->req_pkts);
	if (!skb)
		return RESPST_EXIT;

	*pkt_p = SKB_TO_PKT(skb);

	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

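/* PSNs are 24-bit serial numbers (see psn_compare() and BTH_PSN_MASK
 * in rxe_hdr.h), so "ahead" and "behind" are decided by wrapping
 * arithmetic rather than plain comparison, e.g.:
 *
 *	psn_compare(0x000002, 0xfffffe) > 0	(2 is 4 ahead of 0xfffffe)
 *	psn_compare(0xfffffe, 0x000002) < 0	(and 4 behind it)
 */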
static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
	int diff = psn_compare(pkt->psn, qp->resp.psn);
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (diff > 0) {
			if (qp->resp.sent_psn_nak)
				return RESPST_CLEANUP;

			qp->resp.sent_psn_nak = 1;
			rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
			return RESPST_ERR_PSN_OUT_OF_SEQ;

		} else if (diff < 0) {
			rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
			return RESPST_DUPLICATE_REQUEST;
		}

		if (qp->resp.sent_psn_nak)
			qp->resp.sent_psn_nak = 0;

		break;

	case IB_QPT_UC:
		if (qp->resp.drop_msg || diff != 0) {
			if (pkt->mask & RXE_START_MASK) {
				qp->resp.drop_msg = 0;
				return RESPST_CHK_OP_SEQ;
			}

			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}
		break;
	default:
		break;
	}

	return RESPST_CHK_OP_SEQ;
}

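/* Multi-packet messages must arrive as FIRST, zero or more MIDDLEs,
 * then LAST (or as a single ONLY packet), e.g. for an RC send
 * spanning three packets:
 *
 *	SEND_FIRST -> SEND_MIDDLE -> SEND_LAST(_WITH_*)
 *
 * A MIDDLE or LAST with no preceding FIRST, or a FIRST/MIDDLE never
 * followed by its LAST, is a sequence error: NAKed on RC, dropped
 * on UC.
 */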
static enum resp_states check_op_seq(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_RC_SEND_FIRST:
		case IB_OPCODE_RC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_ERR_MISSING_OPCODE_FIRST;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	case IB_QPT_UC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_UC_SEND_FIRST:
		case IB_OPCODE_UC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				qp->resp.drop_msg = 1;
				return RESPST_CLEANUP;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	default:
		return RESPST_CHK_OP_VALID;
	}
}

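/* Remote access rights are checked twice: first here against the
 * QP's qp_access_flags, then again in check_rkey() against the
 * rights of the individual MR or MW named by the rkey.
 */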
static bool check_qp_attr_access(struct rxe_qp *qp,
				 struct rxe_pkt_info *pkt)
{
	if (((pkt->mask & RXE_READ_MASK) &&
	     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
	    ((pkt->mask & (RXE_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) &&
	     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
	    ((pkt->mask & RXE_ATOMIC_MASK) &&
	     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
		return false;

	if (pkt->mask & RXE_FLUSH_MASK) {
		u32 flush_type = feth_plt(pkt);

		if ((flush_type & IB_FLUSH_GLOBAL &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_FLUSH_GLOBAL)) ||
		    (flush_type & IB_FLUSH_PERSISTENT &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_FLUSH_PERSISTENT)))
			return false;
	}

	return true;
}

static enum resp_states check_op_valid(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (!check_qp_attr_access(qp, pkt))
			return RESPST_ERR_UNSUPPORTED_OPCODE;

		break;

	case IB_QPT_UC:
		if ((pkt->mask & RXE_WRITE_MASK) &&
		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}

		break;

	case IB_QPT_UD:
	case IB_QPT_GSI:
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	return RESPST_CHK_RESOURCE;
}

static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
	struct rxe_srq *srq = qp->srq;
	struct rxe_queue *q = srq->rq.queue;
	struct rxe_recv_wqe *wqe;
	struct ib_event ev;
	unsigned int count;
	size_t size;
	unsigned long flags;

	if (srq->error)
		return RESPST_ERR_RNR;

	spin_lock_irqsave(&srq->rq.consumer_lock, flags);

	wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
	if (!wqe) {
		spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
		return RESPST_ERR_RNR;
	}

	/* don't trust user space data */
	if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) {
		spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
		rxe_dbg_qp(qp, "invalid num_sge in SRQ entry\n");
		return RESPST_ERR_MALFORMED_WQE;
	}
	size = sizeof(*wqe) + wqe->dma.num_sge * sizeof(struct rxe_sge);
	memcpy(&qp->resp.srq_wqe, wqe, size);

	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
	queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	count = queue_count(q, QUEUE_TYPE_FROM_CLIENT);

	if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) {
		srq->limit = 0;
		goto event;
	}

	spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
	return RESPST_CHK_LENGTH;

event:
	spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
	ev.device = qp->ibqp.device;
	ev.element.srq = qp->ibqp.srq;
	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
	return RESPST_CHK_LENGTH;
}

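/* Read, atomic and flush replies may have to be replayed later, so
 * they consume one of the qp's responder resources (see
 * rxe_prepare_res() below); qp->attr.max_dest_rd_atomic, negotiated
 * at connection setup, bounds how many may be outstanding, and a
 * value of zero makes any such request an error.
 */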
static enum resp_states check_resource(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_srq *srq = qp->srq;

	if (pkt->mask & (RXE_READ_OR_ATOMIC_MASK | RXE_ATOMIC_WRITE_MASK)) {
		/* it is the requester's job not to send
		 * too many read/atomic ops; we just
		 * recycle the responder resource queue
		 */
		if (likely(qp->attr.max_dest_rd_atomic > 0))
			return RESPST_CHK_LENGTH;
		else
			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
	}

	if (pkt->mask & RXE_RWR_MASK) {
		if (srq)
			return get_srq_wqe(qp);

		qp->resp.wqe = queue_head(qp->rq.queue,
					  QUEUE_TYPE_FROM_CLIENT);
		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
	}

	return RESPST_CHK_LENGTH;
}

static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
					      struct rxe_pkt_info *pkt)
{
	/*
	 * See IBA C9-92
	 * For UD QPs we only check if the packet will fit in the
	 * receive buffer later. For RDMA operations additional
	 * length checks are performed in check_rkey.
	 */
	if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) ||
					     (qp_type(qp) == IB_QPT_UC))) {
		unsigned int mtu = qp->mtu;
		unsigned int payload = payload_size(pkt);

		if ((pkt->mask & RXE_START_MASK) &&
		    (pkt->mask & RXE_END_MASK)) {
			if (unlikely(payload > mtu)) {
				rxe_dbg_qp(qp, "only packet too long\n");
				return RESPST_ERR_LENGTH;
			}
		} else if ((pkt->mask & RXE_START_MASK) ||
			   (pkt->mask & RXE_MIDDLE_MASK)) {
			if (unlikely(payload != mtu)) {
				rxe_dbg_qp(qp, "first or middle packet not mtu\n");
				return RESPST_ERR_LENGTH;
			}
		} else if (pkt->mask & RXE_END_MASK) {
			if (unlikely((payload == 0) || (payload > mtu))) {
				rxe_dbg_qp(qp, "last packet zero or too long\n");
				return RESPST_ERR_LENGTH;
			}
		}
	}

	/* See IBA C9-94 */
	if (pkt->mask & RXE_RETH_MASK) {
		if (reth_len(pkt) > (1U << 31)) {
			rxe_dbg_qp(qp, "dma length too long\n");
			return RESPST_ERR_LENGTH;
		}
	}

	return RESPST_CHK_RKEY;
}

/* if the reth length field is zero we can assume nothing
 * about the rkey value and should not validate or use it.
 * Instead set qp->resp.rkey to 0 which is an invalid rkey
 * value since the minimum index part is 1.
 */
static void qp_resp_from_reth(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	unsigned int length = reth_len(pkt);

	qp->resp.va = reth_va(pkt);
	qp->resp.offset = 0;
	qp->resp.resid = length;
	qp->resp.length = length;
	if (pkt->mask & RXE_READ_OR_WRITE_MASK && length == 0)
		qp->resp.rkey = 0;
	else
		qp->resp.rkey = reth_rkey(pkt);
}

static void qp_resp_from_atmeth(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	qp->resp.va = atmeth_va(pkt);
	qp->resp.offset = 0;
	qp->resp.rkey = atmeth_rkey(pkt);
	qp->resp.resid = sizeof(u64);
}

/* resolve the packet rkey to qp->resp.mr or set qp->resp.mr to NULL
 * if an invalid rkey is received or the rdma length is zero. For middle
 * or last packets use the stored value of mr.
 */
static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	struct rxe_mr *mr = NULL;
	struct rxe_mw *mw = NULL;
	u64 va;
	u32 rkey;
	u32 resid;
	u32 pktlen;
	int mtu = qp->mtu;
	enum resp_states state;
	int access = 0;

	if (pkt->mask & (RXE_READ_OR_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) {
		if (pkt->mask & RXE_RETH_MASK)
			qp_resp_from_reth(qp, pkt);

		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
	} else if (pkt->mask & RXE_FLUSH_MASK) {
		u32 flush_type = feth_plt(pkt);

		if (pkt->mask & RXE_RETH_MASK)
			qp_resp_from_reth(qp, pkt);

		if (flush_type & IB_FLUSH_GLOBAL)
			access |= IB_ACCESS_FLUSH_GLOBAL;
		if (flush_type & IB_FLUSH_PERSISTENT)
			access |= IB_ACCESS_FLUSH_PERSISTENT;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		qp_resp_from_atmeth(qp, pkt);
		access = IB_ACCESS_REMOTE_ATOMIC;
	} else {
		/* remaining operations (sends) carry no rkey to check */
		return RESPST_EXECUTE;
	}

	/* A zero-byte read or write op is not required to
	 * set an addr or rkey. See C9-88
	 */
	if ((pkt->mask & RXE_READ_OR_WRITE_MASK) &&
	    (pkt->mask & RXE_RETH_MASK) && reth_len(pkt) == 0) {
		qp->resp.mr = NULL;
		return RESPST_EXECUTE;
	}

	va = qp->resp.va;
	rkey = qp->resp.rkey;
	resid = qp->resp.resid;
	pktlen = payload_size(pkt);

	if (rkey_is_mw(rkey)) {
		mw = rxe_lookup_mw(qp, access, rkey);
		if (!mw) {
			rxe_dbg_qp(qp, "no MW matches rkey %#x\n", rkey);
			state = RESPST_ERR_RKEY_VIOLATION;
			goto err;
		}

		mr = mw->mr;
		if (!mr) {
			rxe_dbg_qp(qp, "MW doesn't have an MR\n");
			state = RESPST_ERR_RKEY_VIOLATION;
			goto err;
		}

		if (mw->access & IB_ZERO_BASED)
			qp->resp.offset = mw->addr;

		rxe_get(mr);
		rxe_put(mw);
		mw = NULL;
	} else {
		mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
		if (!mr) {
			rxe_dbg_qp(qp, "no MR matches rkey %#x\n", rkey);
			state = RESPST_ERR_RKEY_VIOLATION;
			goto err;
		}
	}

	if (pkt->mask & RXE_FLUSH_MASK) {
		/* FLUSH MR may not set va or resid
		 * no need to check range since we will flush whole mr
		 */
		if (feth_sel(pkt) == IB_FLUSH_MR)
			goto skip_check_range;
	}

	if (mr_check_range(mr, va + qp->resp.offset, resid)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

skip_check_range:
	if (pkt->mask & (RXE_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) {
		if (resid > mtu) {
			if (pktlen != mtu || bth_pad(pkt)) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		} else {
			if (pktlen != resid) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
				/* The pad must round the payload up to a
				 * 4-byte boundary, i.e. (-resid) & 0x3,
				 * e.g. resid == 5 requires pad == 3. This
				 * error class may not be exactly right,
				 * but nothing else fits.
				 */
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		}
	}

	WARN_ON_ONCE(qp->resp.mr);

	qp->resp.mr = mr;
	return RESPST_EXECUTE;

err:
	qp->resp.mr = NULL;
	if (mr)
		rxe_put(mr);
	if (mw)
		rxe_put(mw);

	return state;
}

static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
				     int data_len)
{
	int err;

	err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
			data_addr, data_len, RXE_TO_MR_OBJ);
	if (unlikely(err))
		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
					: RESPST_ERR_MALFORMED_WQE;

	return RESPST_NONE;
}

static enum resp_states write_data_in(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	enum resp_states rc = RESPST_NONE;
	int err;
	int data_len = payload_size(pkt);

	err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
			  payload_addr(pkt), data_len, RXE_TO_MR_OBJ);
	if (err) {
		rc = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	qp->resp.va += data_len;
	qp->resp.resid -= data_len;

err:
	return rc;
}

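/* Responder resources live in a fixed circular array
 * (qp->resp.resources[], indexed by qp->resp.res_head), so preparing
 * a new one simply recycles the oldest slot. For a read, last_psn is
 * derived from the request length, e.g. with a 1024-byte MTU a
 * 4000-byte read starting at PSN 100 spans DIV_ROUND_UP(4000, 1024)
 * = 4 response packets, PSNs 100..103.
 */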
static struct resp_res *rxe_prepare_res(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					int type)
{
	struct resp_res *res;
	u32 pkts;

	res = &qp->resp.resources[qp->resp.res_head];
	rxe_advance_resp_resource(qp);
	free_rd_atomic_resource(res);

	res->type = type;
	res->replay = 0;

	switch (type) {
	case RXE_READ_MASK:
		res->read.va = qp->resp.va + qp->resp.offset;
		res->read.va_org = qp->resp.va + qp->resp.offset;
		res->read.resid = qp->resp.resid;
		res->read.length = qp->resp.resid;
		res->read.rkey = qp->resp.rkey;

		pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
		res->first_psn = pkt->psn;
		res->cur_psn = pkt->psn;
		res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;

		res->state = rdatm_res_state_new;
		break;
	case RXE_ATOMIC_MASK:
	case RXE_ATOMIC_WRITE_MASK:
		res->first_psn = pkt->psn;
		res->last_psn = pkt->psn;
		res->cur_psn = pkt->psn;
		break;
	case RXE_FLUSH_MASK:
		res->flush.va = qp->resp.va + qp->resp.offset;
		res->flush.length = qp->resp.length;
		res->flush.type = feth_plt(pkt);
		res->flush.level = feth_sel(pkt);
		break;
	}

	return res;
}

static enum resp_states process_flush(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	u64 length, start;
	struct rxe_mr *mr = qp->resp.mr;
	struct resp_res *res = qp->resp.res;

	/* oA19-14, oA19-15 */
	if (res && res->replay)
		return RESPST_ACKNOWLEDGE;
	else if (!res) {
		res = rxe_prepare_res(qp, pkt, RXE_FLUSH_MASK);
		qp->resp.res = res;
	}

	if (res->flush.level == IB_FLUSH_RANGE) {
		start = res->flush.va;
		length = res->flush.length;
	} else { /* level == IB_FLUSH_MR */
		start = mr->ibmr.iova;
		length = mr->ibmr.length;
	}

	if (res->flush.type & IB_FLUSH_PERSISTENT) {
		if (rxe_flush_pmem_iova(mr, start, length))
			return RESPST_ERR_RKEY_VIOLATION;
		/* Make data persistent. */
		wmb();
	} else if (res->flush.type & IB_FLUSH_GLOBAL) {
		/* Make data globally visible. */
		wmb();
	}

	qp->resp.msn++;

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
	qp->resp.ack_psn = qp->resp.psn;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	return RESPST_ACKNOWLEDGE;
}

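/* The original value at the target of an atomic op is saved in
 * res->atomic.orig_val so that a duplicate atomic request can be
 * acknowledged with the same value without executing the operation
 * twice; that is why a replayed request skips the operation and goes
 * straight to RESPST_ACKNOWLEDGE.
 */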
static enum resp_states atomic_reply(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	struct rxe_mr *mr = qp->resp.mr;
	struct resp_res *res = qp->resp.res;
	int err;

	if (!res) {
		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
		qp->resp.res = res;
	}

	if (!res->replay) {
		u64 iova = qp->resp.va + qp->resp.offset;

		err = rxe_mr_do_atomic_op(mr, iova, pkt->opcode,
					  atmeth_comp(pkt),
					  atmeth_swap_add(pkt),
					  &res->atomic.orig_val);
		if (err)
			return err;

		qp->resp.msn++;

		/* next expected psn, read handles this separately */
		qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
		qp->resp.ack_psn = qp->resp.psn;

		qp->resp.opcode = pkt->opcode;
		qp->resp.status = IB_WC_SUCCESS;
	}

	return RESPST_ACKNOWLEDGE;
}

static enum resp_states atomic_write_reply(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt)
{
	struct resp_res *res = qp->resp.res;
	struct rxe_mr *mr;
	u64 value;
	u64 iova;
	int err;

	if (!res) {
		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
		qp->resp.res = res;
	}

	if (res->replay)
		return RESPST_ACKNOWLEDGE;

	mr = qp->resp.mr;
	value = *(u64 *)payload_addr(pkt);
	iova = qp->resp.va + qp->resp.offset;

	err = rxe_mr_do_atomic_write(mr, iova, value);
	if (err)
		return err;

	qp->resp.resid = 0;
	qp->resp.msn++;

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
	qp->resp.ack_psn = qp->resp.psn;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	return RESPST_ACKNOWLEDGE;
}

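/* Packets are padded to a 4-byte boundary: pad = (-payload) & 0x3,
 * e.g. payload 5 -> pad 3, payload 8 -> pad 0. paylen below then
 * covers this opcode's headers, the payload, the pad and the ICRC
 * trailer.
 */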
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	int paylen;
	int pad;
	int err;

	/*
	 * allocate packet
	 */
	pad = (-payload) & 0x3;
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
	if (!skb)
		return NULL;

	ack->qp = qp;
	ack->opcode = opcode;
	ack->mask = rxe_opcode[opcode].mask;
	ack->paylen = paylen;
	ack->psn = psn;

	bth_init(ack, opcode, 0, 0, pad, IB_DEFAULT_PKEY_FULL,
		 qp->attr.dest_qp_num, 0, psn);

	if (ack->mask & RXE_AETH_MASK) {
		aeth_set_syn(ack, syndrome);
		aeth_set_msn(ack, qp->resp.msn);
	}

	if (ack->mask & RXE_ATMACK_MASK)
		atmack_set_orig(ack, qp->resp.res->atomic.orig_val);

	err = rxe_prepare(&qp->pri_av, ack, skb);
	if (err) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}

/**
 * rxe_recheck_mr - revalidate MR from rkey and get a reference
 * @qp: the qp
 * @rkey: the rkey
 *
 * This code allows the MR (or the MW, if one was used) to have been
 * invalidated, deregistered or deallocated since the request started.
 * It is assumed that the access permissions, if originally good,
 * are still OK and that the mappings are unchanged.
 *
 * TODO: If someone reregisters an MR to change its size or
 * access permissions during the processing of an RDMA read
 * we should kill the responder resource and complete the
 * operation with an error.
 *
 * Return: mr on success else NULL
 */
static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_mr *mr;
	struct rxe_mw *mw;

	if (rkey_is_mw(rkey)) {
		mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
		if (!mw)
			return NULL;

		mr = mw->mr;
		if (mw->rkey != rkey || mw->state != RXE_MW_STATE_VALID ||
		    !mr || mr->state != RXE_MR_STATE_VALID) {
			rxe_put(mw);
			return NULL;
		}

		rxe_get(mr);
		rxe_put(mw);

		return mr;
	}

	mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
	if (!mr)
		return NULL;

	if (mr->rkey != rkey || mr->state != RXE_MR_STATE_VALID) {
		rxe_put(mr);
		return NULL;
	}

	return mr;
}

/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;
	struct rxe_mr *mr;

	if (!res) {
		res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK);
		qp->resp.res = res;
	}

	if (res->state == rdatm_res_state_new) {
		if (!res->replay || qp->resp.length == 0) {
			/* if length == 0 mr will be NULL (is ok)
			 * otherwise qp->resp.mr holds a ref on mr
			 * which we transfer to mr and drop below.
			 */
			mr = qp->resp.mr;
			qp->resp.mr = NULL;
		} else {
			mr = rxe_recheck_mr(qp, res->read.rkey);
			if (!mr)
				return RESPST_ERR_RKEY_VIOLATION;
		}

		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		/* re-lookup mr from rkey on all later packets.
		 * length will be non-zero. This can fail if someone
		 * modifies or destroys the mr since the first packet.
		 */
		mr = rxe_recheck_mr(qp, res->read.rkey);
		if (!mr)
			return RESPST_ERR_RKEY_VIOLATION;

		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}

	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED);
	if (!skb) {
		state = RESPST_ERR_RNR;
		goto err_out;
	}

	err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
			  payload, RXE_FROM_MR_OBJ);
	if (err) {
		kfree_skb(skb);
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err_out;
	}

	if (bth_pad(&ack_pkt)) {
		u8 *pad = payload_addr(&ack_pkt) + payload;

		memset(pad, 0, bth_pad(&ack_pkt));
	}

	/* rxe_xmit_packet always consumes the skb */
	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err) {
		state = RESPST_ERR_RNR;
		goto err_out;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		if (!res->replay)
			qp->resp.opcode = -1;
		if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
			qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

err_out:
	if (mr)
		rxe_put(mr);
	return state;
}

static int invalidate_rkey(struct rxe_qp *qp, u32 rkey)
{
	if (rkey_is_mw(rkey))
		return rxe_invalidate_mw(qp, rkey);
	else
		return rxe_invalidate_mr(qp, rkey);
}

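/* For UD/GSI QPs the 40-byte GRH is expected at the start of the
 * receive buffer. On RoCEv2 no GRH arrives on the wire, so execute()
 * synthesizes a rdma_network_hdr from the skb's IPv4 header
 * (zero-padded) or copies the IPv6 header, which plays the same role.
 */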
/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	enum resp_states err;
	struct sk_buff *skb = PKT_TO_SKB(pkt);
	union rdma_network_hdr hdr;

	if (pkt->mask & RXE_SEND_MASK) {
		if (qp_type(qp) == IB_QPT_UD ||
		    qp_type(qp) == IB_QPT_GSI) {
			if (skb->protocol == htons(ETH_P_IP)) {
				memset(&hdr.reserved, 0,
				       sizeof(hdr.reserved));
				memcpy(&hdr.roce4grh, ip_hdr(skb),
				       sizeof(hdr.roce4grh));
				err = send_data_in(qp, &hdr, sizeof(hdr));
			} else {
				err = send_data_in(qp, ipv6_hdr(skb),
						   sizeof(hdr));
			}
			if (err)
				return err;
		}
		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
		if (err)
			return err;
	} else if (pkt->mask & RXE_WRITE_MASK) {
		err = write_data_in(qp, pkt);
		if (err)
			return err;
	} else if (pkt->mask & RXE_READ_MASK) {
		/* For RDMA Read we can increment the msn now. See C9-148. */
		qp->resp.msn++;
		return RESPST_READ_REPLY;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		return RESPST_ATOMIC_REPLY;
	} else if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
		return RESPST_ATOMIC_WRITE_REPLY;
	} else if (pkt->mask & RXE_FLUSH_MASK) {
		return RESPST_PROCESS_FLUSH;
	} else {
		/* Unreachable */
		WARN_ON_ONCE(1);
	}

	if (pkt->mask & RXE_IETH_MASK) {
		u32 rkey = ieth_rkey(pkt);

		err = invalidate_rkey(qp, rkey);
		if (err)
			return RESPST_ERR_INVALIDATE_RKEY;
	}

	if (pkt->mask & RXE_END_MASK)
		/* We successfully processed this new request. */
		qp->resp.msn++;

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
	qp->resp.ack_psn = qp->resp.psn;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	if (pkt->mask & RXE_COMP_MASK)
		return RESPST_COMPLETE;
	else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	unsigned long flags;

	if (!wqe)
		goto finish;

	memset(&cqe, 0, sizeof(cqe));

	if (qp->rcq->is_user) {
		uwc->status = qp->resp.status;
		uwc->qp_num = qp->ibqp.qp_num;
		uwc->wr_id = wqe->wr_id;
	} else {
		wc->status = qp->resp.status;
		wc->qp = &qp->ibqp;
		wc->wr_id = wqe->wr_id;
	}

	if (wc->status == IB_WC_SUCCESS) {
		rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					qp->resp.length : wqe->dma.length - wqe->dma.resid;

		/* fields after byte_len are different between kernel and user
		 * space
		 */
		if (qp->rcq->is_user) {
			uwc->wc_flags = IB_WC_GRH;

			if (pkt->mask & RXE_IMMDT_MASK) {
				uwc->wc_flags |= IB_WC_WITH_IMM;
				uwc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			if (pkt->mask & RXE_DETH_MASK)
				uwc->src_qp = deth_sqp(pkt);

			uwc->port_num = qp->attr.port_num;
		} else {
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
			if (skb->protocol == htons(ETH_P_IP))
				wc->network_hdr_type = RDMA_NETWORK_IPV4;
			else
				wc->network_hdr_type = RDMA_NETWORK_IPV6;

			if (is_vlan_dev(skb->dev)) {
				wc->wc_flags |= IB_WC_WITH_VLAN;
				wc->vlan_id = vlan_dev_vlan_id(skb->dev);
			}

			if (pkt->mask & RXE_IMMDT_MASK) {
				wc->wc_flags |= IB_WC_WITH_IMM;
				wc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
				wc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			if (pkt->mask & RXE_DETH_MASK)
				wc->src_qp = deth_sqp(pkt);

			wc->port_num = qp->attr.port_num;
		}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			rxe_err_qp(qp, "non-flush error status = %d",
				wc->status);
	}

	/* have copy for srq and reference for !srq */
	if (!qp->srq)
		queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT);

	qp->resp.wqe = NULL;

	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
		return RESPST_ERR_CQ_OVERFLOW;

finish:
	spin_lock_irqsave(&qp->state_lock, flags);
	if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		return RESPST_CHK_RESOURCE;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	if (unlikely(!pkt))
		return RESPST_DONE;
	if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

static int send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn,
			   int opcode, const char *msg)
{
	int err;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;

	skb = prepare_ack_packet(qp, &ack_pkt, opcode, 0, psn, syndrome);
	if (!skb)
		return -ENOMEM;

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err)
		rxe_dbg_qp(qp, "Failed sending %s\n", msg);

	return err;
}

static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
	return send_common_ack(qp, syndrome, psn,
			IB_OPCODE_RC_ACKNOWLEDGE, "ACK");
}

static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
	int ret = send_common_ack(qp, syndrome, psn,
			IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, "ATOMIC ACK");

	/* have to clear this since it is used to trigger
	 * long read replies
	 */
	qp->resp.res = NULL;
	return ret;
}

static int send_read_response_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
	int ret = send_common_ack(qp, syndrome, psn,
			IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY,
			"RDMA READ response of length zero ACK");

	/* have to clear this since it is used to trigger
	 * long read replies
	 */
	qp->resp.res = NULL;
	return ret;
}

static enum resp_states acknowledge(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	if (qp_type(qp) != IB_QPT_RC)
		return RESPST_CLEANUP;

	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
		send_ack(qp, qp->resp.aeth_syndrome, pkt->psn);
	else if (pkt->mask & RXE_ATOMIC_MASK)
		send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
	else if (pkt->mask & (RXE_FLUSH_MASK | RXE_ATOMIC_WRITE_MASK))
		send_read_response_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
	else if (bth_ack(pkt))
		send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);

	return RESPST_CLEANUP;
}

static enum resp_states cleanup(struct rxe_qp *qp,
				struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb;

	if (pkt) {
		skb = skb_dequeue(&qp->req_pkts);
		rxe_put(qp);
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);
	}

	if (qp->resp.mr) {
		rxe_put(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	return RESPST_DONE;
}

static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
	int i;

	for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
		struct resp_res *res = &qp->resp.resources[i];

		if (res->type == 0)
			continue;

		if (psn_compare(psn, res->first_psn) >= 0 &&
		    psn_compare(psn, res->last_psn) <= 0) {
			return res;
		}
	}

	return NULL;
}

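/* Duplicate (already executed) requests must still be answered:
 * sends and writes are simply re-acked (IBA C9-105), while reads,
 * atomics and flushes are replayed from the saved responder
 * resource. Per the IBA duplicate-request rules a retried read may
 * even ask for a subset of the original range, which is validated
 * below before the reply is regenerated.
 */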
static enum resp_states duplicate_request(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt)
{
	enum resp_states rc;
	u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;

	if (pkt->mask & RXE_SEND_MASK ||
	    pkt->mask & RXE_WRITE_MASK) {
		/* SEND. Ack again and cleanup. C9-105. */
		send_ack(qp, AETH_ACK_UNLIMITED, prev_psn);
		return RESPST_CLEANUP;
	} else if (pkt->mask & RXE_FLUSH_MASK) {
		struct resp_res *res;

		/* Find the operation in our list of responder resources. */
		res = find_resource(qp, pkt->psn);
		if (res) {
			res->replay = 1;
			res->cur_psn = pkt->psn;
			qp->resp.res = res;
			rc = RESPST_PROCESS_FLUSH;
			goto out;
		}

		/* Resource not found. Class D error. Drop the request. */
		rc = RESPST_CLEANUP;
		goto out;
	} else if (pkt->mask & RXE_READ_MASK) {
		struct resp_res *res;

		res = find_resource(qp, pkt->psn);
		if (!res) {
			/* Resource not found. Class D error. Drop the
			 * request.
			 */
			rc = RESPST_CLEANUP;
			goto out;
		} else {
			/* Ensure this new request is the same as the previous
			 * one or a subset of it.
			 */
			u64 iova = reth_va(pkt);
			u32 resid = reth_len(pkt);

			if (iova < res->read.va_org ||
			    resid > res->read.length ||
			    (iova + resid) > (res->read.va_org +
					      res->read.length)) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			if (reth_rkey(pkt) != res->read.rkey) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			res->cur_psn = pkt->psn;
			res->state = (pkt->psn == res->first_psn) ?
					rdatm_res_state_new :
					rdatm_res_state_replay;
			res->replay = 1;

			/* Reset the resource, except length. */
			res->read.va_org = iova;
			res->read.va = iova;
			res->read.resid = resid;

			/* Replay the RDMA read reply. */
			qp->resp.res = res;
			rc = RESPST_READ_REPLY;
			goto out;
		}
	} else {
		struct resp_res *res;

		/* Find the operation in our list of responder resources. */
		res = find_resource(qp, pkt->psn);
		if (res) {
			res->replay = 1;
			res->cur_psn = pkt->psn;
			qp->resp.res = res;
			rc = pkt->mask & RXE_ATOMIC_MASK ?
					RESPST_ATOMIC_REPLY :
					RESPST_ATOMIC_WRITE_REPLY;
			goto out;
		}

		/* Resource not found. Class D error. Drop the request. */
		rc = RESPST_CLEANUP;
		goto out;
	}
out:
	return rc;
}

/* Process a class A or C. Both are treated the same in this implementation. */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
			      enum ib_wc_status status)
{
	qp->resp.aeth_syndrome = syndrome;
	qp->resp.status = status;

	/* indicate that we should go through the ERROR state */
	qp->resp.goto_error = 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
	/* UC */
	if (qp->srq) {
		/* Class E */
		qp->resp.drop_msg = 1;
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			return RESPST_COMPLETE;
		} else {
			return RESPST_CLEANUP;
		}
	} else {
		/* Class D1. This packet may be the start of a
		 * new message and could be valid. The previous
		 * message is invalid and ignored. Reset the
		 * recv wr to its original state.
		 */
		if (qp->resp.wqe) {
			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
			qp->resp.wqe->dma.cur_sge = 0;
			qp->resp.wqe->dma.sge_offset = 0;
			qp->resp.opcode = -1;
		}

		if (qp->resp.mr) {
			rxe_put(qp->resp.mr);
			qp->resp.mr = NULL;
		}

		return RESPST_CLEANUP;
	}
}

/* drain incoming request packet queue */
static void drain_req_pkts(struct rxe_qp *qp)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&qp->req_pkts))) {
		rxe_put(qp);
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);
	}
}

/* complete receive wqe with flush error */
static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
{
	struct rxe_cqe cqe = {};
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	int err;

	if (qp->rcq->is_user) {
		uwc->wr_id = wqe->wr_id;
		uwc->status = IB_WC_WR_FLUSH_ERR;
		uwc->qp_num = qp_num(qp);
	} else {
		wc->wr_id = wqe->wr_id;
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->qp = &qp->ibqp;
	}

	err = rxe_cq_post(qp->rcq, &cqe, 0);
	if (err)
		rxe_dbg_cq(qp->rcq, "post cq failed err = %d", err);

	return err;
}

/* drain and optionally complete the receive queue;
 * if unable to complete a wqe, stop completing and
 * just flush the remaining wqes
 */
static void flush_recv_queue(struct rxe_qp *qp, bool notify)
{
	struct rxe_queue *q = qp->rq.queue;
	struct rxe_recv_wqe *wqe;
	int err;

	if (!qp->rq.queue)
		return;

	while ((wqe = queue_head(q, q->type))) {
		if (notify) {
			err = flush_recv_wqe(qp, wqe);
			if (err)
				notify = 0;
		}
		queue_advance_consumer(q, q->type);
	}

	qp->resp.wqe = NULL;
}

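/* rxe_responder() is the entry point of the responder state machine,
 * run from the qp's response task. It loops over the states named in
 * resp_state_name[] until a state returns RESPST_DONE (request fully
 * handled, return 0 so the task keeps polling) or RESPST_EXIT (queue
 * empty or qp drained, return -EAGAIN to stop the task loop).
 */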
int rxe_responder(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	enum resp_states state;
	struct rxe_pkt_info *pkt = NULL;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
			  qp_state(qp) == IB_QPS_RESET) {
		bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);

		drain_req_pkts(qp);
		flush_recv_queue(qp, notify);
		spin_unlock_irqrestore(&qp->state_lock, flags);
		goto exit;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

	state = RESPST_GET_REQ;

	while (1) {
		rxe_dbg_qp(qp, "state = %s\n", resp_state_name[state]);
		switch (state) {
		case RESPST_GET_REQ:
			state = get_req(qp, &pkt);
			break;
		case RESPST_CHK_PSN:
			state = check_psn(qp, pkt);
			break;
		case RESPST_CHK_OP_SEQ:
			state = check_op_seq(qp, pkt);
			break;
		case RESPST_CHK_OP_VALID:
			state = check_op_valid(qp, pkt);
			break;
		case RESPST_CHK_RESOURCE:
			state = check_resource(qp, pkt);
			break;
		case RESPST_CHK_LENGTH:
			state = rxe_resp_check_length(qp, pkt);
			break;
		case RESPST_CHK_RKEY:
			state = check_rkey(qp, pkt);
			break;
		case RESPST_EXECUTE:
			state = execute(qp, pkt);
			break;
		case RESPST_COMPLETE:
			state = do_complete(qp, pkt);
			break;
		case RESPST_READ_REPLY:
			state = read_reply(qp, pkt);
			break;
		case RESPST_ATOMIC_REPLY:
			state = atomic_reply(qp, pkt);
			break;
		case RESPST_ATOMIC_WRITE_REPLY:
			state = atomic_write_reply(qp, pkt);
			break;
		case RESPST_PROCESS_FLUSH:
			state = process_flush(qp, pkt);
			break;
		case RESPST_ACKNOWLEDGE:
			state = acknowledge(qp, pkt);
			break;
		case RESPST_CLEANUP:
			state = cleanup(qp, pkt);
			break;
		case RESPST_DUPLICATE_REQUEST:
			state = duplicate_request(qp, pkt);
			break;
		case RESPST_ERR_PSN_OUT_OF_SEQ:
			/* RC only - Class B. Drop packet. */
			send_ack(qp, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
		case RESPST_ERR_MISSING_OPCODE_FIRST:
		case RESPST_ERR_MISSING_OPCODE_LAST_C:
		case RESPST_ERR_UNSUPPORTED_OPCODE:
		case RESPST_ERR_MISALIGNED_ATOMIC:
			/* RC Only - Class C. */
			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
					  IB_WC_REM_INV_REQ_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
			state = do_class_d1e_error(qp);
			break;
		case RESPST_ERR_RNR:
			if (qp_type(qp) == IB_QPT_RC) {
				rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
				/* RC - class B */
				send_ack(qp, AETH_RNR_NAK |
					 (~AETH_TYPE_MASK &
					 qp->attr.min_rnr_timer),
					 pkt->psn);
			} else {
				/* UD/UC - class D */
				qp->resp.drop_msg = 1;
			}
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_RKEY_VIOLATION:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
						  IB_WC_REM_ACCESS_ERR);
				state = RESPST_COMPLETE;
			} else {
				qp->resp.drop_msg = 1;
				if (qp->srq) {
					/* UC/SRQ Class D */
					qp->resp.status = IB_WC_REM_ACCESS_ERR;
					state = RESPST_COMPLETE;
				} else {
					/* UC/non-SRQ Class E. */
					state = RESPST_CLEANUP;
				}
			}
			break;

		case RESPST_ERR_INVALIDATE_RKEY:
			/* RC - Class J. */
			qp->resp.goto_error = 1;
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_LENGTH:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
						  IB_WC_REM_INV_REQ_ERR);
				state = RESPST_COMPLETE;
			} else if (qp->srq) {
				/* UC/UD - class E */
				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
				state = RESPST_COMPLETE;
			} else {
				/* UC/UD - class D */
				qp->resp.drop_msg = 1;
				state = RESPST_CLEANUP;
			}
			break;

		case RESPST_ERR_MALFORMED_WQE:
			/* All, Class A. */
			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
					  IB_WC_LOC_QP_OP_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_CQ_OVERFLOW:
			/* All - Class G */
			state = RESPST_ERROR;
			break;

		case RESPST_DONE:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto done;

		case RESPST_EXIT:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto exit;

		case RESPST_ERROR:
			qp->resp.goto_error = 0;
			rxe_dbg_qp(qp, "moved to error state\n");
			rxe_qp_error(qp);
			goto exit;

		default:
			WARN_ON_ONCE(1);
		}
	}

	/* A non-zero return value will cause rxe_do_task to
	 * exit its loop and end the tasklet. A zero return
	 * will continue looping and return to rxe_responder.
	 */
done:
	ret = 0;
	goto out;
exit:
	ret = -EAGAIN;
out:
	return ret;
}