drivers/infiniband/sw/rxe/rxe_resp.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

enum resp_states {
        RESPST_NONE,
        RESPST_GET_REQ,
        RESPST_CHK_PSN,
        RESPST_CHK_OP_SEQ,
        RESPST_CHK_OP_VALID,
        RESPST_CHK_RESOURCE,
        RESPST_CHK_LENGTH,
        RESPST_CHK_RKEY,
        RESPST_EXECUTE,
        RESPST_READ_REPLY,
        RESPST_COMPLETE,
        RESPST_ACKNOWLEDGE,
        RESPST_CLEANUP,
        RESPST_DUPLICATE_REQUEST,
        RESPST_ERR_MALFORMED_WQE,
        RESPST_ERR_UNSUPPORTED_OPCODE,
        RESPST_ERR_MISALIGNED_ATOMIC,
        RESPST_ERR_PSN_OUT_OF_SEQ,
        RESPST_ERR_MISSING_OPCODE_FIRST,
        RESPST_ERR_MISSING_OPCODE_LAST_C,
        RESPST_ERR_MISSING_OPCODE_LAST_D1E,
        RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
        RESPST_ERR_RNR,
        RESPST_ERR_RKEY_VIOLATION,
        RESPST_ERR_INVALIDATE_RKEY,
        RESPST_ERR_LENGTH,
        RESPST_ERR_CQ_OVERFLOW,
        RESPST_ERROR,
        RESPST_RESET,
        RESPST_DONE,
        RESPST_EXIT,
};

static char *resp_state_name[] = {
        [RESPST_NONE]                           = "NONE",
        [RESPST_GET_REQ]                        = "GET_REQ",
        [RESPST_CHK_PSN]                        = "CHK_PSN",
        [RESPST_CHK_OP_SEQ]                     = "CHK_OP_SEQ",
        [RESPST_CHK_OP_VALID]                   = "CHK_OP_VALID",
        [RESPST_CHK_RESOURCE]                   = "CHK_RESOURCE",
        [RESPST_CHK_LENGTH]                     = "CHK_LENGTH",
        [RESPST_CHK_RKEY]                       = "CHK_RKEY",
        [RESPST_EXECUTE]                        = "EXECUTE",
        [RESPST_READ_REPLY]                     = "READ_REPLY",
        [RESPST_COMPLETE]                       = "COMPLETE",
        [RESPST_ACKNOWLEDGE]                    = "ACKNOWLEDGE",
        [RESPST_CLEANUP]                        = "CLEANUP",
        [RESPST_DUPLICATE_REQUEST]              = "DUPLICATE_REQUEST",
        [RESPST_ERR_MALFORMED_WQE]              = "ERR_MALFORMED_WQE",
        [RESPST_ERR_UNSUPPORTED_OPCODE]         = "ERR_UNSUPPORTED_OPCODE",
        [RESPST_ERR_MISALIGNED_ATOMIC]          = "ERR_MISALIGNED_ATOMIC",
        [RESPST_ERR_PSN_OUT_OF_SEQ]             = "ERR_PSN_OUT_OF_SEQ",
        [RESPST_ERR_MISSING_OPCODE_FIRST]       = "ERR_MISSING_OPCODE_FIRST",
        [RESPST_ERR_MISSING_OPCODE_LAST_C]      = "ERR_MISSING_OPCODE_LAST_C",
        [RESPST_ERR_MISSING_OPCODE_LAST_D1E]    = "ERR_MISSING_OPCODE_LAST_D1E",
        [RESPST_ERR_TOO_MANY_RDMA_ATM_REQ]      = "ERR_TOO_MANY_RDMA_ATM_REQ",
        [RESPST_ERR_RNR]                        = "ERR_RNR",
        [RESPST_ERR_RKEY_VIOLATION]             = "ERR_RKEY_VIOLATION",
        [RESPST_ERR_INVALIDATE_RKEY]            = "ERR_INVALIDATE_RKEY",
        [RESPST_ERR_LENGTH]                     = "ERR_LENGTH",
        [RESPST_ERR_CQ_OVERFLOW]                = "ERR_CQ_OVERFLOW",
        [RESPST_ERROR]                          = "ERROR",
        [RESPST_RESET]                          = "RESET",
        [RESPST_DONE]                           = "DONE",
        [RESPST_EXIT]                           = "EXIT",
};

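/* A rough sketch of the responder state machine driven by
 * rxe_responder() below. For a new, well-formed request the happy
 * path is approximately:
 *
 *   GET_REQ -> CHK_PSN -> CHK_OP_SEQ -> CHK_OP_VALID -> CHK_RESOURCE
 *     -> CHK_LENGTH -> CHK_RKEY -> EXECUTE -> READ_REPLY or COMPLETE
 *     -> ACKNOWLEDGE -> CLEANUP -> DONE
 *
 * The ERR_* states branch off the CHK_* steps and converge back on
 * COMPLETE or CLEANUP; see the switch statement in rxe_responder().
 */
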
/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
        int must_sched;
        struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

        skb_queue_tail(&qp->req_pkts, skb);

        must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
                        (skb_queue_len(&qp->req_pkts) > 1);

        rxe_run_task(&qp->resp.task, must_sched);
}

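/* must_sched selects between running the responder inline and
 * deferring to its tasklet: a read request can generate a long train
 * of reply packets, and a queue length greater than one suggests the
 * inline path is already falling behind.
 */
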
static inline enum resp_states get_req(struct rxe_qp *qp,
                                       struct rxe_pkt_info **pkt_p)
{
        struct sk_buff *skb;

        if (qp->resp.state == QP_STATE_ERROR) {
                while ((skb = skb_dequeue(&qp->req_pkts))) {
                        rxe_put(qp);
                        kfree_skb(skb);
                        ib_device_put(qp->ibqp.device);
                }

                /* go drain recv wr queue */
                return RESPST_CHK_RESOURCE;
        }

        skb = skb_peek(&qp->req_pkts);
        if (!skb)
                return RESPST_EXIT;

        *pkt_p = SKB_TO_PKT(skb);

        return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

static enum resp_states check_psn(struct rxe_qp *qp,
                                  struct rxe_pkt_info *pkt)
{
        int diff = psn_compare(pkt->psn, qp->resp.psn);
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        switch (qp_type(qp)) {
        case IB_QPT_RC:
                if (diff > 0) {
                        if (qp->resp.sent_psn_nak)
                                return RESPST_CLEANUP;

                        qp->resp.sent_psn_nak = 1;
                        rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
                        return RESPST_ERR_PSN_OUT_OF_SEQ;

                } else if (diff < 0) {
                        rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
                        return RESPST_DUPLICATE_REQUEST;
                }

                if (qp->resp.sent_psn_nak)
                        qp->resp.sent_psn_nak = 0;

                break;

        case IB_QPT_UC:
                if (qp->resp.drop_msg || diff != 0) {
                        if (pkt->mask & RXE_START_MASK) {
                                qp->resp.drop_msg = 0;
                                return RESPST_CHK_OP_SEQ;
                        }

                        qp->resp.drop_msg = 1;
                        return RESPST_CLEANUP;
                }
                break;
        default:
                break;
        }

        return RESPST_CHK_OP_SEQ;
}

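/* psn_compare() treats PSNs as 24-bit serial numbers (BTH_PSN_MASK),
 * so the diff above is wraparound-safe: PSN 0, for example, compares
 * as newer than PSN 0xffffff.
 */
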
static enum resp_states check_op_seq(struct rxe_qp *qp,
                                     struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                switch (qp->resp.opcode) {
                case IB_OPCODE_RC_SEND_FIRST:
                case IB_OPCODE_RC_SEND_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_SEND_MIDDLE:
                        case IB_OPCODE_RC_SEND_LAST:
                        case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_C;
                        }

                case IB_OPCODE_RC_RDMA_WRITE_FIRST:
                case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_C;
                        }

                default:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_SEND_MIDDLE:
                        case IB_OPCODE_RC_SEND_LAST:
                        case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
                        case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_ERR_MISSING_OPCODE_FIRST;
                        default:
                                return RESPST_CHK_OP_VALID;
                        }
                }
                break;

        case IB_QPT_UC:
                switch (qp->resp.opcode) {
                case IB_OPCODE_UC_SEND_FIRST:
                case IB_OPCODE_UC_SEND_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_SEND_MIDDLE:
                        case IB_OPCODE_UC_SEND_LAST:
                        case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
                        }

                case IB_OPCODE_UC_RDMA_WRITE_FIRST:
                case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
                        }

                default:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_SEND_MIDDLE:
                        case IB_OPCODE_UC_SEND_LAST:
                        case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                qp->resp.drop_msg = 1;
                                return RESPST_CLEANUP;
                        default:
                                return RESPST_CHK_OP_VALID;
                        }
                }
                break;

        default:
                return RESPST_CHK_OP_VALID;
        }
}

static enum resp_states check_op_valid(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                if (((pkt->mask & RXE_READ_MASK) &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
                    ((pkt->mask & RXE_WRITE_MASK) &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
                    ((pkt->mask & RXE_ATOMIC_MASK) &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
                        return RESPST_ERR_UNSUPPORTED_OPCODE;
                }

                break;

        case IB_QPT_UC:
                if ((pkt->mask & RXE_WRITE_MASK) &&
                    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
                        qp->resp.drop_msg = 1;
                        return RESPST_CLEANUP;
                }

                break;

        case IB_QPT_UD:
        case IB_QPT_GSI:
                break;

        default:
                WARN_ON_ONCE(1);
                break;
        }

        return RESPST_CHK_RESOURCE;
}

static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
        struct rxe_srq *srq = qp->srq;
        struct rxe_queue *q = srq->rq.queue;
        struct rxe_recv_wqe *wqe;
        struct ib_event ev;
        unsigned int count;
        size_t size;
        unsigned long flags;

        if (srq->error)
                return RESPST_ERR_RNR;

        spin_lock_irqsave(&srq->rq.consumer_lock, flags);

        wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
        if (!wqe) {
                spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
                return RESPST_ERR_RNR;
        }

        /* don't trust user space data */
        if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) {
                spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
                pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
                return RESPST_ERR_MALFORMED_WQE;
        }
        size = sizeof(*wqe) + wqe->dma.num_sge * sizeof(struct rxe_sge);
        memcpy(&qp->resp.srq_wqe, wqe, size);

        qp->resp.wqe = &qp->resp.srq_wqe.wqe;
        queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
        count = queue_count(q, QUEUE_TYPE_FROM_CLIENT);

        if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) {
                srq->limit = 0;
                goto event;
        }

        spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
        return RESPST_CHK_LENGTH;

event:
        spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
        ev.device = qp->ibqp.device;
        ev.element.srq = qp->ibqp.srq;
        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
        srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
        return RESPST_CHK_LENGTH;
}

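/* The SRQ WQE is copied out of the shared queue rather than used in
 * place: the queue buffer is mapped into user space, which could
 * rewrite the entry after num_sge has been validated above.
 */
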
static enum resp_states check_resource(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        struct rxe_srq *srq = qp->srq;

        if (qp->resp.state == QP_STATE_ERROR) {
                if (qp->resp.wqe) {
                        qp->resp.status = IB_WC_WR_FLUSH_ERR;
                        return RESPST_COMPLETE;
                } else if (!srq) {
                        qp->resp.wqe = queue_head(qp->rq.queue,
                                        QUEUE_TYPE_FROM_CLIENT);
                        if (qp->resp.wqe) {
                                qp->resp.status = IB_WC_WR_FLUSH_ERR;
                                return RESPST_COMPLETE;
                        } else {
                                return RESPST_EXIT;
                        }
                } else {
                        return RESPST_EXIT;
                }
        }

        if (pkt->mask & RXE_READ_OR_ATOMIC_MASK) {
                /* it is the requester's job not to send
                 * too many read/atomic ops; we just
                 * recycle the responder resource queue
                 */
                if (likely(qp->attr.max_dest_rd_atomic > 0))
                        return RESPST_CHK_LENGTH;
                else
                        return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
        }

        if (pkt->mask & RXE_RWR_MASK) {
                if (srq)
                        return get_srq_wqe(qp);

                qp->resp.wqe = queue_head(qp->rq.queue,
                                QUEUE_TYPE_FROM_CLIENT);
                return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
        }

        return RESPST_CHK_LENGTH;
}

static enum resp_states check_length(struct rxe_qp *qp,
                                     struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                return RESPST_CHK_RKEY;

        case IB_QPT_UC:
                return RESPST_CHK_RKEY;

        default:
                return RESPST_CHK_RKEY;
        }
}

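/* check_length() is effectively a placeholder: every QP type falls
 * through to RESPST_CHK_RKEY. Payload length versus MTU appears to be
 * validated in check_rkey() for writes, while copy_data() reports
 * overruns for sends (see send_data_in()).
 */
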
static enum resp_states check_rkey(struct rxe_qp *qp,
                                   struct rxe_pkt_info *pkt)
{
        struct rxe_mr *mr = NULL;
        struct rxe_mw *mw = NULL;
        u64 va;
        u32 rkey;
        u32 resid;
        u32 pktlen;
        int mtu = qp->mtu;
        enum resp_states state;
        int access;

        if (pkt->mask & RXE_READ_OR_WRITE_MASK) {
                if (pkt->mask & RXE_RETH_MASK) {
                        qp->resp.va = reth_va(pkt);
                        qp->resp.offset = 0;
                        qp->resp.rkey = reth_rkey(pkt);
                        qp->resp.resid = reth_len(pkt);
                        qp->resp.length = reth_len(pkt);
                }
                access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
                                                     : IB_ACCESS_REMOTE_WRITE;
        } else if (pkt->mask & RXE_ATOMIC_MASK) {
                qp->resp.va = atmeth_va(pkt);
                qp->resp.offset = 0;
                qp->resp.rkey = atmeth_rkey(pkt);
                qp->resp.resid = sizeof(u64);
                access = IB_ACCESS_REMOTE_ATOMIC;
        } else {
                return RESPST_EXECUTE;
        }

        /* A zero-byte op is not required to set an addr or rkey. */
        if ((pkt->mask & RXE_READ_OR_WRITE_MASK) &&
            (pkt->mask & RXE_RETH_MASK) &&
            reth_len(pkt) == 0) {
                return RESPST_EXECUTE;
        }

        va      = qp->resp.va;
        rkey    = qp->resp.rkey;
        resid   = qp->resp.resid;
        pktlen  = payload_size(pkt);

        if (rkey_is_mw(rkey)) {
                mw = rxe_lookup_mw(qp, access, rkey);
                if (!mw) {
                        pr_err("%s: no MW matches rkey %#x\n", __func__, rkey);
                        state = RESPST_ERR_RKEY_VIOLATION;
                        goto err;
                }

                mr = mw->mr;
                if (!mr) {
                        pr_err("%s: MW doesn't have an MR\n", __func__);
                        state = RESPST_ERR_RKEY_VIOLATION;
                        goto err;
                }

                if (mw->access & IB_ZERO_BASED)
                        qp->resp.offset = mw->addr;

                rxe_put(mw);
                rxe_get(mr);
        } else {
                mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
                if (!mr) {
                        pr_err("%s: no MR matches rkey %#x\n", __func__, rkey);
                        state = RESPST_ERR_RKEY_VIOLATION;
                        goto err;
                }
        }

        if (mr_check_range(mr, va + qp->resp.offset, resid)) {
                state = RESPST_ERR_RKEY_VIOLATION;
                goto err;
        }

        if (pkt->mask & RXE_WRITE_MASK) {
                if (resid > mtu) {
                        if (pktlen != mtu || bth_pad(pkt)) {
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                } else {
                        if (pktlen != resid) {
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                        if ((bth_pad(pkt) != (0x3 & (-resid)))) {
                                /* This may not strictly be a length error,
                                 * but no other error state fits.
                                 */
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                }
        }

        WARN_ON_ONCE(qp->resp.mr);

        qp->resp.mr = mr;
        return RESPST_EXECUTE;

err:
        if (mr)
                rxe_put(mr);
        if (mw)
                rxe_put(mw);

        return state;
}

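/* Worked example for the pad check above: on the last (or only)
 * packet of a write, bth_pad(pkt) must equal (-resid) & 0x3, i.e. the
 * number of bytes that rounds resid up to a 4-byte boundary. A 5-byte
 * resid therefore requires pad == 3 (5 + 3 == 8).
 */
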
static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
                                     int data_len)
{
        int err;

        err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
                        data_addr, data_len, RXE_TO_MR_OBJ);
        if (unlikely(err))
                return (err == -ENOSPC) ? RESPST_ERR_LENGTH
                                        : RESPST_ERR_MALFORMED_WQE;

        return RESPST_NONE;
}

static enum resp_states write_data_in(struct rxe_qp *qp,
                                      struct rxe_pkt_info *pkt)
{
        enum resp_states rc = RESPST_NONE;
        int err;
        int data_len = payload_size(pkt);

        err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
                          payload_addr(pkt), data_len, RXE_TO_MR_OBJ);
        if (err) {
                rc = RESPST_ERR_RKEY_VIOLATION;
                goto out;
        }

        qp->resp.va += data_len;
        qp->resp.resid -= data_len;

out:
        return rc;
}

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

static enum resp_states process_atomic(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        u64 *vaddr;
        enum resp_states ret;
        struct rxe_mr *mr = qp->resp.mr;

        if (mr->state != RXE_MR_STATE_VALID) {
                ret = RESPST_ERR_RKEY_VIOLATION;
                goto out;
        }

        vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, sizeof(u64));

        /* check vaddr is 8 bytes aligned. */
        if (!vaddr || (uintptr_t)vaddr & 7) {
                ret = RESPST_ERR_MISALIGNED_ATOMIC;
                goto out;
        }

        spin_lock_bh(&atomic_ops_lock);

        qp->resp.atomic_orig = *vaddr;

        if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
                if (*vaddr == atmeth_comp(pkt))
                        *vaddr = atmeth_swap_add(pkt);
        } else {
                *vaddr += atmeth_swap_add(pkt);
        }

        spin_unlock_bh(&atomic_ops_lock);

        ret = RESPST_NONE;
out:
        return ret;
}

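/* Note that atomic_ops_lock is a single global lock: atomics are
 * serialized across all QPs and all rxe devices, not just against
 * other atomics targeting the same address. Simple, but potentially a
 * bottleneck under heavy atomic traffic.
 */
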
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
                                          struct rxe_pkt_info *pkt,
                                          struct rxe_pkt_info *ack,
                                          int opcode,
                                          int payload,
                                          u32 psn,
                                          u8 syndrome)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct sk_buff *skb;
        int paylen;
        int pad;
        int err;

        /*
         * allocate packet
         */
        pad = (-payload) & 0x3;
        paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

        skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
        if (!skb)
                return NULL;

        ack->qp = qp;
        ack->opcode = opcode;
        ack->mask = rxe_opcode[opcode].mask;
        ack->paylen = paylen;
        ack->psn = psn;

        bth_init(ack, opcode, 0, 0, pad, IB_DEFAULT_PKEY_FULL,
                 qp->attr.dest_qp_num, 0, psn);

        if (ack->mask & RXE_AETH_MASK) {
                aeth_set_syn(ack, syndrome);
                aeth_set_msn(ack, qp->resp.msn);
        }

        if (ack->mask & RXE_ATMACK_MASK)
                atmack_set_orig(ack, qp->resp.atomic_orig);

        err = rxe_prepare(&qp->pri_av, ack, skb);
        if (err) {
                kfree_skb(skb);
                return NULL;
        }

        return skb;
}

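/* Sizing in prepare_ack_packet(): pad = (-payload) & 0x3 rounds the
 * payload up to a 4-byte multiple (a 1-byte payload gets pad == 3),
 * and paylen adds the per-opcode header length plus RXE_ICRC_SIZE
 * bytes for the trailing ICRC.
 */
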
static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp,
                                        struct rxe_pkt_info *pkt)
{
        struct resp_res *res;
        u32 pkts;

        res = &qp->resp.resources[qp->resp.res_head];
        rxe_advance_resp_resource(qp);
        free_rd_atomic_resource(qp, res);

        res->type = RXE_READ_MASK;
        res->replay = 0;
        res->read.va = qp->resp.va + qp->resp.offset;
        res->read.va_org = qp->resp.va + qp->resp.offset;
        res->read.resid = qp->resp.resid;
        res->read.length = qp->resp.resid;
        res->read.rkey = qp->resp.rkey;

        pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1) / qp->mtu, 1);
        res->first_psn = pkt->psn;
        res->cur_psn = pkt->psn;
        res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;

        res->state = rdatm_res_state_new;

        return res;
}

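/* pkts above is ceil(reth_len / mtu), clamped to at least 1 so that a
 * zero-length read still consumes one response packet; last_psn is
 * then first_psn + pkts - 1, modulo the 24-bit PSN space.
 */
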
/**
 * rxe_recheck_mr - revalidate MR from rkey and get a reference
 * @qp: the qp
 * @rkey: the rkey
 *
 * This code allows the MR to be invalidated or deregistered, or the
 * MW, if one was used, to be invalidated or deallocated. It is
 * assumed that the access permissions, if originally good, are still
 * OK and that the mappings are unchanged.
 *
 * TODO: If someone reregisters an MR to change its size or
 * access permissions during the processing of an RDMA read
 * we should kill the responder resource and complete the
 * operation with an error.
 *
 * Return: mr on success else NULL
 */
static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct rxe_mr *mr;
        struct rxe_mw *mw;

        if (rkey_is_mw(rkey)) {
                mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
                if (!mw)
                        return NULL;

                mr = mw->mr;
                if (mw->rkey != rkey || mw->state != RXE_MW_STATE_VALID ||
                    !mr || mr->state != RXE_MR_STATE_VALID) {
                        rxe_put(mw);
                        return NULL;
                }

                rxe_get(mr);
                rxe_put(mw);

                return mr;
        }

        mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
        if (!mr)
                return NULL;

        if (mr->rkey != rkey || mr->state != RXE_MR_STATE_VALID) {
                rxe_put(mr);
                return NULL;
        }

        return mr;
}

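/* The rkey >> 8 lookups above reflect the rxe rkey layout: the low
 * eight bits are the key/variant byte and the upper bits index the
 * object pool. Comparing mw->rkey/mr->rkey afterwards re-checks the
 * full value, including the key byte.
 */
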
/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
                                   struct rxe_pkt_info *req_pkt)
{
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;
        int mtu = qp->mtu;
        enum resp_states state;
        int payload;
        int opcode;
        int err;
        struct resp_res *res = qp->resp.res;
        struct rxe_mr *mr;

        if (!res) {
                res = rxe_prepare_read_res(qp, req_pkt);
                qp->resp.res = res;
        }

        if (res->state == rdatm_res_state_new) {
                if (!res->replay) {
                        mr = qp->resp.mr;
                        qp->resp.mr = NULL;
                } else {
                        mr = rxe_recheck_mr(qp, res->read.rkey);
                        if (!mr)
                                return RESPST_ERR_RKEY_VIOLATION;
                }

                if (res->read.resid <= mtu)
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
                else
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
        } else {
                mr = rxe_recheck_mr(qp, res->read.rkey);
                if (!mr)
                        return RESPST_ERR_RKEY_VIOLATION;

                if (res->read.resid > mtu)
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
                else
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
        }

        res->state = rdatm_res_state_next;

        payload = min_t(int, res->read.resid, mtu);

        skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
                                 res->cur_psn, AETH_ACK_UNLIMITED);
        if (!skb)
                return RESPST_ERR_RNR;

        err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
                          payload, RXE_FROM_MR_OBJ);
        if (err)
                pr_err("Failed copying memory\n");
        if (mr)
                rxe_put(mr);

        if (bth_pad(&ack_pkt)) {
                u8 *pad = payload_addr(&ack_pkt) + payload;

                memset(pad, 0, bth_pad(&ack_pkt));
        }

        err = rxe_xmit_packet(qp, &ack_pkt, skb);
        if (err) {
                pr_err("Failed sending RDMA reply.\n");
                return RESPST_ERR_RNR;
        }

        res->read.va += payload;
        res->read.resid -= payload;
        res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

        if (res->read.resid > 0) {
                state = RESPST_DONE;
        } else {
                qp->resp.res = NULL;
                if (!res->replay)
                        qp->resp.opcode = -1;
                if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
                        qp->resp.psn = res->cur_psn;
                state = RESPST_CLEANUP;
        }

        return state;
}

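/* read_reply() emits one response packet per invocation. While
 * res->read.resid is nonzero it returns RESPST_DONE with qp->resp.res
 * still set, so the next pass through get_req() jumps straight back
 * to RESPST_READ_REPLY to continue the reply train.
 */
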
static int invalidate_rkey(struct rxe_qp *qp, u32 rkey)
{
        if (rkey_is_mw(rkey))
                return rxe_invalidate_mw(qp, rkey);
        else
                return rxe_invalidate_mr(qp, rkey);
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
        enum resp_states err;
        struct sk_buff *skb = PKT_TO_SKB(pkt);
        union rdma_network_hdr hdr;

        if (pkt->mask & RXE_SEND_MASK) {
                if (qp_type(qp) == IB_QPT_UD ||
                    qp_type(qp) == IB_QPT_GSI) {
                        if (skb->protocol == htons(ETH_P_IP)) {
                                memset(&hdr.reserved, 0,
                                                sizeof(hdr.reserved));
                                memcpy(&hdr.roce4grh, ip_hdr(skb),
                                                sizeof(hdr.roce4grh));
                                err = send_data_in(qp, &hdr, sizeof(hdr));
                        } else {
                                err = send_data_in(qp, ipv6_hdr(skb),
                                                sizeof(hdr));
                        }
                        if (err)
                                return err;
                }
                err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
                if (err)
                        return err;
        } else if (pkt->mask & RXE_WRITE_MASK) {
                err = write_data_in(qp, pkt);
                if (err)
                        return err;
        } else if (pkt->mask & RXE_READ_MASK) {
                /* For RDMA Read we can increment the msn now. See C9-148. */
                qp->resp.msn++;
                return RESPST_READ_REPLY;
        } else if (pkt->mask & RXE_ATOMIC_MASK) {
                err = process_atomic(qp, pkt);
                if (err)
                        return err;
        } else {
                /* Unreachable */
                WARN_ON_ONCE(1);
        }

        if (pkt->mask & RXE_IETH_MASK) {
                u32 rkey = ieth_rkey(pkt);

                err = invalidate_rkey(qp, rkey);
                if (err)
                        return RESPST_ERR_INVALIDATE_RKEY;
        }

        if (pkt->mask & RXE_END_MASK)
                /* We successfully processed this new request. */
                qp->resp.msn++;

        /* next expected psn, read handles this separately */
        qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
        qp->resp.ack_psn = qp->resp.psn;

        qp->resp.opcode = pkt->opcode;
        qp->resp.status = IB_WC_SUCCESS;

        if (pkt->mask & RXE_COMP_MASK)
                return RESPST_COMPLETE;
        else if (qp_type(qp) == IB_QPT_RC)
                return RESPST_ACKNOWLEDGE;
        else
                return RESPST_CLEANUP;
}

static enum resp_states do_complete(struct rxe_qp *qp,
                                    struct rxe_pkt_info *pkt)
{
        struct rxe_cqe cqe;
        struct ib_wc *wc = &cqe.ibwc;
        struct ib_uverbs_wc *uwc = &cqe.uibwc;
        struct rxe_recv_wqe *wqe = qp->resp.wqe;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        if (!wqe)
                goto finish;

        memset(&cqe, 0, sizeof(cqe));

        if (qp->rcq->is_user) {
                uwc->status             = qp->resp.status;
                uwc->qp_num             = qp->ibqp.qp_num;
                uwc->wr_id              = wqe->wr_id;
        } else {
                wc->status              = qp->resp.status;
                wc->qp                  = &qp->ibqp;
                wc->wr_id               = wqe->wr_id;
        }

        if (wc->status == IB_WC_SUCCESS) {
                rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
                wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
                                pkt->mask & RXE_WRITE_MASK) ?
                                        IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
                wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
                                pkt->mask & RXE_WRITE_MASK) ?
                                        qp->resp.length : wqe->dma.length - wqe->dma.resid;

                /* fields after byte_len are different between kernel and user
                 * space
                 */
                if (qp->rcq->is_user) {
                        uwc->wc_flags = IB_WC_GRH;

                        if (pkt->mask & RXE_IMMDT_MASK) {
                                uwc->wc_flags |= IB_WC_WITH_IMM;
                                uwc->ex.imm_data = immdt_imm(pkt);
                        }

                        if (pkt->mask & RXE_IETH_MASK) {
                                uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
                                uwc->ex.invalidate_rkey = ieth_rkey(pkt);
                        }

                        if (pkt->mask & RXE_DETH_MASK)
                                uwc->src_qp = deth_sqp(pkt);

                        uwc->port_num           = qp->attr.port_num;
                } else {
                        struct sk_buff *skb = PKT_TO_SKB(pkt);

                        wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
                        if (skb->protocol == htons(ETH_P_IP))
                                wc->network_hdr_type = RDMA_NETWORK_IPV4;
                        else
                                wc->network_hdr_type = RDMA_NETWORK_IPV6;

                        if (is_vlan_dev(skb->dev)) {
                                wc->wc_flags |= IB_WC_WITH_VLAN;
                                wc->vlan_id = vlan_dev_vlan_id(skb->dev);
                        }

                        if (pkt->mask & RXE_IMMDT_MASK) {
                                wc->wc_flags |= IB_WC_WITH_IMM;
                                wc->ex.imm_data = immdt_imm(pkt);
                        }

                        if (pkt->mask & RXE_IETH_MASK) {
                                wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                                wc->ex.invalidate_rkey = ieth_rkey(pkt);
                        }

                        if (pkt->mask & RXE_DETH_MASK)
                                wc->src_qp = deth_sqp(pkt);

                        wc->port_num            = qp->attr.port_num;
                }
        }

        /* have copy for srq and reference for !srq */
        if (!qp->srq)
                queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT);

        qp->resp.wqe = NULL;

        if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
                return RESPST_ERR_CQ_OVERFLOW;

finish:
        if (unlikely(qp->resp.state == QP_STATE_ERROR))
                return RESPST_CHK_RESOURCE;
        if (unlikely(!pkt))
                return RESPST_DONE;
        if (qp_type(qp) == IB_QPT_RC)
                return RESPST_ACKNOWLEDGE;
        else
                return RESPST_CLEANUP;
}

static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
                    u8 syndrome, u32 psn)
{
        int err = 0;
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;

        skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
                                 0, psn, syndrome);
        if (!skb) {
                err = -ENOMEM;
                goto err1;
        }

        err = rxe_xmit_packet(qp, &ack_pkt, skb);
        if (err)
                pr_err_ratelimited("Failed sending ack\n");

err1:
        return err;
}

static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
                           u8 syndrome)
{
        int rc = 0;
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;
        struct resp_res *res;

        skb = prepare_ack_packet(qp, pkt, &ack_pkt,
                                 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
                                 syndrome);
        if (!skb) {
                rc = -ENOMEM;
                goto out;
        }

        res = &qp->resp.resources[qp->resp.res_head];
        free_rd_atomic_resource(qp, res);
        rxe_advance_resp_resource(qp);

        skb_get(skb);
        res->type = RXE_ATOMIC_MASK;
        res->atomic.skb = skb;
        res->first_psn = ack_pkt.psn;
        res->last_psn  = ack_pkt.psn;
        res->cur_psn   = ack_pkt.psn;

        rc = rxe_xmit_packet(qp, &ack_pkt, skb);
        if (rc) {
                pr_err_ratelimited("Failed sending ack\n");
                rxe_put(qp);
        }
out:
        return rc;
}

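/* The skb_get() above takes an extra reference so the ack skb can be
 * stashed in the responder resource (res->atomic.skb) and resent by
 * duplicate_request() if the requester replays the atomic; the
 * operation itself must not be re-executed.
 */
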
static enum resp_states acknowledge(struct rxe_qp *qp,
                                    struct rxe_pkt_info *pkt)
{
        if (qp_type(qp) != IB_QPT_RC)
                return RESPST_CLEANUP;

        if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
                send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
        else if (pkt->mask & RXE_ATOMIC_MASK)
                send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
        else if (bth_ack(pkt))
                send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

        return RESPST_CLEANUP;
}

static enum resp_states cleanup(struct rxe_qp *qp,
                                struct rxe_pkt_info *pkt)
{
        struct sk_buff *skb;

        if (pkt) {
                skb = skb_dequeue(&qp->req_pkts);
                rxe_put(qp);
                kfree_skb(skb);
                ib_device_put(qp->ibqp.device);
        }

        if (qp->resp.mr) {
                rxe_put(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        return RESPST_DONE;
}

static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
        int i;

        for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                struct resp_res *res = &qp->resp.resources[i];

                if (res->type == 0)
                        continue;

                if (psn_compare(psn, res->first_psn) >= 0 &&
                    psn_compare(psn, res->last_psn) <= 0) {
                        return res;
                }
        }

        return NULL;
}

static enum resp_states duplicate_request(struct rxe_qp *qp,
                                          struct rxe_pkt_info *pkt)
{
        enum resp_states rc;
        u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;

        if (pkt->mask & RXE_SEND_MASK ||
            pkt->mask & RXE_WRITE_MASK) {
                /* SEND or WRITE. Ack again and cleanup. C9-105. */
                send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
                return RESPST_CLEANUP;
        } else if (pkt->mask & RXE_READ_MASK) {
                struct resp_res *res;

                res = find_resource(qp, pkt->psn);
                if (!res) {
                        /* Resource not found. Class D error.  Drop the
                         * request.
                         */
                        rc = RESPST_CLEANUP;
                        goto out;
                } else {
                        /* Ensure this new request is the same as the previous
                         * one or a subset of it.
                         */
                        u64 iova = reth_va(pkt);
                        u32 resid = reth_len(pkt);

                        if (iova < res->read.va_org ||
                            resid > res->read.length ||
                            (iova + resid) > (res->read.va_org +
                                              res->read.length)) {
                                rc = RESPST_CLEANUP;
                                goto out;
                        }

                        if (reth_rkey(pkt) != res->read.rkey) {
                                rc = RESPST_CLEANUP;
                                goto out;
                        }

                        res->cur_psn = pkt->psn;
                        res->state = (pkt->psn == res->first_psn) ?
                                        rdatm_res_state_new :
                                        rdatm_res_state_replay;
                        res->replay = 1;

                        /* Reset the resource, except length. */
                        res->read.va_org = iova;
                        res->read.va = iova;
                        res->read.resid = resid;

                        /* Replay the RDMA read reply. */
                        qp->resp.res = res;
                        rc = RESPST_READ_REPLY;
                        goto out;
                }
        } else {
                struct resp_res *res;

                /* Find the operation in our list of responder resources. */
                res = find_resource(qp, pkt->psn);
                if (res) {
                        skb_get(res->atomic.skb);
                        /* Resend the result. */
                        rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
                        if (rc) {
                                pr_err("Failed resending result. This flow is not handled - skb ignored\n");
                                rc = RESPST_CLEANUP;
                                goto out;
                        }
                }

                /* Resource not found (Class D error), or the cached result
                 * was resent above; either way, drop the request.
                 */
                rc = RESPST_CLEANUP;
                goto out;
        }
out:
        return rc;
}

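/* Summary of the replay rules above: duplicate sends and writes are
 * simply re-acked; duplicate reads are re-executed from memory after
 * rewinding the saved resource to the replayed range (which must be a
 * subset of the original request); duplicate atomics are answered
 * from the cached ack skb, never re-executed.
 */
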
/* Process a class A or C error. Both are treated the same in this
 * implementation.
 */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
                              enum ib_wc_status status)
{
        qp->resp.aeth_syndrome  = syndrome;
        qp->resp.status         = status;

        /* indicate that we should go through the ERROR state */
        qp->resp.goto_error     = 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
        /* UC */
        if (qp->srq) {
                /* Class E */
                qp->resp.drop_msg = 1;
                if (qp->resp.wqe) {
                        qp->resp.status = IB_WC_REM_INV_REQ_ERR;
                        return RESPST_COMPLETE;
                } else {
                        return RESPST_CLEANUP;
                }
        } else {
                /* Class D1. This packet may be the start of a
                 * new message and could be valid. The previous
                 * message is invalid and ignored. Reset the
                 * recv wr to its original state.
                 */
                if (qp->resp.wqe) {
                        qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
                        qp->resp.wqe->dma.cur_sge = 0;
                        qp->resp.wqe->dma.sge_offset = 0;
                        qp->resp.opcode = -1;
                }

                if (qp->resp.mr) {
                        rxe_put(qp->resp.mr);
                        qp->resp.mr = NULL;
                }

                return RESPST_CLEANUP;
        }
}

static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
        struct sk_buff *skb;
        struct rxe_queue *q = qp->rq.queue;

        while ((skb = skb_dequeue(&qp->req_pkts))) {
                rxe_put(qp);
                kfree_skb(skb);
                ib_device_put(qp->ibqp.device);
        }

        if (notify)
                return;

        while (!qp->srq && q && queue_head(q, q->type))
                queue_advance_consumer(q, q->type);
}

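/* Each queued request skb holds a reference on the QP and on the
 * ib_device, taken on the receive path; the drain loop above drops
 * both. When notify is false (QP reset), any posted receive WQEs are
 * also consumed and discarded rather than flushed with completions.
 */
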
int rxe_responder(void *arg)
{
        struct rxe_qp *qp = (struct rxe_qp *)arg;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        enum resp_states state;
        struct rxe_pkt_info *pkt = NULL;
        int ret = 0;

        if (!rxe_get(qp))
                return -EAGAIN;

        qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

        if (!qp->valid) {
                ret = -EINVAL;
                goto done;
        }

        switch (qp->resp.state) {
        case QP_STATE_RESET:
                state = RESPST_RESET;
                break;

        default:
                state = RESPST_GET_REQ;
                break;
        }

        while (1) {
                pr_debug("qp#%d state = %s\n", qp_num(qp),
                         resp_state_name[state]);
                switch (state) {
                case RESPST_GET_REQ:
                        state = get_req(qp, &pkt);
                        break;
                case RESPST_CHK_PSN:
                        state = check_psn(qp, pkt);
                        break;
                case RESPST_CHK_OP_SEQ:
                        state = check_op_seq(qp, pkt);
                        break;
                case RESPST_CHK_OP_VALID:
                        state = check_op_valid(qp, pkt);
                        break;
                case RESPST_CHK_RESOURCE:
                        state = check_resource(qp, pkt);
                        break;
                case RESPST_CHK_LENGTH:
                        state = check_length(qp, pkt);
                        break;
                case RESPST_CHK_RKEY:
                        state = check_rkey(qp, pkt);
                        break;
                case RESPST_EXECUTE:
                        state = execute(qp, pkt);
                        break;
                case RESPST_COMPLETE:
                        state = do_complete(qp, pkt);
                        break;
                case RESPST_READ_REPLY:
                        state = read_reply(qp, pkt);
                        break;
                case RESPST_ACKNOWLEDGE:
                        state = acknowledge(qp, pkt);
                        break;
                case RESPST_CLEANUP:
                        state = cleanup(qp, pkt);
                        break;
                case RESPST_DUPLICATE_REQUEST:
                        state = duplicate_request(qp, pkt);
                        break;
                case RESPST_ERR_PSN_OUT_OF_SEQ:
                        /* RC only - Class B. Drop packet. */
                        send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
                        state = RESPST_CLEANUP;
                        break;

                case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
                case RESPST_ERR_MISSING_OPCODE_FIRST:
                case RESPST_ERR_MISSING_OPCODE_LAST_C:
                case RESPST_ERR_UNSUPPORTED_OPCODE:
                case RESPST_ERR_MISALIGNED_ATOMIC:
                        /* RC Only - Class C. */
                        do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
                                          IB_WC_REM_INV_REQ_ERR);
                        state = RESPST_COMPLETE;
                        break;

                case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
                        state = do_class_d1e_error(qp);
                        break;
                case RESPST_ERR_RNR:
                        if (qp_type(qp) == IB_QPT_RC) {
                                rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
                                /* RC - class B */
                                send_ack(qp, pkt, AETH_RNR_NAK |
                                         (~AETH_TYPE_MASK &
                                         qp->attr.min_rnr_timer),
                                         pkt->psn);
                        } else {
                                /* UD/UC - class D */
                                qp->resp.drop_msg = 1;
                        }
                        state = RESPST_CLEANUP;
                        break;

                case RESPST_ERR_RKEY_VIOLATION:
                        if (qp_type(qp) == IB_QPT_RC) {
                                /* Class C */
                                do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
                                                  IB_WC_REM_ACCESS_ERR);
                                state = RESPST_COMPLETE;
                        } else {
                                qp->resp.drop_msg = 1;
                                if (qp->srq) {
                                        /* UC/SRQ Class D */
                                        qp->resp.status = IB_WC_REM_ACCESS_ERR;
                                        state = RESPST_COMPLETE;
                                } else {
                                        /* UC/non-SRQ Class E. */
                                        state = RESPST_CLEANUP;
                                }
                        }
                        break;

                case RESPST_ERR_INVALIDATE_RKEY:
                        /* RC - Class J. */
                        qp->resp.goto_error = 1;
                        qp->resp.status = IB_WC_REM_INV_REQ_ERR;
                        state = RESPST_COMPLETE;
                        break;

                case RESPST_ERR_LENGTH:
                        if (qp_type(qp) == IB_QPT_RC) {
                                /* Class C */
                                do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
                                                  IB_WC_REM_INV_REQ_ERR);
                                state = RESPST_COMPLETE;
                        } else if (qp->srq) {
                                /* UC/UD - class E */
                                qp->resp.status = IB_WC_REM_INV_REQ_ERR;
                                state = RESPST_COMPLETE;
                        } else {
                                /* UC/UD - class D */
                                qp->resp.drop_msg = 1;
                                state = RESPST_CLEANUP;
                        }
                        break;

                case RESPST_ERR_MALFORMED_WQE:
                        /* All, Class A. */
                        do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
                                          IB_WC_LOC_QP_OP_ERR);
                        state = RESPST_COMPLETE;
                        break;

                case RESPST_ERR_CQ_OVERFLOW:
                        /* All - Class G */
                        state = RESPST_ERROR;
                        break;

                case RESPST_DONE:
                        if (qp->resp.goto_error) {
                                state = RESPST_ERROR;
                                break;
                        }

                        goto done;

                case RESPST_EXIT:
                        if (qp->resp.goto_error) {
                                state = RESPST_ERROR;
                                break;
                        }

                        goto exit;

                case RESPST_RESET:
                        rxe_drain_req_pkts(qp, false);
                        qp->resp.wqe = NULL;
                        goto exit;

                case RESPST_ERROR:
                        qp->resp.goto_error = 0;
                        pr_warn("qp#%d moved to error state\n", qp_num(qp));
                        rxe_qp_error(qp);
                        goto exit;

                default:
                        WARN_ON_ONCE(1);
                }
        }

exit:
        ret = -EAGAIN;
done:
        rxe_put(qp);
        return ret;
}