// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static char *resp_state_name[] = {
        [RESPST_NONE]                           = "NONE",
        [RESPST_GET_REQ]                        = "GET_REQ",
        [RESPST_CHK_PSN]                        = "CHK_PSN",
        [RESPST_CHK_OP_SEQ]                     = "CHK_OP_SEQ",
        [RESPST_CHK_OP_VALID]                   = "CHK_OP_VALID",
        [RESPST_CHK_RESOURCE]                   = "CHK_RESOURCE",
        [RESPST_CHK_LENGTH]                     = "CHK_LENGTH",
        [RESPST_CHK_RKEY]                       = "CHK_RKEY",
        [RESPST_EXECUTE]                        = "EXECUTE",
        [RESPST_READ_REPLY]                     = "READ_REPLY",
        [RESPST_ATOMIC_REPLY]                   = "ATOMIC_REPLY",
        [RESPST_ATOMIC_WRITE_REPLY]             = "ATOMIC_WRITE_REPLY",
        [RESPST_PROCESS_FLUSH]                  = "PROCESS_FLUSH",
        [RESPST_COMPLETE]                       = "COMPLETE",
        [RESPST_ACKNOWLEDGE]                    = "ACKNOWLEDGE",
        [RESPST_CLEANUP]                        = "CLEANUP",
        [RESPST_DUPLICATE_REQUEST]              = "DUPLICATE_REQUEST",
        [RESPST_ERR_MALFORMED_WQE]              = "ERR_MALFORMED_WQE",
        [RESPST_ERR_UNSUPPORTED_OPCODE]         = "ERR_UNSUPPORTED_OPCODE",
        [RESPST_ERR_MISALIGNED_ATOMIC]          = "ERR_MISALIGNED_ATOMIC",
        [RESPST_ERR_PSN_OUT_OF_SEQ]             = "ERR_PSN_OUT_OF_SEQ",
        [RESPST_ERR_MISSING_OPCODE_FIRST]       = "ERR_MISSING_OPCODE_FIRST",
        [RESPST_ERR_MISSING_OPCODE_LAST_C]      = "ERR_MISSING_OPCODE_LAST_C",
        [RESPST_ERR_MISSING_OPCODE_LAST_D1E]    = "ERR_MISSING_OPCODE_LAST_D1E",
        [RESPST_ERR_TOO_MANY_RDMA_ATM_REQ]      = "ERR_TOO_MANY_RDMA_ATM_REQ",
        [RESPST_ERR_RNR]                        = "ERR_RNR",
        [RESPST_ERR_RKEY_VIOLATION]             = "ERR_RKEY_VIOLATION",
        [RESPST_ERR_INVALIDATE_RKEY]            = "ERR_INVALIDATE_RKEY_VIOLATION",
        [RESPST_ERR_LENGTH]                     = "ERR_LENGTH",
        [RESPST_ERR_CQ_OVERFLOW]                = "ERR_CQ_OVERFLOW",
        [RESPST_ERROR]                          = "ERROR",
        [RESPST_DONE]                           = "DONE",
        [RESPST_EXIT]                           = "EXIT",
};

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
        int must_sched;
        struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

        skb_queue_tail(&qp->req_pkts, skb);

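        /* defer to the responder task for RDMA read requests, whose
         * replies may span many packets, or when packets are already
         * backed up; a single other packet can be handled directly
         * in this context
         */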
        must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
                        (skb_queue_len(&qp->req_pkts) > 1);

        if (must_sched)
                rxe_sched_task(&qp->resp.task);
        else
                rxe_run_task(&qp->resp.task);
}

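/* peek at the next request packet, if any; continue an in-progress
 * read reply if one is pending, otherwise start with the PSN checks
 */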
static inline enum resp_states get_req(struct rxe_qp *qp,
                                       struct rxe_pkt_info **pkt_p)
{
        struct sk_buff *skb;

        skb = skb_peek(&qp->req_pkts);
        if (!skb)
                return RESPST_EXIT;

        *pkt_p = SKB_TO_PKT(skb);

        return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

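/* verify the packet PSN is in sequence; for RC NAK once for
 * out of sequence packets and detect duplicates, for UC drop the
 * current message and resynchronize on the next first packet
 */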
static enum resp_states check_psn(struct rxe_qp *qp,
                                  struct rxe_pkt_info *pkt)
{
        int diff = psn_compare(pkt->psn, qp->resp.psn);
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        switch (qp_type(qp)) {
        case IB_QPT_RC:
                if (diff > 0) {
                        if (qp->resp.sent_psn_nak)
                                return RESPST_CLEANUP;

                        qp->resp.sent_psn_nak = 1;
                        rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
                        return RESPST_ERR_PSN_OUT_OF_SEQ;

                } else if (diff < 0) {
                        rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
                        return RESPST_DUPLICATE_REQUEST;
                }

                if (qp->resp.sent_psn_nak)
                        qp->resp.sent_psn_nak = 0;

                break;

        case IB_QPT_UC:
                if (qp->resp.drop_msg || diff != 0) {
                        if (pkt->mask & RXE_START_MASK) {
                                qp->resp.drop_msg = 0;
                                return RESPST_CHK_OP_SEQ;
                        }

                        qp->resp.drop_msg = 1;
                        return RESPST_CLEANUP;
                }
                break;
        default:
                break;
        }

        return RESPST_CHK_OP_SEQ;
}

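/* enforce the legal ordering of opcodes within a message, e.g. a
 * middle or last packet must follow a matching first or middle packet
 */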
static enum resp_states check_op_seq(struct rxe_qp *qp,
                                     struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                switch (qp->resp.opcode) {
                case IB_OPCODE_RC_SEND_FIRST:
                case IB_OPCODE_RC_SEND_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_SEND_MIDDLE:
                        case IB_OPCODE_RC_SEND_LAST:
                        case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_C;
                        }

                case IB_OPCODE_RC_RDMA_WRITE_FIRST:
                case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_C;
                        }

                default:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_SEND_MIDDLE:
                        case IB_OPCODE_RC_SEND_LAST:
                        case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
                        case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_ERR_MISSING_OPCODE_FIRST;
                        default:
                                return RESPST_CHK_OP_VALID;
                        }
                }
                break;

        case IB_QPT_UC:
                switch (qp->resp.opcode) {
                case IB_OPCODE_UC_SEND_FIRST:
                case IB_OPCODE_UC_SEND_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_SEND_MIDDLE:
                        case IB_OPCODE_UC_SEND_LAST:
                        case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
                        }

                case IB_OPCODE_UC_RDMA_WRITE_FIRST:
                case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
                        }

                default:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_SEND_MIDDLE:
                        case IB_OPCODE_UC_SEND_LAST:
                        case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                qp->resp.drop_msg = 1;
                                return RESPST_CLEANUP;
                        default:
                                return RESPST_CHK_OP_VALID;
                        }
                }
                break;

        default:
                return RESPST_CHK_OP_VALID;
        }
}

static bool check_qp_attr_access(struct rxe_qp *qp,
                                 struct rxe_pkt_info *pkt)
{
        if (((pkt->mask & RXE_READ_MASK) &&
             !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
            ((pkt->mask & (RXE_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) &&
             !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
            ((pkt->mask & RXE_ATOMIC_MASK) &&
             !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
                return false;

        if (pkt->mask & RXE_FLUSH_MASK) {
                u32 flush_type = feth_plt(pkt);

                if ((flush_type & IB_FLUSH_GLOBAL &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_FLUSH_GLOBAL)) ||
                    (flush_type & IB_FLUSH_PERSISTENT &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_FLUSH_PERSISTENT)))
                        return false;
        }

        return true;
}

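/* verify the opcode is permitted by the QP's access flags; for UC a
 * write without permission silently drops the message
 */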
static enum resp_states check_op_valid(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                if (!check_qp_attr_access(qp, pkt))
                        return RESPST_ERR_UNSUPPORTED_OPCODE;

                break;

        case IB_QPT_UC:
                if ((pkt->mask & RXE_WRITE_MASK) &&
                    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
                        qp->resp.drop_msg = 1;
                        return RESPST_CLEANUP;
                }

                break;

        case IB_QPT_UD:
        case IB_QPT_GSI:
                break;

        default:
                WARN_ON_ONCE(1);
                break;
        }

        return RESPST_CHK_RESOURCE;
}

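/* dequeue a receive WQE from the shared receive queue and take a
 * local copy, generating an SRQ limit event if the queue count
 * drops below the armed limit
 */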
static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
        struct rxe_srq *srq = qp->srq;
        struct rxe_queue *q = srq->rq.queue;
        struct rxe_recv_wqe *wqe;
        struct ib_event ev;
        unsigned int count;
        size_t size;
        unsigned long flags;

        if (srq->error)
                return RESPST_ERR_RNR;

        spin_lock_irqsave(&srq->rq.consumer_lock, flags);

        wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
        if (!wqe) {
                spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
                return RESPST_ERR_RNR;
        }

        /* don't trust user space data */
        if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) {
                spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
                rxe_dbg_qp(qp, "invalid num_sge in SRQ entry\n");
                return RESPST_ERR_MALFORMED_WQE;
        }
        size = sizeof(*wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge);
        memcpy(&qp->resp.srq_wqe, wqe, size);

        qp->resp.wqe = &qp->resp.srq_wqe.wqe;
        queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
        count = queue_count(q, QUEUE_TYPE_FROM_CLIENT);

        if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) {
                srq->limit = 0;
                goto event;
        }

        spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
        return RESPST_CHK_LENGTH;

event:
        spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
        ev.device = qp->ibqp.device;
        ev.element.srq = qp->ibqp.srq;
        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
        srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
        return RESPST_CHK_LENGTH;
}

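/* make sure a responder resource or a receive WQE is available for
 * the incoming request
 */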
static enum resp_states check_resource(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        struct rxe_srq *srq = qp->srq;

        if (pkt->mask & (RXE_READ_OR_ATOMIC_MASK | RXE_ATOMIC_WRITE_MASK)) {
                /* it is the requester's job not to send
                 * too many read/atomic ops, we just
                 * recycle the responder resource queue
                 */
                if (likely(qp->attr.max_dest_rd_atomic > 0))
                        return RESPST_CHK_LENGTH;
                else
                        return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
        }

        if (pkt->mask & RXE_RWR_MASK) {
                if (srq)
                        return get_srq_wqe(qp);

                qp->resp.wqe = queue_head(qp->rq.queue,
                                QUEUE_TYPE_FROM_CLIENT);
                return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
        }

        return RESPST_CHK_LENGTH;
}

static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
                                              struct rxe_pkt_info *pkt)
{
        /*
         * See IBA C9-92
         * For UD QPs we only check if the packet will fit in the
         * receive buffer later. For rdma operations additional
         * length checks are performed in check_rkey.
         */
        if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) ||
                                             (qp_type(qp) == IB_QPT_UC))) {
                unsigned int mtu = qp->mtu;
                unsigned int payload = payload_size(pkt);

                if ((pkt->mask & RXE_START_MASK) &&
                    (pkt->mask & RXE_END_MASK)) {
                        if (unlikely(payload > mtu)) {
                                rxe_dbg_qp(qp, "only packet too long");
                                return RESPST_ERR_LENGTH;
                        }
                } else if ((pkt->mask & RXE_START_MASK) ||
                           (pkt->mask & RXE_MIDDLE_MASK)) {
                        if (unlikely(payload != mtu)) {
                                rxe_dbg_qp(qp, "first or middle packet not mtu");
                                return RESPST_ERR_LENGTH;
                        }
                } else if (pkt->mask & RXE_END_MASK) {
                        if (unlikely((payload == 0) || (payload > mtu))) {
                                rxe_dbg_qp(qp, "last packet zero or too long");
                                return RESPST_ERR_LENGTH;
                        }
                }
        }

        /* See IBA C9-94 */
        if (pkt->mask & RXE_RETH_MASK) {
                if (reth_len(pkt) > (1U << 31)) {
                        rxe_dbg_qp(qp, "dma length too long");
                        return RESPST_ERR_LENGTH;
                }
        }

        return RESPST_CHK_RKEY;
}

/* if the reth length field is zero we can assume nothing
 * about the rkey value and should not validate or use it.
 * Instead set qp->resp.rkey to 0 which is an invalid rkey
 * value since the minimum index part is 1.
 */
static void qp_resp_from_reth(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
        unsigned int length = reth_len(pkt);

        qp->resp.va = reth_va(pkt);
        qp->resp.offset = 0;
        qp->resp.resid = length;
        qp->resp.length = length;
        if (pkt->mask & RXE_READ_OR_WRITE_MASK && length == 0)
                qp->resp.rkey = 0;
        else
                qp->resp.rkey = reth_rkey(pkt);
}

static void qp_resp_from_atmeth(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
        qp->resp.va = atmeth_va(pkt);
        qp->resp.offset = 0;
        qp->resp.rkey = atmeth_rkey(pkt);
        qp->resp.resid = sizeof(u64);
}

/* resolve the packet rkey to qp->resp.mr or set qp->resp.mr to NULL
 * if an invalid rkey is received or the rdma length is zero. For middle
 * or last packets use the stored value of mr.
 */
static enum resp_states check_rkey(struct rxe_qp *qp,
                                   struct rxe_pkt_info *pkt)
{
        struct rxe_mr *mr = NULL;
        struct rxe_mw *mw = NULL;
        u64 va;
        u32 rkey;
        u32 resid;
        u32 pktlen;
        int mtu = qp->mtu;
        enum resp_states state;
        int access = 0;

        if (pkt->mask & (RXE_READ_OR_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) {
                if (pkt->mask & RXE_RETH_MASK)
                        qp_resp_from_reth(qp, pkt);

                access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
                                                     : IB_ACCESS_REMOTE_WRITE;
        } else if (pkt->mask & RXE_FLUSH_MASK) {
                u32 flush_type = feth_plt(pkt);

                if (pkt->mask & RXE_RETH_MASK)
                        qp_resp_from_reth(qp, pkt);

                if (flush_type & IB_FLUSH_GLOBAL)
                        access |= IB_ACCESS_FLUSH_GLOBAL;
                if (flush_type & IB_FLUSH_PERSISTENT)
                        access |= IB_ACCESS_FLUSH_PERSISTENT;
        } else if (pkt->mask & RXE_ATOMIC_MASK) {
                qp_resp_from_atmeth(qp, pkt);
                access = IB_ACCESS_REMOTE_ATOMIC;
        } else {
                return RESPST_EXECUTE;
        }

        /* A zero-byte read or write op is not required to
         * set an addr or rkey. See C9-88
         */
        if ((pkt->mask & RXE_READ_OR_WRITE_MASK) &&
            (pkt->mask & RXE_RETH_MASK) && reth_len(pkt) == 0) {
                qp->resp.mr = NULL;
                return RESPST_EXECUTE;
        }

        va      = qp->resp.va;
        rkey    = qp->resp.rkey;
        resid   = qp->resp.resid;
        pktlen  = payload_size(pkt);

        if (rkey_is_mw(rkey)) {
                mw = rxe_lookup_mw(qp, access, rkey);
                if (!mw) {
                        rxe_dbg_qp(qp, "no MW matches rkey %#x\n", rkey);
                        state = RESPST_ERR_RKEY_VIOLATION;
                        goto err;
                }

                mr = mw->mr;
                if (!mr) {
                        rxe_dbg_qp(qp, "MW doesn't have an MR\n");
                        state = RESPST_ERR_RKEY_VIOLATION;
                        goto err;
                }

                if (mw->access & IB_ZERO_BASED)
                        qp->resp.offset = mw->addr;

                rxe_put(mw);
                rxe_get(mr);
        } else {
                mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
                if (!mr) {
                        rxe_dbg_qp(qp, "no MR matches rkey %#x\n", rkey);
                        state = RESPST_ERR_RKEY_VIOLATION;
                        goto err;
                }
        }

        if (pkt->mask & RXE_FLUSH_MASK) {
                /* FLUSH MR may not set va or resid
                 * no need to check range since we will flush whole mr
                 */
                if (feth_sel(pkt) == IB_FLUSH_MR)
                        goto skip_check_range;
        }

        if (mr_check_range(mr, va + qp->resp.offset, resid)) {
                state = RESPST_ERR_RKEY_VIOLATION;
                goto err;
        }

skip_check_range:
        if (pkt->mask & (RXE_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) {
                if (resid > mtu) {
                        if (pktlen != mtu || bth_pad(pkt)) {
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                } else {
                        if (pktlen != resid) {
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                        if ((bth_pad(pkt) != (0x3 & (-resid)))) {
                                /* This case may not be exactly that
                                 * but nothing else fits.
                                 */
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                }
        }

        WARN_ON_ONCE(qp->resp.mr);

        qp->resp.mr = mr;
        return RESPST_EXECUTE;

err:
        qp->resp.mr = NULL;
        if (mr)
                rxe_put(mr);
        if (mw)
                rxe_put(mw);

        return state;
}

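/* copy payload data into the receive WQE's scatter/gather list */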
static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
                                     int data_len)
{
        int err;

        err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
                        data_addr, data_len, RXE_TO_MR_OBJ);
        if (unlikely(err))
                return (err == -ENOSPC) ? RESPST_ERR_LENGTH
                                        : RESPST_ERR_MALFORMED_WQE;

        return RESPST_NONE;
}

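/* copy an RDMA write payload into the target MR at the current
 * virtual address
 */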
static enum resp_states write_data_in(struct rxe_qp *qp,
                                      struct rxe_pkt_info *pkt)
{
        enum resp_states rc = RESPST_NONE;
        int     err;
        int data_len = payload_size(pkt);

        err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
                          payload_addr(pkt), data_len, RXE_TO_MR_OBJ);
        if (err) {
                rc = RESPST_ERR_RKEY_VIOLATION;
                goto out;
        }

        qp->resp.va += data_len;
        qp->resp.resid -= data_len;

out:
        return rc;
}

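/* claim the next entry in the responder resource ring and initialize
 * it to track a read, atomic, atomic write or flush operation so
 * the operation can be replayed if the request is duplicated
 */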
static struct resp_res *rxe_prepare_res(struct rxe_qp *qp,
                                        struct rxe_pkt_info *pkt,
                                        int type)
{
        struct resp_res *res;
        u32 pkts;

        res = &qp->resp.resources[qp->resp.res_head];
        rxe_advance_resp_resource(qp);
        free_rd_atomic_resource(res);

        res->type = type;
        res->replay = 0;

        switch (type) {
        case RXE_READ_MASK:
                res->read.va = qp->resp.va + qp->resp.offset;
                res->read.va_org = qp->resp.va + qp->resp.offset;
                res->read.resid = qp->resp.resid;
                res->read.length = qp->resp.resid;
                res->read.rkey = qp->resp.rkey;

                pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
                res->first_psn = pkt->psn;
                res->cur_psn = pkt->psn;
                res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;

                res->state = rdatm_res_state_new;
                break;
        case RXE_ATOMIC_MASK:
        case RXE_ATOMIC_WRITE_MASK:
                res->first_psn = pkt->psn;
                res->last_psn = pkt->psn;
                res->cur_psn = pkt->psn;
                break;
        case RXE_FLUSH_MASK:
                res->flush.va = qp->resp.va + qp->resp.offset;
                res->flush.length = qp->resp.length;
                res->flush.type = feth_plt(pkt);
                res->flush.level = feth_sel(pkt);
        }

        return res;
}

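/* carry out a flush operation over the requested range or the whole
 * MR, for persistence and/or global visibility as requested
 */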
static enum resp_states process_flush(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        u64 length, start;
        struct rxe_mr *mr = qp->resp.mr;
        struct resp_res *res = qp->resp.res;

        /* oA19-14, oA19-15 */
        if (res && res->replay)
                return RESPST_ACKNOWLEDGE;
        else if (!res) {
                res = rxe_prepare_res(qp, pkt, RXE_FLUSH_MASK);
                qp->resp.res = res;
        }

        if (res->flush.level == IB_FLUSH_RANGE) {
                start = res->flush.va;
                length = res->flush.length;
        } else { /* level == IB_FLUSH_MR */
                start = mr->ibmr.iova;
                length = mr->ibmr.length;
        }

        if (res->flush.type & IB_FLUSH_PERSISTENT) {
                if (rxe_flush_pmem_iova(mr, start, length))
                        return RESPST_ERR_RKEY_VIOLATION;
                /* Make data persistent. */
                wmb();
        } else if (res->flush.type & IB_FLUSH_GLOBAL) {
                /* Make data globally visible. */
                wmb();
        }

        qp->resp.msn++;

        /* next expected psn, read handles this separately */
        qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
        qp->resp.ack_psn = qp->resp.psn;

        qp->resp.opcode = pkt->opcode;
        qp->resp.status = IB_WC_SUCCESS;

        return RESPST_ACKNOWLEDGE;
}

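/* execute an atomic operation against the MR, saving the original
 * value so the ack can be replayed if the request is duplicated
 */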
static enum resp_states atomic_reply(struct rxe_qp *qp,
                                     struct rxe_pkt_info *pkt)
{
        struct rxe_mr *mr = qp->resp.mr;
        struct resp_res *res = qp->resp.res;
        int err;

        if (!res) {
                res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
                qp->resp.res = res;
        }

        if (!res->replay) {
                u64 iova = qp->resp.va + qp->resp.offset;

                err = rxe_mr_do_atomic_op(mr, iova, pkt->opcode,
                                          atmeth_comp(pkt),
                                          atmeth_swap_add(pkt),
                                          &res->atomic.orig_val);
                if (err)
                        return err;

                qp->resp.msn++;

                /* next expected psn, read handles this separately */
                qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
                qp->resp.ack_psn = qp->resp.psn;

                qp->resp.opcode = pkt->opcode;
                qp->resp.status = IB_WC_SUCCESS;
        }

        return RESPST_ACKNOWLEDGE;
}

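/* execute an atomic write, storing the 64 bit payload into the MR
 * in a single atomic operation
 */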
static enum resp_states atomic_write_reply(struct rxe_qp *qp,
                                           struct rxe_pkt_info *pkt)
{
        struct resp_res *res = qp->resp.res;
        struct rxe_mr *mr;
        u64 value;
        u64 iova;
        int err;

        if (!res) {
                res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
                qp->resp.res = res;
        }

        if (res->replay)
                return RESPST_ACKNOWLEDGE;

        mr = qp->resp.mr;
        value = *(u64 *)payload_addr(pkt);
        iova = qp->resp.va + qp->resp.offset;

        err = rxe_mr_do_atomic_write(mr, iova, value);
        if (err)
                return err;

        qp->resp.resid = 0;
        qp->resp.msn++;

        /* next expected psn, read handles this separately */
        qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
        qp->resp.ack_psn = qp->resp.psn;

        qp->resp.opcode = pkt->opcode;
        qp->resp.status = IB_WC_SUCCESS;

        return RESPST_ACKNOWLEDGE;
}

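/* allocate and initialize an ack, read response or atomic ack
 * packet addressed to the requester
 */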
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
                                          struct rxe_pkt_info *ack,
                                          int opcode,
                                          int payload,
                                          u32 psn,
                                          u8 syndrome)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct sk_buff *skb;
        int paylen;
        int pad;
        int err;

        /*
         * allocate packet
         */
        pad = (-payload) & 0x3;
        paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

        skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
        if (!skb)
                return NULL;

        ack->qp = qp;
        ack->opcode = opcode;
        ack->mask = rxe_opcode[opcode].mask;
        ack->paylen = paylen;
        ack->psn = psn;

        bth_init(ack, opcode, 0, 0, pad, IB_DEFAULT_PKEY_FULL,
                 qp->attr.dest_qp_num, 0, psn);

        if (ack->mask & RXE_AETH_MASK) {
                aeth_set_syn(ack, syndrome);
                aeth_set_msn(ack, qp->resp.msn);
        }

        if (ack->mask & RXE_ATMACK_MASK)
                atmack_set_orig(ack, qp->resp.res->atomic.orig_val);

        err = rxe_prepare(&qp->pri_av, ack, skb);
        if (err) {
                kfree_skb(skb);
                return NULL;
        }

        return skb;
}

/**
 * rxe_recheck_mr - revalidate MR from rkey and get a reference
 * @qp: the qp
 * @rkey: the rkey
 *
 * This code allows the MR to be invalidated or deregistered, or
 * the MW, if one was used, to be invalidated or deallocated.
 * It is assumed that the access permissions, if originally good,
 * are still OK and that the mappings are unchanged.
 *
 * TODO: If someone reregisters an MR to change its size or
 * access permissions during the processing of an RDMA read
 * we should kill the responder resource and complete the
 * operation with an error.
 *
 * Return: mr on success else NULL
 */
static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct rxe_mr *mr;
        struct rxe_mw *mw;

        if (rkey_is_mw(rkey)) {
                mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
                if (!mw)
                        return NULL;

                mr = mw->mr;
                if (mw->rkey != rkey || mw->state != RXE_MW_STATE_VALID ||
                    !mr || mr->state != RXE_MR_STATE_VALID) {
                        rxe_put(mw);
                        return NULL;
                }

                rxe_get(mr);
                rxe_put(mw);

                return mr;
        }

        mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
        if (!mr)
                return NULL;

        if (mr->rkey != rkey || mr->state != RXE_MR_STATE_VALID) {
                rxe_put(mr);
                return NULL;
        }

        return mr;
}

/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
                                   struct rxe_pkt_info *req_pkt)
{
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;
        int mtu = qp->mtu;
        enum resp_states state;
        int payload;
        int opcode;
        int err;
        struct resp_res *res = qp->resp.res;
        struct rxe_mr *mr;

        if (!res) {
                res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK);
                qp->resp.res = res;
        }

        if (res->state == rdatm_res_state_new) {
                if (!res->replay || qp->resp.length == 0) {
                        /* if length == 0 mr will be NULL (is ok)
                         * otherwise qp->resp.mr holds a ref on mr
                         * which we transfer to mr and drop below.
                         */
                        mr = qp->resp.mr;
                        qp->resp.mr = NULL;
                } else {
                        mr = rxe_recheck_mr(qp, res->read.rkey);
                        if (!mr)
                                return RESPST_ERR_RKEY_VIOLATION;
                }

                if (res->read.resid <= mtu)
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
                else
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
        } else {
                /* re-lookup mr from rkey on all later packets.
                 * length will be non-zero. This can fail if someone
                 * modifies or destroys the mr since the first packet.
                 */
                mr = rxe_recheck_mr(qp, res->read.rkey);
                if (!mr)
                        return RESPST_ERR_RKEY_VIOLATION;

                if (res->read.resid > mtu)
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
                else
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
        }

        res->state = rdatm_res_state_next;

        payload = min_t(int, res->read.resid, mtu);

        skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload,
                                 res->cur_psn, AETH_ACK_UNLIMITED);
        if (!skb) {
                state = RESPST_ERR_RNR;
                goto err_out;
        }

        err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
                          payload, RXE_FROM_MR_OBJ);
        if (err) {
                kfree_skb(skb);
                state = RESPST_ERR_RKEY_VIOLATION;
                goto err_out;
        }

        if (bth_pad(&ack_pkt)) {
                u8 *pad = payload_addr(&ack_pkt) + payload;

                memset(pad, 0, bth_pad(&ack_pkt));
        }

        /* rxe_xmit_packet always consumes the skb */
        err = rxe_xmit_packet(qp, &ack_pkt, skb);
        if (err) {
                state = RESPST_ERR_RNR;
                goto err_out;
        }

        res->read.va += payload;
        res->read.resid -= payload;
        res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

        if (res->read.resid > 0) {
                state = RESPST_DONE;
        } else {
                qp->resp.res = NULL;
                if (!res->replay)
                        qp->resp.opcode = -1;
                if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
                        qp->resp.psn = res->cur_psn;
                state = RESPST_CLEANUP;
        }

err_out:
        if (mr)
                rxe_put(mr);
        return state;
}

static int invalidate_rkey(struct rxe_qp *qp, u32 rkey)
{
        if (rkey_is_mw(rkey))
                return rxe_invalidate_mw(qp, rkey);
        else
                return rxe_invalidate_mr(qp, rkey);
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
        enum resp_states err;
        struct sk_buff *skb = PKT_TO_SKB(pkt);
        union rdma_network_hdr hdr;

        if (pkt->mask & RXE_SEND_MASK) {
                if (qp_type(qp) == IB_QPT_UD ||
                    qp_type(qp) == IB_QPT_GSI) {
                        if (skb->protocol == htons(ETH_P_IP)) {
                                memset(&hdr.reserved, 0,
                                                sizeof(hdr.reserved));
                                memcpy(&hdr.roce4grh, ip_hdr(skb),
                                                sizeof(hdr.roce4grh));
                                err = send_data_in(qp, &hdr, sizeof(hdr));
                        } else {
                                err = send_data_in(qp, ipv6_hdr(skb),
                                                sizeof(hdr));
                        }
                        if (err)
                                return err;
                }
                err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
                if (err)
                        return err;
        } else if (pkt->mask & RXE_WRITE_MASK) {
                err = write_data_in(qp, pkt);
                if (err)
                        return err;
        } else if (pkt->mask & RXE_READ_MASK) {
                /* For RDMA Read we can increment the msn now. See C9-148. */
                qp->resp.msn++;
                return RESPST_READ_REPLY;
        } else if (pkt->mask & RXE_ATOMIC_MASK) {
                return RESPST_ATOMIC_REPLY;
        } else if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
                return RESPST_ATOMIC_WRITE_REPLY;
        } else if (pkt->mask & RXE_FLUSH_MASK) {
                return RESPST_PROCESS_FLUSH;
        } else {
                /* Unreachable */
                WARN_ON_ONCE(1);
        }

        if (pkt->mask & RXE_IETH_MASK) {
                u32 rkey = ieth_rkey(pkt);

                err = invalidate_rkey(qp, rkey);
                if (err)
                        return RESPST_ERR_INVALIDATE_RKEY;
        }

        if (pkt->mask & RXE_END_MASK)
                /* We successfully processed this new request. */
                qp->resp.msn++;

        /* next expected psn, read handles this separately */
        qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
        qp->resp.ack_psn = qp->resp.psn;

        qp->resp.opcode = pkt->opcode;
        qp->resp.status = IB_WC_SUCCESS;

        if (pkt->mask & RXE_COMP_MASK)
                return RESPST_COMPLETE;
        else if (qp_type(qp) == IB_QPT_RC)
                return RESPST_ACKNOWLEDGE;
        else
                return RESPST_CLEANUP;
}

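/* generate a receive work completion for the current WQE and post
 * it to the receive completion queue, filling in the fields that
 * differ between kernel and user space consumers
 */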
static enum resp_states do_complete(struct rxe_qp *qp,
                                    struct rxe_pkt_info *pkt)
{
        struct rxe_cqe cqe;
        struct ib_wc *wc = &cqe.ibwc;
        struct ib_uverbs_wc *uwc = &cqe.uibwc;
        struct rxe_recv_wqe *wqe = qp->resp.wqe;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        unsigned long flags;

        if (!wqe)
                goto finish;

        memset(&cqe, 0, sizeof(cqe));

        if (qp->rcq->is_user) {
                uwc->status             = qp->resp.status;
                uwc->qp_num             = qp->ibqp.qp_num;
                uwc->wr_id              = wqe->wr_id;
        } else {
                wc->status              = qp->resp.status;
                wc->qp                  = &qp->ibqp;
                wc->wr_id               = wqe->wr_id;
        }

        if (wc->status == IB_WC_SUCCESS) {
                rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
                wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
                                pkt->mask & RXE_WRITE_MASK) ?
                                        IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
                wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
                                pkt->mask & RXE_WRITE_MASK) ?
                                        qp->resp.length : wqe->dma.length - wqe->dma.resid;

                /* fields after byte_len are different between kernel and user
                 * space
                 */
                if (qp->rcq->is_user) {
                        uwc->wc_flags = IB_WC_GRH;

                        if (pkt->mask & RXE_IMMDT_MASK) {
                                uwc->wc_flags |= IB_WC_WITH_IMM;
                                uwc->ex.imm_data = immdt_imm(pkt);
                        }

                        if (pkt->mask & RXE_IETH_MASK) {
                                uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
                                uwc->ex.invalidate_rkey = ieth_rkey(pkt);
                        }

                        if (pkt->mask & RXE_DETH_MASK)
                                uwc->src_qp = deth_sqp(pkt);

                        uwc->port_num           = qp->attr.port_num;
                } else {
                        struct sk_buff *skb = PKT_TO_SKB(pkt);

                        wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
                        if (skb->protocol == htons(ETH_P_IP))
                                wc->network_hdr_type = RDMA_NETWORK_IPV4;
                        else
                                wc->network_hdr_type = RDMA_NETWORK_IPV6;

                        if (is_vlan_dev(skb->dev)) {
                                wc->wc_flags |= IB_WC_WITH_VLAN;
                                wc->vlan_id = vlan_dev_vlan_id(skb->dev);
                        }

                        if (pkt->mask & RXE_IMMDT_MASK) {
                                wc->wc_flags |= IB_WC_WITH_IMM;
                                wc->ex.imm_data = immdt_imm(pkt);
                        }

                        if (pkt->mask & RXE_IETH_MASK) {
                                wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                                wc->ex.invalidate_rkey = ieth_rkey(pkt);
                        }

                        if (pkt->mask & RXE_DETH_MASK)
                                wc->src_qp = deth_sqp(pkt);

                        wc->port_num            = qp->attr.port_num;
                }
        } else {
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        rxe_err_qp(qp, "non-flush error status = %d",
                                wc->status);
        }

        /* have copy for srq and reference for !srq */
        if (!qp->srq)
                queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT);

        qp->resp.wqe = NULL;

        if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
                return RESPST_ERR_CQ_OVERFLOW;

finish:
        spin_lock_irqsave(&qp->state_lock, flags);
        if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
                spin_unlock_irqrestore(&qp->state_lock, flags);
                return RESPST_CHK_RESOURCE;
        }
        spin_unlock_irqrestore(&qp->state_lock, flags);

        if (unlikely(!pkt))
                return RESPST_DONE;
        if (qp_type(qp) == IB_QPT_RC)
                return RESPST_ACKNOWLEDGE;
        else
                return RESPST_CLEANUP;
}

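/* build and transmit a single ack packet with the given syndrome
 * and PSN
 */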
static int send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn,
                                  int opcode, const char *msg)
{
        int err;
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;

        skb = prepare_ack_packet(qp, &ack_pkt, opcode, 0, psn, syndrome);
        if (!skb)
                return -ENOMEM;

        err = rxe_xmit_packet(qp, &ack_pkt, skb);
        if (err)
                rxe_dbg_qp(qp, "Failed sending %s\n", msg);

        return err;
}

static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
        return send_common_ack(qp, syndrome, psn,
                        IB_OPCODE_RC_ACKNOWLEDGE, "ACK");
}

static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
        int ret = send_common_ack(qp, syndrome, psn,
                        IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, "ATOMIC ACK");

        /* have to clear this since it is used to trigger
         * long read replies
         */
        qp->resp.res = NULL;
        return ret;
}

static int send_read_response_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
        int ret = send_common_ack(qp, syndrome, psn,
                        IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY,
                        "RDMA READ response of length zero ACK");

        /* have to clear this since it is used to trigger
         * long read replies
         */
        qp->resp.res = NULL;
        return ret;
}

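/* for RC QPs send the type of ack appropriate to the completed
 * operation; other QP types do not generate acks
 */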
static enum resp_states acknowledge(struct rxe_qp *qp,
                                    struct rxe_pkt_info *pkt)
{
        if (qp_type(qp) != IB_QPT_RC)
                return RESPST_CLEANUP;

        if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
                send_ack(qp, qp->resp.aeth_syndrome, pkt->psn);
        else if (pkt->mask & RXE_ATOMIC_MASK)
                send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
        else if (pkt->mask & (RXE_FLUSH_MASK | RXE_ATOMIC_WRITE_MASK))
                send_read_response_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
        else if (bth_ack(pkt))
                send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);

        return RESPST_CLEANUP;
}

static enum resp_states cleanup(struct rxe_qp *qp,
                                struct rxe_pkt_info *pkt)
{
        struct sk_buff *skb;

        if (pkt) {
                skb = skb_dequeue(&qp->req_pkts);
                rxe_put(qp);
                kfree_skb(skb);
                ib_device_put(qp->ibqp.device);
        }

        if (qp->resp.mr) {
                rxe_put(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        return RESPST_DONE;
}

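/* find the responder resource, if any, that covers the given PSN */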
static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
        int i;

        for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                struct resp_res *res = &qp->resp.resources[i];

                if (res->type == 0)
                        continue;

                if (psn_compare(psn, res->first_psn) >= 0 &&
                    psn_compare(psn, res->last_psn) <= 0) {
                        return res;
                }
        }

        return NULL;
}

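/* handle a duplicate request packet: sends and writes are simply
 * re-acked, while reads, atomics and flushes are replayed from the
 * saved responder resources
 */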
static enum resp_states duplicate_request(struct rxe_qp *qp,
                                          struct rxe_pkt_info *pkt)
{
        enum resp_states rc;
        u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;

        if (pkt->mask & RXE_SEND_MASK ||
            pkt->mask & RXE_WRITE_MASK) {
                /* SEND. Ack again and cleanup. C9-105. */
                send_ack(qp, AETH_ACK_UNLIMITED, prev_psn);
                return RESPST_CLEANUP;
        } else if (pkt->mask & RXE_FLUSH_MASK) {
                struct resp_res *res;

                /* Find the operation in our list of responder resources. */
                res = find_resource(qp, pkt->psn);
                if (res) {
                        res->replay = 1;
                        res->cur_psn = pkt->psn;
                        qp->resp.res = res;
                        rc = RESPST_PROCESS_FLUSH;
                        goto out;
                }

                /* Resource not found. Class D error. Drop the request. */
                rc = RESPST_CLEANUP;
                goto out;
        } else if (pkt->mask & RXE_READ_MASK) {
                struct resp_res *res;

                res = find_resource(qp, pkt->psn);
                if (!res) {
                        /* Resource not found. Class D error.  Drop the
                         * request.
                         */
                        rc = RESPST_CLEANUP;
                        goto out;
                } else {
                        /* Ensure this new request is the same as the previous
                         * one or a subset of it.
                         */
                        u64 iova = reth_va(pkt);
                        u32 resid = reth_len(pkt);

                        if (iova < res->read.va_org ||
                            resid > res->read.length ||
                            (iova + resid) > (res->read.va_org +
                                              res->read.length)) {
                                rc = RESPST_CLEANUP;
                                goto out;
                        }

                        if (reth_rkey(pkt) != res->read.rkey) {
                                rc = RESPST_CLEANUP;
                                goto out;
                        }

                        res->cur_psn = pkt->psn;
                        res->state = (pkt->psn == res->first_psn) ?
                                        rdatm_res_state_new :
                                        rdatm_res_state_replay;
                        res->replay = 1;

                        /* Reset the resource, except length. */
                        res->read.va_org = iova;
                        res->read.va = iova;
                        res->read.resid = resid;

                        /* Replay the RDMA read reply. */
                        qp->resp.res = res;
                        rc = RESPST_READ_REPLY;
                        goto out;
                }
        } else {
                struct resp_res *res;

                /* Find the operation in our list of responder resources. */
                res = find_resource(qp, pkt->psn);
                if (res) {
                        res->replay = 1;
                        res->cur_psn = pkt->psn;
                        qp->resp.res = res;
                        rc = pkt->mask & RXE_ATOMIC_MASK ?
                                        RESPST_ATOMIC_REPLY :
                                        RESPST_ATOMIC_WRITE_REPLY;
                        goto out;
                }

                /* Resource not found. Class D error. Drop the request. */
                rc = RESPST_CLEANUP;
                goto out;
        }
out:
        return rc;
}

/* Process a class A or C error. Both are treated the same in this
 * implementation.
 */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
                              enum ib_wc_status status)
{
        qp->resp.aeth_syndrome  = syndrome;
        qp->resp.status         = status;

        /* indicate that we should go through the ERROR state */
        qp->resp.goto_error     = 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
        /* UC */
        if (qp->srq) {
                /* Class E */
                qp->resp.drop_msg = 1;
                if (qp->resp.wqe) {
                        qp->resp.status = IB_WC_REM_INV_REQ_ERR;
                        return RESPST_COMPLETE;
                } else {
                        return RESPST_CLEANUP;
                }
        } else {
                /* Class D1. This packet may be the start of a
                 * new message and could be valid. The previous
                 * message is invalid and ignored. reset the
                 * recv wr to its original state
                 */
                if (qp->resp.wqe) {
                        qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
                        qp->resp.wqe->dma.cur_sge = 0;
                        qp->resp.wqe->dma.sge_offset = 0;
                        qp->resp.opcode = -1;
                }

                if (qp->resp.mr) {
                        rxe_put(qp->resp.mr);
                        qp->resp.mr = NULL;
                }

                return RESPST_CLEANUP;
        }
}

/* drain incoming request packet queue */
static void drain_req_pkts(struct rxe_qp *qp)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&qp->req_pkts))) {
                rxe_put(qp);
                kfree_skb(skb);
                ib_device_put(qp->ibqp.device);
        }
}

/* complete receive wqe with flush error */
static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
{
        struct rxe_cqe cqe = {};
        struct ib_wc *wc = &cqe.ibwc;
        struct ib_uverbs_wc *uwc = &cqe.uibwc;
        int err;

        if (qp->rcq->is_user) {
                uwc->wr_id = wqe->wr_id;
                uwc->status = IB_WC_WR_FLUSH_ERR;
                uwc->qp_num = qp_num(qp);
        } else {
                wc->wr_id = wqe->wr_id;
                wc->status = IB_WC_WR_FLUSH_ERR;
                wc->qp = &qp->ibqp;
        }

        err = rxe_cq_post(qp->rcq, &cqe, 0);
        if (err)
                rxe_dbg_cq(qp->rcq, "post cq failed err = %d", err);

        return err;
}

1441 /* drain and optionally complete the recive queue
1442  * if unable to complete a wqe stop completing and
1443  * just flush the remaining wqes
1444  */
static void flush_recv_queue(struct rxe_qp *qp, bool notify)
{
        struct rxe_queue *q = qp->rq.queue;
        struct rxe_recv_wqe *wqe;
        int err;

        /* recv wqes on an srq are shared with other qps and
         * are not flushed here
         */
        if (qp->srq)
                return;

        while ((wqe = queue_head(q, q->type))) {
                if (notify) {
                        err = flush_recv_wqe(qp, wqe);
                        if (err)
                                notify = 0;
                }
                queue_advance_consumer(q, q->type);
        }

        qp->resp.wqe = NULL;
}
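
/* When the qp moves to the error state its recv wqes are completed
 * with a flush status; on a reset they are discarded without
 * completions (see the notify flag computed in rxe_responder() below).
 */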
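
/* The responder state machine. rxe_do_task() calls this repeatedly;
 * each pass normally takes one request packet from qp->req_pkts and
 * walks it through the RESPST_* states below until reaching
 * RESPST_DONE, RESPST_EXIT or RESPST_ERROR.
 */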
int rxe_responder(struct rxe_qp *qp)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        enum resp_states state;
        struct rxe_pkt_info *pkt = NULL;
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&qp->state_lock, flags);
        if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
                          qp_state(qp) == IB_QPS_RESET) {
                bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);

                drain_req_pkts(qp);
                flush_recv_queue(qp, notify);
                spin_unlock_irqrestore(&qp->state_lock, flags);
                goto exit;
        }
        spin_unlock_irqrestore(&qp->state_lock, flags);

        qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

        state = RESPST_GET_REQ;

        while (1) {
                rxe_dbg_qp(qp, "state = %s\n", resp_state_name[state]);
                switch (state) {
                case RESPST_GET_REQ:
                        state = get_req(qp, &pkt);
                        break;
                case RESPST_CHK_PSN:
                        state = check_psn(qp, pkt);
                        break;
                case RESPST_CHK_OP_SEQ:
                        state = check_op_seq(qp, pkt);
                        break;
                case RESPST_CHK_OP_VALID:
                        state = check_op_valid(qp, pkt);
                        break;
                case RESPST_CHK_RESOURCE:
                        state = check_resource(qp, pkt);
                        break;
                case RESPST_CHK_LENGTH:
                        state = rxe_resp_check_length(qp, pkt);
                        break;
                case RESPST_CHK_RKEY:
                        state = check_rkey(qp, pkt);
                        break;
                case RESPST_EXECUTE:
                        state = execute(qp, pkt);
                        break;
                case RESPST_COMPLETE:
                        state = do_complete(qp, pkt);
                        break;
                case RESPST_READ_REPLY:
                        state = read_reply(qp, pkt);
                        break;
                case RESPST_ATOMIC_REPLY:
                        state = atomic_reply(qp, pkt);
                        break;
                case RESPST_ATOMIC_WRITE_REPLY:
                        state = atomic_write_reply(qp, pkt);
                        break;
                case RESPST_PROCESS_FLUSH:
                        state = process_flush(qp, pkt);
                        break;
                case RESPST_ACKNOWLEDGE:
                        state = acknowledge(qp, pkt);
                        break;
                case RESPST_CLEANUP:
                        state = cleanup(qp, pkt);
                        break;
                case RESPST_DUPLICATE_REQUEST:
                        state = duplicate_request(qp, pkt);
                        break;
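
                /* The remaining cases handle error conditions; the
                 * class letters in the comments refer to the IBA
                 * responder error classes.
                 */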
                case RESPST_ERR_PSN_OUT_OF_SEQ:
                        /* RC only - Class B. Drop packet. */
                        send_ack(qp, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
                        state = RESPST_CLEANUP;
                        break;

                case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
                case RESPST_ERR_MISSING_OPCODE_FIRST:
                case RESPST_ERR_MISSING_OPCODE_LAST_C:
                case RESPST_ERR_UNSUPPORTED_OPCODE:
                case RESPST_ERR_MISALIGNED_ATOMIC:
                        /* RC only - Class C. */
                        do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
                                          IB_WC_REM_INV_REQ_ERR);
                        state = RESPST_COMPLETE;
                        break;

                case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
                        state = do_class_d1e_error(qp);
                        break;
                case RESPST_ERR_RNR:
                        if (qp_type(qp) == IB_QPT_RC) {
                                rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
                                /* RC - class B */
                                send_ack(qp, AETH_RNR_NAK |
                                         (~AETH_TYPE_MASK &
                                         qp->attr.min_rnr_timer),
                                         pkt->psn);
                        } else {
                                /* UD/UC - class D */
                                qp->resp.drop_msg = 1;
                        }
                        state = RESPST_CLEANUP;
                        break;

                case RESPST_ERR_RKEY_VIOLATION:
                        if (qp_type(qp) == IB_QPT_RC) {
                                /* Class C */
                                do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
                                                  IB_WC_REM_ACCESS_ERR);
                                state = RESPST_COMPLETE;
                        } else {
                                qp->resp.drop_msg = 1;
                                if (qp->srq) {
                                        /* UC/SRQ Class D */
                                        qp->resp.status = IB_WC_REM_ACCESS_ERR;
                                        state = RESPST_COMPLETE;
                                } else {
                                        /* UC/non-SRQ Class E. */
                                        state = RESPST_CLEANUP;
                                }
                        }
                        break;

                case RESPST_ERR_INVALIDATE_RKEY:
                        /* RC - Class J. */
                        qp->resp.goto_error = 1;
                        qp->resp.status = IB_WC_REM_INV_REQ_ERR;
                        state = RESPST_COMPLETE;
                        break;

                case RESPST_ERR_LENGTH:
                        if (qp_type(qp) == IB_QPT_RC) {
                                /* Class C */
                                do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
                                                  IB_WC_REM_INV_REQ_ERR);
                                state = RESPST_COMPLETE;
                        } else if (qp->srq) {
                                /* UC/UD - class E */
                                qp->resp.status = IB_WC_REM_INV_REQ_ERR;
                                state = RESPST_COMPLETE;
                        } else {
                                /* UC/UD - class D */
                                qp->resp.drop_msg = 1;
                                state = RESPST_CLEANUP;
                        }
                        break;

                case RESPST_ERR_MALFORMED_WQE:
                        /* All, Class A. */
                        do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
                                          IB_WC_LOC_QP_OP_ERR);
                        state = RESPST_COMPLETE;
                        break;

                case RESPST_ERR_CQ_OVERFLOW:
                        /* All - Class G */
                        state = RESPST_ERROR;
                        break;

                case RESPST_DONE:
                        if (qp->resp.goto_error) {
                                state = RESPST_ERROR;
                                break;
                        }

                        goto done;

                case RESPST_EXIT:
                        if (qp->resp.goto_error) {
                                state = RESPST_ERROR;
                                break;
                        }

                        goto exit;

                case RESPST_ERROR:
                        qp->resp.goto_error = 0;
                        rxe_dbg_qp(qp, "moved to error state\n");
                        rxe_qp_error(qp);
                        goto exit;

                default:
                        WARN_ON_ONCE(1);
                }
        }

        /* A non-zero return value will cause rxe_do_task to exit its
         * loop and end the tasklet. A zero return will continue
         * looping and call rxe_responder again.
         */
done:
        ret = 0;
        goto out;
exit:
        ret = -EAGAIN;
out:
        return ret;
}