drivers/infiniband/sw/rxe/rxe_qp.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

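/* check that the requested QP capabilities fit within the device limits */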
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
                          int has_srq)
{
        if (cap->max_send_wr > rxe->attr.max_qp_wr) {
                rxe_dbg_dev(rxe, "invalid send wr = %u > %d\n",
                         cap->max_send_wr, rxe->attr.max_qp_wr);
                goto err1;
        }

        if (cap->max_send_sge > rxe->attr.max_send_sge) {
                rxe_dbg_dev(rxe, "invalid send sge = %u > %d\n",
                         cap->max_send_sge, rxe->attr.max_send_sge);
                goto err1;
        }

        if (!has_srq) {
                if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
                        rxe_dbg_dev(rxe, "invalid recv wr = %u > %d\n",
                                 cap->max_recv_wr, rxe->attr.max_qp_wr);
                        goto err1;
                }

                if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
                        rxe_dbg_dev(rxe, "invalid recv sge = %u > %d\n",
                                 cap->max_recv_sge, rxe->attr.max_recv_sge);
                        goto err1;
                }
        }

        if (cap->max_inline_data > rxe->max_inline_data) {
                rxe_dbg_dev(rxe, "invalid max inline data = %u > %d\n",
                         cap->max_inline_data, rxe->max_inline_data);
                goto err1;
        }

        return 0;

err1:
        return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
        struct ib_qp_cap *cap = &init->cap;
        struct rxe_port *port;
        int port_num = init->port_num;

        switch (init->qp_type) {
        case IB_QPT_GSI:
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (!init->recv_cq || !init->send_cq) {
                rxe_dbg_dev(rxe, "missing cq\n");
                goto err1;
        }

        if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
                goto err1;

        if (init->qp_type == IB_QPT_GSI) {
                if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
                        rxe_dbg_dev(rxe, "invalid port = %d\n", port_num);
                        goto err1;
                }

                port = &rxe->port;

                if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
                        rxe_dbg_dev(rxe, "GSI QP exists for port %d\n", port_num);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

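/* allocate the responder-side resources used to process incoming RDMA read
 * and atomic requests
 */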
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
        qp->resp.res_head = 0;
        qp->resp.res_tail = 0;
        qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

        if (!qp->resp.resources)
                return -ENOMEM;

        return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
        if (qp->resp.resources) {
                int i;

                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        struct resp_res *res = &qp->resp.resources[i];

                        free_rd_atomic_resource(res);
                }
                kfree(qp->resp.resources);
                qp->resp.resources = NULL;
        }
}

void free_rd_atomic_resource(struct resp_res *res)
{
        res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
        int i;
        struct resp_res *res;

        if (qp->resp.resources) {
                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        res = &qp->resp.resources[i];
                        free_rd_atomic_resource(res);
                }
        }
}

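/* set up the parts of the QP that do not depend on the send or receive queues */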
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
                             struct ib_qp_init_attr *init)
{
        struct rxe_port *port;
        u32 qpn;

        qp->sq_sig_type         = init->sq_sig_type;
        qp->attr.path_mtu       = 1;
        qp->mtu                 = ib_mtu_enum_to_int(qp->attr.path_mtu);

        qpn                     = qp->elem.index;
        port                    = &rxe->port;

        switch (init->qp_type) {
        case IB_QPT_GSI:
                qp->ibqp.qp_num         = 1;
                port->qp_gsi_index      = qpn;
                qp->attr.port_num       = init->port_num;
                break;

        default:
                qp->ibqp.qp_num         = qpn;
                break;
        }

        spin_lock_init(&qp->state_lock);

        spin_lock_init(&qp->sq.sq_lock);
        spin_lock_init(&qp->rq.producer_lock);
        spin_lock_init(&qp->rq.consumer_lock);

        atomic_set(&qp->ssn, 0);
        atomic_set(&qp->skb_out, 0);
}

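/* set up the requester side of the QP: the UDP socket and source port,
 * the send queue, and the requester and completer tasks
 */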
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
                           struct ib_qp_init_attr *init, struct ib_udata *udata,
                           struct rxe_create_qp_resp __user *uresp)
{
        int err;
        int wqe_size;
        enum queue_type type;

        err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
        if (err < 0)
                return err;
        qp->sk->sk->sk_user_data = qp;

        /* pick a source UDP port number for this QP based on
         * the source QPN. this spreads traffic for different QPs
         * across different NIC RX queues (while using a single
         * flow for a given QP to maintain packet order).
         * the port number must be in the Dynamic Ports range
         * (0xc000 - 0xffff).
         */
        qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
        qp->sq.max_wr           = init->cap.max_send_wr;

        /* These caps are limited by rxe_qp_chk_cap() done by the caller */
        wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
                         init->cap.max_inline_data);
        qp->sq.max_sge = init->cap.max_send_sge =
                wqe_size / sizeof(struct ib_sge);
        qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
        wqe_size += sizeof(struct rxe_send_wqe);

        type = QUEUE_TYPE_FROM_CLIENT;
        qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
                                wqe_size, type);
        if (!qp->sq.queue)
                return -ENOMEM;

        err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
                           qp->sq.queue->buf, qp->sq.queue->buf_size,
                           &qp->sq.queue->ip);

        if (err) {
                vfree(qp->sq.queue->buf);
                kfree(qp->sq.queue);
                qp->sq.queue = NULL;
                return err;
        }

        qp->req.wqe_index = queue_get_producer(qp->sq.queue,
                                               QUEUE_TYPE_FROM_CLIENT);

        qp->req.opcode          = -1;
        qp->comp.opcode         = -1;

        skb_queue_head_init(&qp->req_pkts);

        rxe_init_task(&qp->req.task, qp, rxe_requester);
        rxe_init_task(&qp->comp.task, qp, rxe_completer);

        qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
        if (init->qp_type == IB_QPT_RC) {
                timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
                timer_setup(&qp->retrans_timer, retransmit_timer, 0);
        }
        return 0;
}

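/* set up the responder side of the QP: the receive queue (unless an SRQ is
 * used) and the responder task
 */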
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
                            struct ib_qp_init_attr *init,
                            struct ib_udata *udata,
                            struct rxe_create_qp_resp __user *uresp)
{
        int err;
        int wqe_size;
        enum queue_type type;

        if (!qp->srq) {
                qp->rq.max_wr           = init->cap.max_recv_wr;
                qp->rq.max_sge          = init->cap.max_recv_sge;

                wqe_size = rcv_wqe_size(qp->rq.max_sge);

                type = QUEUE_TYPE_FROM_CLIENT;
                qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
                                        wqe_size, type);
                if (!qp->rq.queue)
                        return -ENOMEM;

                err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
                                   qp->rq.queue->buf, qp->rq.queue->buf_size,
                                   &qp->rq.queue->ip);
                if (err) {
                        vfree(qp->rq.queue->buf);
                        kfree(qp->rq.queue);
                        qp->rq.queue = NULL;
                        return err;
                }
        }

        skb_queue_head_init(&qp->resp_pkts);

        rxe_init_task(&qp->resp.task, qp, rxe_responder);

        qp->resp.opcode         = OPCODE_NONE;
        qp->resp.msn            = 0;

        return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
                     struct ib_qp_init_attr *init,
                     struct rxe_create_qp_resp __user *uresp,
                     struct ib_pd *ibpd,
                     struct ib_udata *udata)
{
        int err;
        struct rxe_cq *rcq = to_rcq(init->recv_cq);
        struct rxe_cq *scq = to_rcq(init->send_cq);
        struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

        rxe_get(pd);
        rxe_get(rcq);
        rxe_get(scq);
        if (srq)
                rxe_get(srq);

        qp->pd                  = pd;
        qp->rcq                 = rcq;
        qp->scq                 = scq;
        qp->srq                 = srq;

        atomic_inc(&rcq->num_wq);
        atomic_inc(&scq->num_wq);

        rxe_qp_init_misc(rxe, qp, init);

        err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
        if (err)
                goto err1;

        err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
        if (err)
                goto err2;

        spin_lock_bh(&qp->state_lock);
        qp->attr.qp_state = IB_QPS_RESET;
        qp->valid = 1;
        spin_unlock_bh(&qp->state_lock);

        return 0;

err2:
        rxe_queue_cleanup(qp->sq.queue);
        qp->sq.queue = NULL;
err1:
        atomic_dec(&rcq->num_wq);
        atomic_dec(&scq->num_wq);

        qp->pd = NULL;
        qp->rcq = NULL;
        qp->scq = NULL;
        qp->srq = NULL;

        if (srq)
                rxe_put(srq);
        rxe_put(scq);
        rxe_put(rcq);
        rxe_put(pd);

        return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
        init->event_handler             = qp->ibqp.event_handler;
        init->qp_context                = qp->ibqp.qp_context;
        init->send_cq                   = qp->ibqp.send_cq;
        init->recv_cq                   = qp->ibqp.recv_cq;
        init->srq                       = qp->ibqp.srq;

        init->cap.max_send_wr           = qp->sq.max_wr;
        init->cap.max_send_sge          = qp->sq.max_sge;
        init->cap.max_inline_data       = qp->sq.max_inline;

        if (!qp->srq) {
                init->cap.max_recv_wr           = qp->rq.max_wr;
                init->cap.max_recv_sge          = qp->rq.max_sge;
        }

        init->sq_sig_type               = qp->sq_sig_type;

        init->qp_type                   = qp->ibqp.qp_type;
        init->port_num                  = 1;

        return 0;
}

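/* validate the attributes passed to the modify qp verb */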
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
                    struct ib_qp_attr *attr, int mask)
{
        if (mask & IB_QP_PORT) {
                if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
                        rxe_dbg_qp(qp, "invalid port %d\n", attr->port_num);
                        goto err1;
                }
        }

        if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
                goto err1;

        if (mask & IB_QP_AV && rxe_av_chk_attr(qp, &attr->ah_attr))
                goto err1;

        if (mask & IB_QP_ALT_PATH) {
                if (rxe_av_chk_attr(qp, &attr->alt_ah_attr))
                        goto err1;
                if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
                        rxe_dbg_qp(qp, "invalid alt port %d\n", attr->alt_port_num);
                        goto err1;
                }
                if (attr->alt_timeout > 31) {
                        rxe_dbg_qp(qp, "invalid alt timeout %d > 31\n",
                                 attr->alt_timeout);
                        goto err1;
                }
        }

        if (mask & IB_QP_PATH_MTU) {
                struct rxe_port *port = &rxe->port;

                enum ib_mtu max_mtu = port->attr.max_mtu;
                enum ib_mtu mtu = attr->path_mtu;

                if (mtu > max_mtu) {
                        rxe_dbg_qp(qp, "invalid mtu (%d) > (%d)\n",
                                 ib_mtu_enum_to_int(mtu),
                                 ib_mtu_enum_to_int(max_mtu));
                        goto err1;
                }
        }

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
                        rxe_dbg_qp(qp, "invalid max_rd_atomic %d > %d\n",
                                 attr->max_rd_atomic,
                                 rxe->attr.max_qp_rd_atom);
                        goto err1;
                }
        }

        if (mask & IB_QP_TIMEOUT) {
                if (attr->timeout > 31) {
                        rxe_dbg_qp(qp, "invalid timeout %d > 31\n",
                                        attr->timeout);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
        /* stop tasks from running */
        rxe_disable_task(&qp->resp.task);
        rxe_disable_task(&qp->comp.task);
        rxe_disable_task(&qp->req.task);

        /* drain work and packet queues */
        rxe_requester(qp);
        rxe_completer(qp);
        rxe_responder(qp);

        if (qp->rq.queue)
                rxe_queue_reset(qp->rq.queue);
        if (qp->sq.queue)
                rxe_queue_reset(qp->sq.queue);

        /* cleanup attributes */
        atomic_set(&qp->ssn, 0);
        qp->req.opcode = -1;
        qp->req.need_retry = 0;
        qp->req.wait_for_rnr_timer = 0;
        qp->req.noack_pkts = 0;
        qp->resp.msn = 0;
        qp->resp.opcode = -1;
        qp->resp.drop_msg = 0;
        qp->resp.goto_error = 0;
        qp->resp.sent_psn_nak = 0;

        if (qp->resp.mr) {
                rxe_put(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        cleanup_rd_atomic_resources(qp);

        /* reenable tasks */
        rxe_enable_task(&qp->resp.task);
        rxe_enable_task(&qp->comp.task);
        rxe_enable_task(&qp->req.task);
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
        spin_lock_bh(&qp->state_lock);
        qp->attr.qp_state = IB_QPS_ERR;

        /* drain work and packet queues */
        rxe_sched_task(&qp->resp.task);
        rxe_sched_task(&qp->comp.task);
        rxe_sched_task(&qp->req.task);
        spin_unlock_bh(&qp->state_lock);
}

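/* move the qp to the send queue drained (SQD) state */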
static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
                       int mask)
{
        spin_lock_bh(&qp->state_lock);
        qp->attr.sq_draining = 1;
        rxe_sched_task(&qp->comp.task);
        rxe_sched_task(&qp->req.task);
        spin_unlock_bh(&qp->state_lock);
}

/* caller should hold qp->state_lock */
static int __qp_chk_state(struct rxe_qp *qp, struct ib_qp_attr *attr,
                            int mask)
{
        enum ib_qp_state cur_state;
        enum ib_qp_state new_state;

        cur_state = (mask & IB_QP_CUR_STATE) ?
                                attr->cur_qp_state : qp->attr.qp_state;
        new_state = (mask & IB_QP_STATE) ?
                                attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask))
                return -EINVAL;

        if (mask & IB_QP_STATE && cur_state == IB_QPS_SQD) {
                if (qp->attr.sq_draining && new_state != IB_QPS_ERR)
                        return -EINVAL;
        }

        return 0;
}

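/* printable QP state names used in debug output */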
static const char *const qps2str[] = {
        [IB_QPS_RESET]  = "RESET",
        [IB_QPS_INIT]   = "INIT",
        [IB_QPS_RTR]    = "RTR",
        [IB_QPS_RTS]    = "RTS",
        [IB_QPS_SQD]    = "SQD",
        [IB_QPS_SQE]    = "SQE",
        [IB_QPS_ERR]    = "ERR",
};

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
                     struct ib_udata *udata)
{
        int err;

        if (mask & IB_QP_CUR_STATE)
                qp->attr.cur_qp_state = attr->qp_state;

        if (mask & IB_QP_STATE) {
                spin_lock_bh(&qp->state_lock);
                err = __qp_chk_state(qp, attr, mask);
                if (!err) {
                        qp->attr.qp_state = attr->qp_state;
                        rxe_dbg_qp(qp, "state -> %s\n",
                                        qps2str[attr->qp_state]);
                }
                spin_unlock_bh(&qp->state_lock);

                if (err)
                        return err;

                switch (attr->qp_state) {
                case IB_QPS_RESET:
                        rxe_qp_reset(qp);
                        break;
                case IB_QPS_SQD:
                        rxe_qp_sqd(qp, attr, mask);
                        break;
                case IB_QPS_ERR:
                        rxe_qp_error(qp);
                        break;
                default:
                        break;
                }
        }

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                int max_rd_atomic = attr->max_rd_atomic ?
                        roundup_pow_of_two(attr->max_rd_atomic) : 0;

                qp->attr.max_rd_atomic = max_rd_atomic;
                atomic_set(&qp->req.rd_atomic, max_rd_atomic);
        }

        if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
                        roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

                qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

                free_rd_atomic_resources(qp);

                err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
                if (err)
                        return err;
        }

        if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
                qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

        if (mask & IB_QP_ACCESS_FLAGS)
                qp->attr.qp_access_flags = attr->qp_access_flags;

        if (mask & IB_QP_PKEY_INDEX)
                qp->attr.pkey_index = attr->pkey_index;

        if (mask & IB_QP_PORT)
                qp->attr.port_num = attr->port_num;

        if (mask & IB_QP_QKEY)
                qp->attr.qkey = attr->qkey;

        if (mask & IB_QP_AV)
                rxe_init_av(&attr->ah_attr, &qp->pri_av);

        if (mask & IB_QP_ALT_PATH) {
                rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
                qp->attr.alt_port_num = attr->alt_port_num;
                qp->attr.alt_pkey_index = attr->alt_pkey_index;
                qp->attr.alt_timeout = attr->alt_timeout;
        }

        if (mask & IB_QP_PATH_MTU) {
                qp->attr.path_mtu = attr->path_mtu;
                qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
        }

        if (mask & IB_QP_TIMEOUT) {
                qp->attr.timeout = attr->timeout;
                if (attr->timeout == 0) {
                        qp->qp_timeout_jiffies = 0;
                } else {
                        /* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
                        int j = nsecs_to_jiffies(4096ULL << attr->timeout);

                        qp->qp_timeout_jiffies = j ? j : 1;
                }
        }

        if (mask & IB_QP_RETRY_CNT) {
                qp->attr.retry_cnt = attr->retry_cnt;
                qp->comp.retry_cnt = attr->retry_cnt;
                rxe_dbg_qp(qp, "set retry count = %d\n", attr->retry_cnt);
        }

        if (mask & IB_QP_RNR_RETRY) {
                qp->attr.rnr_retry = attr->rnr_retry;
                qp->comp.rnr_retry = attr->rnr_retry;
                rxe_dbg_qp(qp, "set rnr retry count = %d\n", attr->rnr_retry);
        }

        if (mask & IB_QP_RQ_PSN) {
                qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
                qp->resp.psn = qp->attr.rq_psn;
                rxe_dbg_qp(qp, "set resp psn = 0x%x\n", qp->resp.psn);
        }

        if (mask & IB_QP_MIN_RNR_TIMER) {
                qp->attr.min_rnr_timer = attr->min_rnr_timer;
                rxe_dbg_qp(qp, "set min rnr timer = 0x%x\n",
                         attr->min_rnr_timer);
        }

        if (mask & IB_QP_SQ_PSN) {
                qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
                qp->req.psn = qp->attr.sq_psn;
                qp->comp.psn = qp->attr.sq_psn;
                rxe_dbg_qp(qp, "set req psn = 0x%x\n", qp->req.psn);
        }

        if (mask & IB_QP_PATH_MIG_STATE)
                qp->attr.path_mig_state = attr->path_mig_state;

        if (mask & IB_QP_DEST_QPN)
                qp->attr.dest_qp_num = attr->dest_qp_num;

        return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
        *attr = qp->attr;

        attr->rq_psn                            = qp->resp.psn;
        attr->sq_psn                            = qp->req.psn;

        attr->cap.max_send_wr                   = qp->sq.max_wr;
        attr->cap.max_send_sge                  = qp->sq.max_sge;
        attr->cap.max_inline_data               = qp->sq.max_inline;

        if (!qp->srq) {
                attr->cap.max_recv_wr           = qp->rq.max_wr;
                attr->cap.max_recv_sge          = qp->rq.max_sge;
        }

        rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
        rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

        /* Applications that get this state typically spin on it.
         * Yield the processor
         */
        spin_lock_bh(&qp->state_lock);
        if (qp->attr.sq_draining) {
                spin_unlock_bh(&qp->state_lock);
                cond_resched();
        } else {
                spin_unlock_bh(&qp->state_lock);
        }

        return 0;
}

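/* called by the destroy qp verb to check whether the qp can be destroyed */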
int rxe_qp_chk_destroy(struct rxe_qp *qp)
{
        /* See IBA o10-2.2.3
         * An attempt to destroy a QP while attached to a mcast group
         * will fail immediately.
         */
        if (atomic_read(&qp->mcg_num)) {
                rxe_dbg_qp(qp, "Attempt to destroy while attached to multicast group\n");
                return -EBUSY;
        }

        return 0;
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
        struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

        spin_lock_bh(&qp->state_lock);
        qp->valid = 0;
        spin_unlock_bh(&qp->state_lock);
        qp->qp_timeout_jiffies = 0;

        if (qp_type(qp) == IB_QPT_RC) {
                del_timer_sync(&qp->retrans_timer);
                del_timer_sync(&qp->rnr_nak_timer);
        }

        if (qp->resp.task.func)
                rxe_cleanup_task(&qp->resp.task);

        if (qp->req.task.func)
                rxe_cleanup_task(&qp->req.task);

        if (qp->comp.task.func)
                rxe_cleanup_task(&qp->comp.task);

        /* flush out any receive wr's or pending requests */
        rxe_requester(qp);
        rxe_completer(qp);
        rxe_responder(qp);

        if (qp->sq.queue)
                rxe_queue_cleanup(qp->sq.queue);

        if (qp->srq)
                rxe_put(qp->srq);

        if (qp->rq.queue)
                rxe_queue_cleanup(qp->rq.queue);

        if (qp->scq) {
                atomic_dec(&qp->scq->num_wq);
                rxe_put(qp->scq);
        }

        if (qp->rcq) {
                atomic_dec(&qp->rcq->num_wq);
                rxe_put(qp->rcq);
        }

        if (qp->pd)
                rxe_put(qp->pd);

        if (qp->resp.mr)
                rxe_put(qp->resp.mr);

        free_rd_atomic_resources(qp);

        if (qp->sk) {
                if (qp_type(qp) == IB_QPT_RC)
                        sk_dst_reset(qp->sk->sk);

                kernel_sock_shutdown(qp->sk, SHUT_RDWR);
                sock_release(qp->sk);
        }
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
        struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);

        execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}