/* drivers/infiniband/hw/mthca/mthca_qp.c */
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/io.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

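/*
 * Driver-wide constants.  MTHCA_MAX_DIRECT_QP_SIZE bounds how large a
 * queue buffer may be before we fall back to an indirect page list,
 * and MTHCA_UD_HEADER_SIZE leaves room for the largest UD header
 * (LRH + GRH + BTH + DETH) that build_mlx_header() can construct.
 */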
enum {
        MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
        MTHCA_ACK_REQ_FREQ       = 10,
        MTHCA_FLIGHT_LIMIT       = 9,
        MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
        MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
        MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};

enum {
        MTHCA_QP_STATE_RST  = 0,
        MTHCA_QP_STATE_INIT = 1,
        MTHCA_QP_STATE_RTR  = 2,
        MTHCA_QP_STATE_RTS  = 3,
        MTHCA_QP_STATE_SQE  = 4,
        MTHCA_QP_STATE_SQD  = 5,
        MTHCA_QP_STATE_ERR  = 6,
        MTHCA_QP_STATE_DRAINING = 7
};

enum {
        MTHCA_QP_ST_RC  = 0x0,
        MTHCA_QP_ST_UC  = 0x1,
        MTHCA_QP_ST_RD  = 0x2,
        MTHCA_QP_ST_UD  = 0x3,
        MTHCA_QP_ST_MLX = 0x7
};

enum {
        MTHCA_QP_PM_MIGRATED = 0x3,
        MTHCA_QP_PM_ARMED    = 0x0,
        MTHCA_QP_PM_REARM    = 0x1
};

enum {
        /* qp_context flags */
        MTHCA_QP_BIT_DE  = 1 <<  8,
        /* params1 */
        MTHCA_QP_BIT_SRE = 1 << 15,
        MTHCA_QP_BIT_SWE = 1 << 14,
        MTHCA_QP_BIT_SAE = 1 << 13,
        MTHCA_QP_BIT_SIC = 1 <<  4,
        MTHCA_QP_BIT_SSC = 1 <<  3,
        /* params2 */
        MTHCA_QP_BIT_RRE = 1 << 15,
        MTHCA_QP_BIT_RWE = 1 << 14,
        MTHCA_QP_BIT_RAE = 1 << 13,
        MTHCA_QP_BIT_RIC = 1 <<  4,
        MTHCA_QP_BIT_RSC = 1 <<  3
};

enum {
        MTHCA_SEND_DOORBELL_FENCE = 1 << 5
};

struct mthca_qp_path {
        __be32 port_pkey;
        u8     rnr_retry;
        u8     g_mylmc;
        __be16 rlid;
        u8     ackto;
        u8     mgid_index;
        u8     static_rate;
        u8     hop_limit;
        __be32 sl_tclass_flowlabel;
        u8     rgid[16];
} __attribute__((packed));

struct mthca_qp_context {
        __be32 flags;
        __be32 tavor_sched_queue; /* Reserved on Arbel */
        u8     mtu_msgmax;
        u8     rq_size_stride;  /* Reserved on Tavor */
        u8     sq_size_stride;  /* Reserved on Tavor */
        u8     rlkey_arbel_sched_queue; /* Reserved on Tavor */
        __be32 usr_page;
        __be32 local_qpn;
        __be32 remote_qpn;
        u32    reserved1[2];
        struct mthca_qp_path pri_path;
        struct mthca_qp_path alt_path;
        __be32 rdd;
        __be32 pd;
        __be32 wqe_base;
        __be32 wqe_lkey;
        __be32 params1;
        __be32 reserved2;
        __be32 next_send_psn;
        __be32 cqn_snd;
        __be32 snd_wqe_base_l;  /* Next send WQE on Tavor */
        __be32 snd_db_index;    /* (debugging only entries) */
        __be32 last_acked_psn;
        __be32 ssn;
        __be32 params2;
        __be32 rnr_nextrecvpsn;
        __be32 ra_buff_indx;
        __be32 cqn_rcv;
        __be32 rcv_wqe_base_l;  /* Next recv WQE on Tavor */
        __be32 rcv_db_index;    /* (debugging only entries) */
        __be32 qkey;
        __be32 srqn;
        __be32 rmsn;
        __be16 rq_wqe_counter;  /* reserved on Tavor */
        __be16 sq_wqe_counter;  /* reserved on Tavor */
        u32    reserved3[18];
} __attribute__((packed));

struct mthca_qp_param {
        __be32 opt_param_mask;
        u32    reserved1;
        struct mthca_qp_context context;
        u32    reserved2[62];
} __attribute__((packed));

enum {
        MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
        MTHCA_QP_OPTPAR_RRE               = 1 << 1,
        MTHCA_QP_OPTPAR_RAE               = 1 << 2,
        MTHCA_QP_OPTPAR_RWE               = 1 << 3,
        MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
        MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
        MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
        MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
        MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
        MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
        MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
        MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
        MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
        MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
        MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
        MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
        MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

static const u8 mthca_opcode[] = {
        [IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
        [IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
        [IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
        [IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
        [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};

static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
        return qp->qpn >= dev->qp_table.sqp_start &&
                qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
        return qp->qpn >= dev->qp_table.sqp_start &&
                qp->qpn <= dev->qp_table.sqp_start + 1;
}

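/*
 * WQEs live either in one physically contiguous ("direct") buffer or
 * in a list of pages.  n << wqe_shift is the byte offset of entry n;
 * in the indirect case we split it into a page index and an offset
 * within that page.
 */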
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
        if (qp->is_direct)
                return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
        else
                return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
                        ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
        if (qp->is_direct)
                return qp->queue.direct.buf + qp->send_wqe_offset +
                        (n << qp->sq.wqe_shift);
        else
                return qp->queue.page_list[(qp->send_wqe_offset +
                                            (n << qp->sq.wqe_shift)) >>
                                           PAGE_SHIFT].buf +
                        ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
                         (PAGE_SIZE - 1));
}

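/*
 * Return a work queue to its initial empty state.  last_comp starts
 * at max - 1 so that the first completion wraps around to entry 0.
 */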
static void mthca_wq_reset(struct mthca_wq *wq)
{
        wq->next_ind  = 0;
        wq->last_comp = wq->max - 1;
        wq->head      = 0;
        wq->tail      = 0;
}

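/*
 * Dispatch an async event to the QP's event handler.  The refcount is
 * bumped under the table lock so the QP can't be freed while the
 * handler runs; mthca_free_qp() waits for the count to drop.
 */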
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
                    enum ib_event_type event_type)
{
        struct mthca_qp *qp;
        struct ib_event event;

        spin_lock(&dev->qp_table.lock);
        qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
        if (qp)
                ++qp->refcount;
        spin_unlock(&dev->qp_table.lock);

        if (!qp) {
                mthca_warn(dev, "Async event %d for bogus QP %08x\n",
                           event_type, qpn);
                return;
        }

        if (event_type == IB_EVENT_PATH_MIG)
                qp->port = qp->alt_port;

        event.device      = &dev->ib_dev;
        event.event       = event_type;
        event.element.qp  = &qp->ibqp;
        if (qp->ibqp.event_handler)
                qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

        spin_lock(&dev->qp_table.lock);
        if (!--qp->refcount)
                wake_up(&qp->wait);
        spin_unlock(&dev->qp_table.lock);
}

static int to_mthca_state(enum ib_qp_state ib_state)
{
        switch (ib_state) {
        case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
        case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
        case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
        case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
        case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
        case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
        case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
        default:           return -1;
        }
}

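/*
 * Internal transport types.  MLX is the "raw" transport used for the
 * special QPs (QP0/QP1), where we build the UD headers ourselves.
 */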
enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
        switch (transport) {
        case RC:  return MTHCA_QP_ST_RC;
        case UC:  return MTHCA_QP_ST_UC;
        case UD:  return MTHCA_QP_ST_UD;
        case RD:  return MTHCA_QP_ST_RD;
        case MLX: return MTHCA_QP_ST_MLX;
        default:  return -1;
        }
}

static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
                        int attr_mask)
{
        if (attr_mask & IB_QP_PKEY_INDEX)
                sqp->pkey_index = attr->pkey_index;
        if (attr_mask & IB_QP_QKEY)
                sqp->qkey = attr->qkey;
        if (attr_mask & IB_QP_SQ_PSN)
                sqp->send_psn = attr->sq_psn;
}

static void init_port(struct mthca_dev *dev, int port)
{
        int err;
        struct mthca_init_ib_param param;

        memset(&param, 0, sizeof param);

        param.port_width = dev->limits.port_width_cap;
        param.vl_cap     = dev->limits.vl_cap;
        param.mtu_cap    = dev->limits.mtu_cap;
        param.gid_cap    = dev->limits.gid_table_len;
        param.pkey_cap   = dev->limits.pkey_table_len;

        err = mthca_INIT_IB(dev, &param, port);
        if (err)
                mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
}

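/*
 * Compute the hardware RRE/RAE/RWE access bits.  A responder depth of
 * zero means inbound RDMA reads and atomics can't be serviced, so in
 * that case only the remote-write flag may remain set.
 */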
static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
                                  int attr_mask)
{
        u8 dest_rd_atomic;
        u32 access_flags;
        u32 hw_access_flags = 0;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                dest_rd_atomic = attr->max_dest_rd_atomic;
        else
                dest_rd_atomic = qp->resp_depth;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                access_flags = attr->qp_access_flags;
        else
                access_flags = qp->atomic_rd_en;

        if (!dest_rd_atomic)
                access_flags &= IB_ACCESS_REMOTE_WRITE;

        if (access_flags & IB_ACCESS_REMOTE_READ)
                hw_access_flags |= MTHCA_QP_BIT_RRE;
        if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
                hw_access_flags |= MTHCA_QP_BIT_RAE;
        if (access_flags & IB_ACCESS_REMOTE_WRITE)
                hw_access_flags |= MTHCA_QP_BIT_RWE;

        return cpu_to_be32(hw_access_flags);
}

static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
        switch (mthca_state) {
        case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
        case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
        case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
        case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
        case MTHCA_QP_STATE_DRAINING:
        case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
        case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
        case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
        default:                      return -1;
        }
}

static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
        switch (mthca_mig_state) {
        case 0:  return IB_MIG_ARMED;
        case 1:  return IB_MIG_REARM;
        case 3:  return IB_MIG_MIGRATED;
        default: return -1;
        }
}

static int to_ib_qp_access_flags(int mthca_flags)
{
        int ib_flags = 0;

        if (mthca_flags & MTHCA_QP_BIT_RRE)
                ib_flags |= IB_ACCESS_REMOTE_READ;
        if (mthca_flags & MTHCA_QP_BIT_RWE)
                ib_flags |= IB_ACCESS_REMOTE_WRITE;
        if (mthca_flags & MTHCA_QP_BIT_RAE)
                ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

        return ib_flags;
}

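/*
 * Decode a hardware address path into an ib_ah_attr: the port number
 * sits in bits 24-25 of port_pkey, the SL in the top nibble of
 * sl_tclass_flowlabel, and bit 7 of g_mylmc indicates a GRH.
 */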
static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
                                struct mthca_qp_path *path)
{
        memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
        ib_ah_attr->port_num      = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

        if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
                return;

        ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
        ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
        ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
        ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
                                                     path->static_rate & 0xf,
                                                     ib_ah_attr->port_num);
        ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
        if (ib_ah_attr->ah_flags) {
                ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
                ib_ah_attr->grh.hop_limit  = path->hop_limit;
                ib_ah_attr->grh.traffic_class =
                        (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
                ib_ah_attr->grh.flow_label =
                        be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
                memcpy(ib_ah_attr->grh.dgid.raw,
                        path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
        }
}

int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
                   struct ib_qp_init_attr *qp_init_attr)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        int err = 0;
        struct mthca_mailbox *mailbox = NULL;
        struct mthca_qp_param *qp_param;
        struct mthca_qp_context *context;
        int mthca_state;

        mutex_lock(&qp->mutex);

        if (qp->state == IB_QPS_RESET) {
                qp_attr->qp_state = IB_QPS_RESET;
                goto done;
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto out;
        }

        err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox);
        if (err) {
                mthca_warn(dev, "QUERY_QP failed (%d)\n", err);
                goto out_mailbox;
        }

        qp_param    = mailbox->buf;
        context     = &qp_param->context;
        mthca_state = be32_to_cpu(context->flags) >> 28;

        qp->state                    = to_ib_qp_state(mthca_state);
        qp_attr->qp_state            = qp->state;
        qp_attr->path_mtu            = context->mtu_msgmax >> 5;
        qp_attr->path_mig_state      =
                to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
        qp_attr->qkey                = be32_to_cpu(context->qkey);
        qp_attr->rq_psn              = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
        qp_attr->sq_psn              = be32_to_cpu(context->next_send_psn) & 0xffffff;
        qp_attr->dest_qp_num         = be32_to_cpu(context->remote_qpn) & 0xffffff;
        qp_attr->qp_access_flags     =
                to_ib_qp_access_flags(be32_to_cpu(context->params2));

        if (qp->transport == RC || qp->transport == UC) {
                to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
                to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
                qp_attr->alt_pkey_index =
                        be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
                qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
        }

        qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
        qp_attr->port_num   =
                (be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3;

        /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
        qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;

        qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

        qp_attr->max_dest_rd_atomic =
                1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
        qp_attr->min_rnr_timer      =
                (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
        qp_attr->timeout            = context->pri_path.ackto >> 3;
        qp_attr->retry_cnt          = (be32_to_cpu(context->params1) >> 16) & 0x7;
        qp_attr->rnr_retry          = context->pri_path.rnr_retry >> 5;
        qp_attr->alt_timeout        = context->alt_path.ackto >> 3;

done:
        qp_attr->cur_qp_state        = qp_attr->qp_state;
        qp_attr->cap.max_send_wr     = qp->sq.max;
        qp_attr->cap.max_recv_wr     = qp->rq.max;
        qp_attr->cap.max_send_sge    = qp->sq.max_gs;
        qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
        qp_attr->cap.max_inline_data = qp->max_inline_data;

        qp_init_attr->cap            = qp_attr->cap;
        qp_init_attr->sq_sig_type    = qp->sq_policy;

out_mailbox:
        mthca_free_mailbox(dev, mailbox);

out:
        mutex_unlock(&qp->mutex);
        return err;
}

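/*
 * Encode an ib_ah_attr into a hardware address path (the inverse of
 * to_ib_ah_attr() above).  Returns -1 if the SGID index is out of
 * range.
 */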
static int mthca_path_set(struct mthca_dev *dev, const struct ib_ah_attr *ah,
                          struct mthca_qp_path *path, u8 port)
{
        path->g_mylmc     = ah->src_path_bits & 0x7f;
        path->rlid        = cpu_to_be16(ah->dlid);
        path->static_rate = mthca_get_rate(dev, ah->static_rate, port);

        if (ah->ah_flags & IB_AH_GRH) {
                if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
                        mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
                                  ah->grh.sgid_index, dev->limits.gid_table_len - 1);
                        return -1;
                }

                path->g_mylmc   |= 1 << 7;
                path->mgid_index = ah->grh.sgid_index;
                path->hop_limit  = ah->grh.hop_limit;
                path->sl_tclass_flowlabel =
                        cpu_to_be32((ah->sl << 28)                |
                                    (ah->grh.traffic_class << 20) |
                                    (ah->grh.flow_label));
                memcpy(path->rgid, ah->grh.dgid.raw, 16);
        } else
                path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);

        return 0;
}

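/*
 * Do the real work of a QP transition: build the new QP context in a
 * mailbox, setting an opt_param_mask bit for each optional field we
 * touch, and hand the whole thing to the MODIFY_QP firmware command.
 */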
static int __mthca_modify_qp(struct ib_qp *ibqp,
                             const struct ib_qp_attr *attr, int attr_mask,
                             enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        struct mthca_mailbox *mailbox;
        struct mthca_qp_param *qp_param;
        struct mthca_qp_context *qp_context;
        u32 sqd_event = 0;
        int err = -EINVAL;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto out;
        }
        qp_param = mailbox->buf;
        qp_context = &qp_param->context;
        memset(qp_param, 0, sizeof *qp_param);

        qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
                                             (to_mthca_st(qp->transport) << 16));
        qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
        if (!(attr_mask & IB_QP_PATH_MIG_STATE))
                qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
        else {
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
                switch (attr->path_mig_state) {
                case IB_MIG_MIGRATED:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
                        break;
                case IB_MIG_REARM:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
                        break;
                case IB_MIG_ARMED:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
                        break;
                }
        }

        /* leave tavor_sched_queue as 0 */

        if (qp->transport == MLX || qp->transport == UD)
                qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
        else if (attr_mask & IB_QP_PATH_MTU) {
                if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
                        mthca_dbg(dev, "path MTU (%u) is invalid\n",
                                  attr->path_mtu);
                        goto out_mailbox;
                }
                qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
        }

        if (mthca_is_memfree(dev)) {
                if (qp->rq.max)
                        qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
                qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

                if (qp->sq.max)
                        qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
                qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
        }

        /* leave arbel_sched_queue as 0 */

        if (qp->ibqp.uobject)
                qp_context->usr_page =
                        cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
        else
                qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
        qp_context->local_qpn  = cpu_to_be32(qp->qpn);
        if (attr_mask & IB_QP_DEST_QPN)
                qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

        if (qp->transport == MLX)
                qp_context->pri_path.port_pkey |=
                        cpu_to_be32(qp->port << 24);
        else {
                if (attr_mask & IB_QP_PORT) {
                        qp_context->pri_path.port_pkey |=
                                cpu_to_be32(attr->port_num << 24);
                        qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
                }
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                qp_context->pri_path.port_pkey |=
                        cpu_to_be32(attr->pkey_index);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
                        attr->rnr_retry << 5;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
                                                        MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
        }

        if (attr_mask & IB_QP_AV) {
                if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
                                   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
                        goto out_mailbox;

                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
        }

        if (ibqp->qp_type == IB_QPT_RC &&
            cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
                u8 sched_queue = ibqp->uobject ? 0x2 : 0x1;

                if (mthca_is_memfree(dev))
                        qp_context->rlkey_arbel_sched_queue |= sched_queue;
                else
                        qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue);

                qp_param->opt_param_mask |=
                        cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE);
        }

        if (attr_mask & IB_QP_TIMEOUT) {
                qp_context->pri_path.ackto = attr->timeout << 3;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
                        mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
                                  attr->alt_pkey_index, dev->limits.pkey_table_len - 1);
                        goto out_mailbox;
                }

                if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
                        mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
                                  attr->alt_port_num);
                        goto out_mailbox;
                }

                if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
                                   attr->alt_ah_attr.port_num))
                        goto out_mailbox;

                qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
                                                              attr->alt_port_num << 24);
                qp_context->alt_path.ackto = attr->alt_timeout << 3;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
        }

        /* leave rdd as 0 */
        qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
        /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
        qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
        qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
                                             (MTHCA_FLIGHT_LIMIT << 24) |
                                             MTHCA_QP_BIT_SWE);
        if (qp->sq_policy == IB_SIGNAL_ALL_WR)
                qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
        if (attr_mask & IB_QP_RETRY_CNT) {
                qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic) {
                        qp_context->params1 |=
                                cpu_to_be32(MTHCA_QP_BIT_SRE |
                                            MTHCA_QP_BIT_SAE);
                        qp_context->params1 |=
                                cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
                }
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
        }

        if (attr_mask & IB_QP_SQ_PSN)
                qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
        qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

        if (mthca_is_memfree(dev)) {
                qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
                qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                if (attr->max_dest_rd_atomic)
                        qp_context->params2 |=
                                cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
        }

        if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
                qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
                                                        MTHCA_QP_OPTPAR_RRE |
                                                        MTHCA_QP_OPTPAR_RAE);
        }

        qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

        if (ibqp->srq)
                qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

        if (attr_mask & IB_QP_MIN_RNR_TIMER) {
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
        }
        if (attr_mask & IB_QP_RQ_PSN)
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

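        /*
         * Each QP owns a slice of the RDB region, which holds the
         * resources for responding to inbound RDMA reads and atomics;
         * it is indexed by QPN, with 2^rdb_shift entries per QP.
         */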
        qp_context->ra_buff_indx =
                cpu_to_be32(dev->qp_table.rdb_base +
                            ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
                             dev->qp_table.rdb_shift));

        qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

        if (mthca_is_memfree(dev))
                qp_context->rcv_db_index   = cpu_to_be32(qp->rq.db_index);

        if (attr_mask & IB_QP_QKEY) {
                qp_context->qkey = cpu_to_be32(attr->qkey);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
        }

        if (ibqp->srq)
                qp_context->srqn = cpu_to_be32(1 << 24 |
                                               to_msrq(ibqp->srq)->srqn);

        if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD  &&
            attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY               &&
            attr->en_sqd_async_notify)
                sqd_event = 1 << 31;

        err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
                              mailbox, sqd_event);
        if (err) {
                mthca_warn(dev, "modify QP %d->%d returned %d.\n",
                           cur_state, new_state, err);
                goto out_mailbox;
        }

        qp->state = new_state;
        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->atomic_rd_en = attr->qp_access_flags;
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->resp_depth = attr->max_dest_rd_atomic;
        if (attr_mask & IB_QP_PORT)
                qp->port = attr->port_num;
        if (attr_mask & IB_QP_ALT_PATH)
                qp->alt_port = attr->alt_port_num;

        if (is_sqp(dev, qp))
                store_attrs(to_msqp(qp), attr, attr_mask);

        /*
         * If we moved QP0 to RTR, bring the IB link up; if we moved
         * QP0 to RESET or ERROR, bring the link back down.
         */
        if (is_qp0(dev, qp)) {
                if (cur_state != IB_QPS_RTR &&
                    new_state == IB_QPS_RTR)
                        init_port(dev, qp->port);

                if (cur_state != IB_QPS_RESET &&
                    cur_state != IB_QPS_ERR &&
                    (new_state == IB_QPS_RESET ||
                     new_state == IB_QPS_ERR))
                        mthca_CLOSE_IB(dev, qp->port);
        }

        /*
         * If we moved a kernel QP to RESET, clean up all old CQ
         * entries and reinitialize the QP.
         */
        if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
                mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
                               qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
                        mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);

                mthca_wq_reset(&qp->sq);
                qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

                mthca_wq_reset(&qp->rq);
                qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

                if (mthca_is_memfree(dev)) {
                        *qp->sq.db = 0;
                        *qp->rq.db = 0;
                }
        }

out_mailbox:
        mthca_free_mailbox(dev, mailbox);
out:
        return err;
}

int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
                    struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        int err = -EINVAL;

        mutex_lock(&qp->mutex);
        if (attr_mask & IB_QP_CUR_STATE) {
                cur_state = attr->cur_qp_state;
        } else {
                spin_lock_irq(&qp->sq.lock);
                spin_lock(&qp->rq.lock);
                cur_state = qp->state;
                spin_unlock(&qp->rq.lock);
                spin_unlock_irq(&qp->sq.lock);
        }

        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
                mthca_dbg(dev, "Bad QP transition (transport %d) "
                          "%d->%d with attr 0x%08x\n",
                          qp->transport, cur_state, new_state,
                          attr_mask);
                goto out;
        }

        if ((attr_mask & IB_QP_PKEY_INDEX) &&
            attr->pkey_index >= dev->limits.pkey_table_len) {
                mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
                          attr->pkey_index, dev->limits.pkey_table_len - 1);
                goto out;
        }

        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
                mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
                goto out;
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
                mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
                          attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
                goto out;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
                mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
                          attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
                goto out;
        }

        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
                err = 0;
                goto out;
        }

        err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
        mutex_unlock(&qp->mutex);
        return err;
}

static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
        /*
         * Calculate the maximum size of WQE s/g segments, excluding
         * the next segment and other non-data segments.
         */
        int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

        switch (qp->transport) {
        case MLX:
                max_data_size -= 2 * sizeof (struct mthca_data_seg);
                break;

        case UD:
                if (mthca_is_memfree(dev))
                        max_data_size -= sizeof (struct mthca_arbel_ud_seg);
                else
                        max_data_size -= sizeof (struct mthca_tavor_ud_seg);
                break;

        default:
                max_data_size -= sizeof (struct mthca_raddr_seg);
                break;
        }

        return max_data_size;
}

static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
        /* We don't support inline data for kernel QPs (yet). */
        return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}

static void mthca_adjust_qp_caps(struct mthca_dev *dev,
                                 struct mthca_pd *pd,
                                 struct mthca_qp *qp)
{
        int max_data_size = mthca_max_data_size(dev, qp,
                                                min(dev->limits.max_desc_sz,
                                                    1 << qp->sq.wqe_shift));

        qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

        qp->sq.max_gs = min_t(int, dev->limits.max_sg,
                              max_data_size / sizeof (struct mthca_data_seg));
        qp->rq.max_gs = min_t(int, dev->limits.max_sg,
                               (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
                                sizeof (struct mthca_next_seg)) /
                               sizeof (struct mthca_data_seg));
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue).
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
                               struct mthca_pd *pd,
                               struct mthca_qp *qp)
{
        int size;
        int err = -ENOMEM;

        size = sizeof (struct mthca_next_seg) +
                qp->rq.max_gs * sizeof (struct mthca_data_seg);

        if (size > dev->limits.max_desc_sz)
                return -EINVAL;

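        /*
         * Round the RQ descriptor size up to the next power of two,
         * at least 64 bytes: e.g. a size of 96 gives wqe_shift = 7
         * (128-byte WQEs).
         */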
        for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
             qp->rq.wqe_shift++)
                ; /* nothing */

        size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
        switch (qp->transport) {
        case MLX:
                size += 2 * sizeof (struct mthca_data_seg);
                break;

        case UD:
                size += mthca_is_memfree(dev) ?
                        sizeof (struct mthca_arbel_ud_seg) :
                        sizeof (struct mthca_tavor_ud_seg);
                break;

        case UC:
                size += sizeof (struct mthca_raddr_seg);
                break;

        case RC:
                size += sizeof (struct mthca_raddr_seg);
                /*
                 * An atomic op will require an atomic segment, a
                 * remote address segment and one scatter entry.
                 */
                size = max_t(int, size,
                             sizeof (struct mthca_atomic_seg) +
                             sizeof (struct mthca_raddr_seg) +
                             sizeof (struct mthca_data_seg));
                break;

        default:
                break;
        }

        /* Make sure that we have enough space for a bind request */
        size = max_t(int, size, sizeof (struct mthca_bind_seg));

        size += sizeof (struct mthca_next_seg);

        if (size > dev->limits.max_desc_sz)
                return -EINVAL;

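        /* Likewise round the SQ descriptor size up to a power of two. */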
        for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
             qp->sq.wqe_shift++)
                ; /* nothing */

        qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
                                    1 << qp->sq.wqe_shift);

        /*
         * If this is a userspace QP, we don't actually have to
         * allocate anything.  All we need is to calculate the WQE
         * sizes and the send_wqe_offset, so we're done now.
         */
        if (pd->ibpd.uobject)
                return 0;

        size = PAGE_ALIGN(qp->send_wqe_offset +
                          (qp->sq.max << qp->sq.wqe_shift));

        qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
                           GFP_KERNEL);
        if (!qp->wrid)
                goto err_out;

        err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
                              &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
        if (err)
                goto err_out;

        return 0;

err_out:
        kfree(qp->wrid);
        return err;
}

static void mthca_free_wqe_buf(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
                                       (qp->sq.max << qp->sq.wqe_shift)),
                       &qp->queue, qp->is_direct, &qp->mr);
        kfree(qp->wrid);
}

static int mthca_map_memfree(struct mthca_dev *dev,
                             struct mthca_qp *qp)
{
        int ret;

        if (mthca_is_memfree(dev)) {
                ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
                if (ret)
                        return ret;

                ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
                if (ret)
                        goto err_qpc;

                ret = mthca_table_get(dev, dev->qp_table.rdb_table,
                                      qp->qpn << dev->qp_table.rdb_shift);
                if (ret)
                        goto err_eqpc;
        }

        return 0;

err_eqpc:
        mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
        mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

        return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
                                struct mthca_qp *qp)
{
        mthca_table_put(dev, dev->qp_table.rdb_table,
                        qp->qpn << dev->qp_table.rdb_shift);
        mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
        mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}

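/*
 * Mem-free (Arbel-family) HCAs keep doorbell records in host memory;
 * allocate one for the receive queue and one for the send queue.
 */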
static int mthca_alloc_memfree(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        if (mthca_is_memfree(dev)) {
                qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
                                                 qp->qpn, &qp->rq.db);
                if (qp->rq.db_index < 0)
                        return -ENOMEM;

                qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
                                                 qp->qpn, &qp->sq.db);
                if (qp->sq.db_index < 0) {
                        mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
                        return -ENOMEM;
                }
        }

        return 0;
}

static void mthca_free_memfree(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        if (mthca_is_memfree(dev)) {
                mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
                mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
        }
}

static int mthca_alloc_qp_common(struct mthca_dev *dev,
                                 struct mthca_pd *pd,
                                 struct mthca_cq *send_cq,
                                 struct mthca_cq *recv_cq,
                                 enum ib_sig_type send_policy,
                                 struct mthca_qp *qp)
{
        int ret;
        int i;
        struct mthca_next_seg *next;

        qp->refcount = 1;
        init_waitqueue_head(&qp->wait);
        mutex_init(&qp->mutex);
        qp->state        = IB_QPS_RESET;
        qp->atomic_rd_en = 0;
        qp->resp_depth   = 0;
        qp->sq_policy    = send_policy;
        mthca_wq_reset(&qp->sq);
        mthca_wq_reset(&qp->rq);

        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);

        ret = mthca_map_memfree(dev, qp);
        if (ret)
                return ret;

        ret = mthca_alloc_wqe_buf(dev, pd, qp);
        if (ret) {
                mthca_unmap_memfree(dev, qp);
                return ret;
        }

        mthca_adjust_qp_caps(dev, pd, qp);

        /*
         * If this is a userspace QP, we're done now.  The doorbells
         * will be allocated and buffers will be initialized in
         * userspace.
         */
        if (pd->ibpd.uobject)
                return 0;

        ret = mthca_alloc_memfree(dev, qp);
        if (ret) {
                mthca_free_wqe_buf(dev, qp);
                mthca_unmap_memfree(dev, qp);
                return ret;
        }

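        /*
         * Pre-link the free WQE chains: point each descriptor's
         * nda_op at its successor (wrapping at the end of the queue)
         * and, on mem-free hardware, mark every scatter entry with
         * the invalid lkey so that unused entries are ignored.
         */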
        if (mthca_is_memfree(dev)) {
                struct mthca_data_seg *scatter;
                int size = (sizeof (struct mthca_next_seg) +
                            qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

                for (i = 0; i < qp->rq.max; ++i) {
                        next = get_recv_wqe(qp, i);
                        next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
                                                   qp->rq.wqe_shift);
                        next->ee_nds = cpu_to_be32(size);

                        for (scatter = (void *) (next + 1);
                             (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
                             ++scatter)
                                scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
                }

                for (i = 0; i < qp->sq.max; ++i) {
                        next = get_send_wqe(qp, i);
                        next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
                                                    qp->sq.wqe_shift) +
                                                   qp->send_wqe_offset);
                }
        } else {
                for (i = 0; i < qp->rq.max; ++i) {
                        next = get_recv_wqe(qp, i);
                        next->nda_op = cpu_to_be32((((i + 1) % qp->rq.max) <<
                                                    qp->rq.wqe_shift) | 1);
                }
        }

        qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
        qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

        return 0;
}

static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
                             struct mthca_pd *pd, struct mthca_qp *qp)
{
        int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

        /* Sanity check QP size before proceeding */
        if (cap->max_send_wr     > dev->limits.max_wqes ||
            cap->max_recv_wr     > dev->limits.max_wqes ||
            cap->max_send_sge    > dev->limits.max_sg   ||
            cap->max_recv_sge    > dev->limits.max_sg   ||
            cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
                return -EINVAL;

        /*
         * For MLX transport we need 2 extra send gather entries:
         * one for the header and one for the checksum at the end.
         */
        if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg)
                return -EINVAL;

        if (mthca_is_memfree(dev)) {
                qp->rq.max = cap->max_recv_wr ?
                        roundup_pow_of_two(cap->max_recv_wr) : 0;
                qp->sq.max = cap->max_send_wr ?
                        roundup_pow_of_two(cap->max_send_wr) : 0;
        } else {
                qp->rq.max = cap->max_recv_wr;
                qp->sq.max = cap->max_send_wr;
        }

        qp->rq.max_gs = cap->max_recv_sge;
        qp->sq.max_gs = max_t(int, cap->max_send_sge,
                              ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
                                    MTHCA_INLINE_CHUNK_SIZE) /
                              sizeof (struct mthca_data_seg));

        return 0;
}

int mthca_alloc_qp(struct mthca_dev *dev,
                   struct mthca_pd *pd,
                   struct mthca_cq *send_cq,
                   struct mthca_cq *recv_cq,
                   enum ib_qp_type type,
                   enum ib_sig_type send_policy,
                   struct ib_qp_cap *cap,
                   struct mthca_qp *qp)
{
        int err;

        switch (type) {
        case IB_QPT_RC: qp->transport = RC; break;
        case IB_QPT_UC: qp->transport = UC; break;
        case IB_QPT_UD: qp->transport = UD; break;
        default: return -EINVAL;
        }

        err = mthca_set_qp_size(dev, cap, pd, qp);
        if (err)
                return err;

        qp->qpn = mthca_alloc(&dev->qp_table.alloc);
        if (qp->qpn == -1)
                return -ENOMEM;

        /* initialize port to zero for error-catching. */
        qp->port = 0;

        err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
                                    send_policy, qp);
        if (err) {
                mthca_free(&dev->qp_table.alloc, qp->qpn);
                return err;
        }

        spin_lock_irq(&dev->qp_table.lock);
        mthca_array_set(&dev->qp_table.qp,
                        qp->qpn & (dev->limits.num_qps - 1), qp);
        spin_unlock_irq(&dev->qp_table.lock);

        return 0;
}

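/*
 * Lock the send and receive CQs of a QP in a consistent order (lower
 * CQN first) so that two QPs being destroyed concurrently cannot
 * deadlock on each other's CQ locks.
 */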
static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
        __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                spin_lock_irq(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock_irq(&recv_cq->lock);
                spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
        }
}

static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
        __releases(&send_cq->lock) __releases(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                __release(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
                spin_unlock(&send_cq->lock);
                spin_unlock_irq(&recv_cq->lock);
        }
}

int mthca_alloc_sqp(struct mthca_dev *dev,
                    struct mthca_pd *pd,
                    struct mthca_cq *send_cq,
                    struct mthca_cq *recv_cq,
                    enum ib_sig_type send_policy,
                    struct ib_qp_cap *cap,
                    int qpn,
                    int port,
                    struct mthca_sqp *sqp)
{
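        /*
         * Special QPs sit in a reserved range starting at sqp_start:
         * QP0 for both ports, then QP1 for both ports, hence
         * qpn * 2 + port - 1.
         */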
1352         u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
1353         int err;
1354
1355         sqp->qp.transport = MLX;
1356         err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
1357         if (err)
1358                 return err;
1359
1360         sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
1361         sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
1362                                              &sqp->header_dma, GFP_KERNEL);
1363         if (!sqp->header_buf)
1364                 return -ENOMEM;
1365
1366         spin_lock_irq(&dev->qp_table.lock);
1367         if (mthca_array_get(&dev->qp_table.qp, mqpn))
1368                 err = -EBUSY;
1369         else
1370                 mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
1371         spin_unlock_irq(&dev->qp_table.lock);
1372
1373         if (err)
1374                 goto err_out;
1375
1376         sqp->qp.port      = port;
1377         sqp->qp.qpn       = mqpn;
1378         sqp->qp.transport = MLX;
1379
1380         err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
1381                                     send_policy, &sqp->qp);
1382         if (err)
1383                 goto err_out_free;
1384
1385         atomic_inc(&pd->sqp_count);
1386
1387         return 0;
1388
1389  err_out_free:
1390         /*
1391          * Lock CQs here, so that CQ polling code can do QP lookup
1392          * without taking a lock.
1393          */
1394         mthca_lock_cqs(send_cq, recv_cq);
1395
1396         spin_lock(&dev->qp_table.lock);
1397         mthca_array_clear(&dev->qp_table.qp, mqpn);
1398         spin_unlock(&dev->qp_table.lock);
1399
1400         mthca_unlock_cqs(send_cq, recv_cq);
1401
1402  err_out:
1403         dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
1404                           sqp->header_buf, sqp->header_dma);
1405
1406         return err;
1407 }
1408
1409 static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
1410 {
1411         int c;
1412
1413         spin_lock_irq(&dev->qp_table.lock);
1414         c = qp->refcount;
1415         spin_unlock_irq(&dev->qp_table.lock);
1416
1417         return c;
1418 }
1419
1420 void mthca_free_qp(struct mthca_dev *dev,
1421                    struct mthca_qp *qp)
1422 {
1423         struct mthca_cq *send_cq;
1424         struct mthca_cq *recv_cq;
1425
1426         send_cq = to_mcq(qp->ibqp.send_cq);
1427         recv_cq = to_mcq(qp->ibqp.recv_cq);
1428
1429         /*
1430          * Lock CQs here, so that CQ polling code can do QP lookup
1431          * without taking a lock.
1432          */
1433         mthca_lock_cqs(send_cq, recv_cq);
1434
1435         spin_lock(&dev->qp_table.lock);
1436         mthca_array_clear(&dev->qp_table.qp,
1437                           qp->qpn & (dev->limits.num_qps - 1));
1438         --qp->refcount;
1439         spin_unlock(&dev->qp_table.lock);
1440
1441         mthca_unlock_cqs(send_cq, recv_cq);
1442
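        /*
         * Anyone who looked the QP up in the table (e.g. the async
         * event path) holds a reference taken under qp_table.lock;
         * having dropped our own initial reference above, wait for
         * all of them to finish before tearing the QP down.
         */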
1443         wait_event(qp->wait, !get_qp_refcount(dev, qp));
1444
1445         if (qp->state != IB_QPS_RESET)
1446                 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
1447                                 NULL, 0);
1448
1449         /*
1450          * If this is a userspace QP, the buffers, MR, CQs and so on
1451          * will be cleaned up in userspace, so all we have to do is
1452          * unref the mem-free tables and free the QPN in our table.
1453          */
1454         if (!qp->ibqp.uobject) {
1455                 mthca_cq_clean(dev, recv_cq, qp->qpn,
1456                                qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1457                 if (send_cq != recv_cq)
1458                         mthca_cq_clean(dev, send_cq, qp->qpn, NULL);
1459
1460                 mthca_free_memfree(dev, qp);
1461                 mthca_free_wqe_buf(dev, qp);
1462         }
1463
1464         mthca_unmap_memfree(dev, qp);
1465
1466         if (is_sqp(dev, qp)) {
1467                 atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
1468                 dma_free_coherent(&dev->pdev->dev,
1469                                   to_msqp(qp)->header_buf_size,
1470                                   to_msqp(qp)->header_buf,
1471                                   to_msqp(qp)->header_dma);
1472         } else
1473                 mthca_free(&dev->qp_table.alloc, qp->qpn);
1474 }
1475
1476 /* Create UD header for an MLX send and build a data segment for it */
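/*
 * MLX QPs carry raw IB packets: the LRH/BTH/DETH headers are built in
 * software with ib_ud_header_init()/ib_ud_header_pack() and posted as
 * the first data segment.  QP0 traffic goes out on VL15, the source
 * LID is made permissive whenever the destination LID is, and the PSN
 * is masked to the 24 bits the BTH provides.
 */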
1477 static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1478                             int ind, struct ib_send_wr *wr,
1479                             struct mthca_mlx_seg *mlx,
1480                             struct mthca_data_seg *data)
1481 {
1482         int header_size;
1483         int err;
1484         u16 pkey;
1485
1486         ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
1487                           mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
1488                           &sqp->ud_header);
1489
1490         err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
1491         if (err)
1492                 return err;
1493         mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
1494         mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
1495                                   (sqp->ud_header.lrh.destination_lid ==
1496                                    IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
1497                                   (sqp->ud_header.lrh.service_level << 8));
1498         mlx->rlid = sqp->ud_header.lrh.destination_lid;
1499         mlx->vcrc = 0;
1500
1501         switch (wr->opcode) {
1502         case IB_WR_SEND:
1503                 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1504                 sqp->ud_header.immediate_present = 0;
1505                 break;
1506         case IB_WR_SEND_WITH_IMM:
1507                 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1508                 sqp->ud_header.immediate_present = 1;
1509                 sqp->ud_header.immediate_data = wr->ex.imm_data;
1510                 break;
1511         default:
1512                 return -EINVAL;
1513         }
1514
1515         sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
1516         if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
1517                 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
1518         sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
1519         if (!sqp->qp.ibqp.qp_num)
1520                 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
1521                                    sqp->pkey_index, &pkey);
1522         else
1523                 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
1524                                    wr->wr.ud.pkey_index, &pkey);
1525         sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
1526         sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
1527         sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
1528         sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
1529                                                sqp->qkey : wr->wr.ud.remote_qkey);
1530         sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
1531
1532         header_size = ib_ud_header_pack(&sqp->ud_header,
1533                                         sqp->header_buf +
1534                                         ind * MTHCA_UD_HEADER_SIZE);
1535
1536         data->byte_count = cpu_to_be32(header_size);
1537         data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
1538         data->addr       = cpu_to_be64(sqp->header_dma +
1539                                        ind * MTHCA_UD_HEADER_SIZE);
1540
1541         return 0;
1542 }
1543
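/*
 * wq->head and wq->tail are free-running counters, so head - tail is
 * the number of WQEs outstanding even across wraparound.  If the
 * lockless check looks full, the count is recomputed under the CQ
 * lock: the tail only advances during completion processing, which
 * runs with that lock held, so the second read cannot race with it.
 */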
1544 static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
1545                                     struct ib_cq *ib_cq)
1546 {
1547         unsigned cur;
1548         struct mthca_cq *cq;
1549
1550         cur = wq->head - wq->tail;
1551         if (likely(cur + nreq < wq->max))
1552                 return 0;
1553
1554         cq = to_mcq(ib_cq);
1555         spin_lock(&cq->lock);
1556         cur = wq->head - wq->tail;
1557         spin_unlock(&cq->lock);
1558
1559         return cur + nreq >= wq->max;
1560 }
1561
1562 static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
1563                                           u64 remote_addr, u32 rkey)
1564 {
1565         rseg->raddr    = cpu_to_be64(remote_addr);
1566         rseg->rkey     = cpu_to_be32(rkey);
1567         rseg->reserved = 0;
1568 }
1569
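/*
 * For compare-and-swap, swap_add carries the swap value and compare
 * the value to compare against; for fetch-and-add, swap_add carries
 * the addend and the compare field is unused.
 */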
1570 static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
1571                                            struct ib_send_wr *wr)
1572 {
1573         if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1574                 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1575                 aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
1576         } else {
1577                 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1578                 aseg->compare  = 0;
1579         }
1580
1582
1583 static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
1584                              struct ib_send_wr *wr)
1585 {
1586         useg->lkey    = cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
1587         useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
1588         useg->dqpn    = cpu_to_be32(wr->wr.ud.remote_qpn);
1589         useg->qkey    = cpu_to_be32(wr->wr.ud.remote_qkey);
1591 }
1592
1593 static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
1594                              struct ib_send_wr *wr)
1595 {
1596         memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
1597         useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
1598         useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
1599 }
1600
1601 int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1602                           struct ib_send_wr **bad_wr)
1603 {
1604         struct mthca_dev *dev = to_mdev(ibqp->device);
1605         struct mthca_qp *qp = to_mqp(ibqp);
1606         void *wqe;
1607         void *prev_wqe;
1608         unsigned long flags;
1609         int err = 0;
1610         int nreq;
1611         int i;
1612         int size;
1613         /*
1614          * f0 and size0 are only used if nreq != 0, and they will
1615          * always be initialized the first time through the main loop
1616          * before nreq is incremented.  So nreq cannot become non-zero
1617          * without initializing f0 and size0, and they are in fact
1618          * never used uninitialized.
1619          */
1620         int uninitialized_var(size0);
1621         u32 uninitialized_var(f0);
1622         int ind;
1623         u8 op0 = 0;
1624
1625         spin_lock_irqsave(&qp->sq.lock, flags);
1626
1627         /* XXX check that state is OK to post send */
1628
1629         ind = qp->sq.next_ind;
1630
1631         for (nreq = 0; wr; ++nreq, wr = wr->next) {
1632                 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1633                         mthca_err(dev, "SQ %06x full (%u head, %u tail,"
1634                                         " %d max, %d nreq)\n", qp->qpn,
1635                                         qp->sq.head, qp->sq.tail,
1636                                         qp->sq.max, nreq);
1637                         err = -ENOMEM;
1638                         *bad_wr = wr;
1639                         goto out;
1640                 }
1641
1642                 wqe = get_send_wqe(qp, ind);
1643                 prev_wqe = qp->sq.last;
1644                 qp->sq.last = wqe;
1645
1646                 ((struct mthca_next_seg *) wqe)->nda_op = 0;
1647                 ((struct mthca_next_seg *) wqe)->ee_nds = 0;
1648                 ((struct mthca_next_seg *) wqe)->flags =
1649                         ((wr->send_flags & IB_SEND_SIGNALED) ?
1650                          cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
1651                         ((wr->send_flags & IB_SEND_SOLICITED) ?
1652                          cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
1653                         cpu_to_be32(1);
1654                 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
1655                     wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1656                         ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
1657
1658                 wqe += sizeof (struct mthca_next_seg);
1659                 size = sizeof (struct mthca_next_seg) / 16;
1660
1661                 switch (qp->transport) {
1662                 case RC:
1663                         switch (wr->opcode) {
1664                         case IB_WR_ATOMIC_CMP_AND_SWP:
1665                         case IB_WR_ATOMIC_FETCH_AND_ADD:
1666                                 set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
1667                                               wr->wr.atomic.rkey);
1668                                 wqe += sizeof (struct mthca_raddr_seg);
1669
1670                                 set_atomic_seg(wqe, wr);
1671                                 wqe += sizeof (struct mthca_atomic_seg);
1672                                 size += (sizeof (struct mthca_raddr_seg) +
1673                                          sizeof (struct mthca_atomic_seg)) / 16;
1674                                 break;
1675
1676                         case IB_WR_RDMA_WRITE:
1677                         case IB_WR_RDMA_WRITE_WITH_IMM:
1678                         case IB_WR_RDMA_READ:
1679                                 set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
1680                                               wr->wr.rdma.rkey);
1681                                 wqe  += sizeof (struct mthca_raddr_seg);
1682                                 size += sizeof (struct mthca_raddr_seg) / 16;
1683                                 break;
1684
1685                         default:
1686                                 /* No extra segments required for sends */
1687                                 break;
1688                         }
1689
1690                         break;
1691
1692                 case UC:
1693                         switch (wr->opcode) {
1694                         case IB_WR_RDMA_WRITE:
1695                         case IB_WR_RDMA_WRITE_WITH_IMM:
1696                                 set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
1697                                               wr->wr.rdma.rkey);
1698                                 wqe  += sizeof (struct mthca_raddr_seg);
1699                                 size += sizeof (struct mthca_raddr_seg) / 16;
1700                                 break;
1701
1702                         default:
1703                                 /* No extra segments required for sends */
1704                                 break;
1705                         }
1706
1707                         break;
1708
1709                 case UD:
1710                         set_tavor_ud_seg(wqe, wr);
1711                         wqe  += sizeof (struct mthca_tavor_ud_seg);
1712                         size += sizeof (struct mthca_tavor_ud_seg) / 16;
1713                         break;
1714
1715                 case MLX:
1716                         err = build_mlx_header(dev, to_msqp(qp), ind, wr,
1717                                                wqe - sizeof (struct mthca_next_seg),
1718                                                wqe);
1719                         if (err) {
1720                                 *bad_wr = wr;
1721                                 goto out;
1722                         }
1723                         wqe += sizeof (struct mthca_data_seg);
1724                         size += sizeof (struct mthca_data_seg) / 16;
1725                         break;
1726                 }
1727
1728                 if (wr->num_sge > qp->sq.max_gs) {
1729                         mthca_err(dev, "too many gathers\n");
1730                         err = -EINVAL;
1731                         *bad_wr = wr;
1732                         goto out;
1733                 }
1734
1735                 for (i = 0; i < wr->num_sge; ++i) {
1736                         mthca_set_data_seg(wqe, wr->sg_list + i);
1737                         wqe  += sizeof (struct mthca_data_seg);
1738                         size += sizeof (struct mthca_data_seg) / 16;
1739                 }
1740
1741                 /* Add one more inline data segment for ICRC */
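                /*
                 * (Bit 31 of byte_count marks the segment as inline;
                 * the four zeroed bytes that follow stand in for the
                 * ICRC.)
                 */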
1742                 if (qp->transport == MLX) {
1743                         ((struct mthca_data_seg *) wqe)->byte_count =
1744                                 cpu_to_be32((1 << 31) | 4);
1745                         ((u32 *) wqe)[1] = 0;
1746                         wqe += sizeof (struct mthca_data_seg);
1747                         size += sizeof (struct mthca_data_seg) / 16;
1748                 }
1749
1750                 qp->wrid[ind + qp->rq.max] = wr->wr_id;
1751
1752                 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
1753                         mthca_err(dev, "opcode invalid\n");
1754                         err = -EINVAL;
1755                         *bad_wr = wr;
1756                         goto out;
1757                 }
1758
1759                 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1760                         cpu_to_be32(((ind << qp->sq.wqe_shift) +
1761                                      qp->send_wqe_offset) |
1762                                     mthca_opcode[wr->opcode]);
1763                 wmb();
1764                 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1765                         cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
1766                                     ((wr->send_flags & IB_SEND_FENCE) ?
1767                                     MTHCA_NEXT_FENCE : 0));
1768
1769                 if (!nreq) {
1770                         size0 = size;
1771                         op0   = mthca_opcode[wr->opcode];
1772                         f0    = wr->send_flags & IB_SEND_FENCE ?
1773                                 MTHCA_SEND_DOORBELL_FENCE : 0;
1774                 }
1775
1776                 ++ind;
1777                 if (unlikely(ind >= qp->sq.max))
1778                         ind -= qp->sq.max;
1779         }
1780
1781 out:
1782         if (likely(nreq)) {
1783                 wmb();
1784
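                /*
                 * One doorbell covers the whole chain: the first word
                 * gives the offset of the first new WQE together with
                 * its opcode (op0) and fence bit (f0), the second the
                 * QP number and the first WQE's size (size0).
                 */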
1785                 mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) +
1786                                qp->send_wqe_offset) | f0 | op0,
1787                               (qp->qpn << 8) | size0,
1788                               dev->kar + MTHCA_SEND_DOORBELL,
1789                               MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1790                 /*
1791                  * Make sure doorbells don't leak out of SQ spinlock
1792                  * and reach the HCA out of order:
1793                  */
1794                 mmiowb();
1795         }
1796
1797         qp->sq.next_ind = ind;
1798         qp->sq.head    += nreq;
1799
1800         spin_unlock_irqrestore(&qp->sq.lock, flags);
1801         return err;
1802 }
1803
1804 int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1805                              struct ib_recv_wr **bad_wr)
1806 {
1807         struct mthca_dev *dev = to_mdev(ibqp->device);
1808         struct mthca_qp *qp = to_mqp(ibqp);
1809         unsigned long flags;
1810         int err = 0;
1811         int nreq;
1812         int i;
1813         int size;
1814         /*
1815          * size0 is only used if nreq != 0, and it will always be
1816          * initialized the first time through the main loop before
1817          * nreq is incremented.  So nreq cannot become non-zero
1818          * without initializing size0, and it is in fact never used
1819          * uninitialized.
1820          */
1821         int uninitialized_var(size0);
1822         int ind;
1823         void *wqe;
1824         void *prev_wqe;
1825
1826         spin_lock_irqsave(&qp->rq.lock, flags);
1827
1828         /* XXX check that state is OK to post receive */
1829
1830         ind = qp->rq.next_ind;
1831
1832         for (nreq = 0; wr; wr = wr->next) {
1833                 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
1834                         mthca_err(dev, "RQ %06x full (%u head, %u tail,"
1835                                         " %d max, %d nreq)\n", qp->qpn,
1836                                         qp->rq.head, qp->rq.tail,
1837                                         qp->rq.max, nreq);
1838                         err = -ENOMEM;
1839                         *bad_wr = wr;
1840                         goto out;
1841                 }
1842
1843                 wqe = get_recv_wqe(qp, ind);
1844                 prev_wqe = qp->rq.last;
1845                 qp->rq.last = wqe;
1846
1847                 ((struct mthca_next_seg *) wqe)->ee_nds =
1848                         cpu_to_be32(MTHCA_NEXT_DBD);
1849                 ((struct mthca_next_seg *) wqe)->flags = 0;
1850
1851                 wqe += sizeof (struct mthca_next_seg);
1852                 size = sizeof (struct mthca_next_seg) / 16;
1853
1854                 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
1855                         err = -EINVAL;
1856                         *bad_wr = wr;
1857                         goto out;
1858                 }
1859
1860                 for (i = 0; i < wr->num_sge; ++i) {
1861                         mthca_set_data_seg(wqe, wr->sg_list + i);
1862                         wqe  += sizeof (struct mthca_data_seg);
1863                         size += sizeof (struct mthca_data_seg) / 16;
1864                 }
1865
1866                 qp->wrid[ind] = wr->wr_id;
1867
1868                 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1869                         cpu_to_be32(MTHCA_NEXT_DBD | size);
1870
1871                 if (!nreq)
1872                         size0 = size;
1873
1874                 ++ind;
1875                 if (unlikely(ind >= qp->rq.max))
1876                         ind -= qp->rq.max;
1877
1878                 ++nreq;
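                /*
                 * The receive doorbell counts WQEs in its low byte, so
                 * flush the chain to the HCA at most
                 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB entries at a time.
                 */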
1879                 if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
1880                         nreq = 0;
1881
1882                         wmb();
1883
1884                         mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
1885                                       qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL,
1886                                       MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1887
1888                         qp->rq.next_ind = ind;
1889                         qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
1890                 }
1891         }
1892
1893 out:
1894         if (likely(nreq)) {
1895                 wmb();
1896
1897                 mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
1898                               qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL,
1899                               MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1900         }
1901
1902         qp->rq.next_ind = ind;
1903         qp->rq.head    += nreq;
1904
1905         /*
1906          * Make sure doorbells don't leak out of RQ spinlock and reach
1907          * the HCA out of order:
1908          */
1909         mmiowb();
1910
1911         spin_unlock_irqrestore(&qp->rq.lock, flags);
1912         return err;
1913 }
1914
1915 int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1916                           struct ib_send_wr **bad_wr)
1917 {
1918         struct mthca_dev *dev = to_mdev(ibqp->device);
1919         struct mthca_qp *qp = to_mqp(ibqp);
1920         u32 dbhi;
1921         void *wqe;
1922         void *prev_wqe;
1923         unsigned long flags;
1924         int err = 0;
1925         int nreq;
1926         int i;
1927         int size;
1928         /*
1929          * f0 and size0 are only used if nreq != 0, and they will
1930          * always be initialized the first time through the main loop
1931          * before nreq is incremented.  So nreq cannot become non-zero
1932          * without initializing f0 and size0, and they are in fact
1933          * never used uninitialized.
1934          */
1935         int uninitialized_var(size0);
1936         u32 uninitialized_var(f0);
1937         int ind;
1938         u8 op0 = 0;
1939
1940         spin_lock_irqsave(&qp->sq.lock, flags);
1941
1942         /* XXX check that state is OK to post send */
1943
1944         ind = qp->sq.head & (qp->sq.max - 1);
1945
1946         for (nreq = 0; wr; ++nreq, wr = wr->next) {
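                /*
                 * The WQE count lives in an 8-bit field of the
                 * doorbell (dbhi bits 31:24), so if more than
                 * MTHCA_ARBEL_MAX_WQES_PER_SEND_DB requests are
                 * chained, ring the doorbell mid-loop and start a
                 * fresh chain.
                 */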
1947                 if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
1948                         nreq = 0;
1949
1950                         dbhi = (MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
1951                                 ((qp->sq.head & 0xffff) << 8) | f0 | op0;
1952
1953                         qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
1954
1955                         /*
1956                          * Make sure that descriptors are written before
1957                          * doorbell record.
1958                          */
1959                         wmb();
1960                         *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);
1961
1962                         /*
1963                          * Make sure doorbell record is written before we
1964                          * write MMIO send doorbell.
1965                          */
1966                         wmb();
1967
1968                         mthca_write64(dbhi, (qp->qpn << 8) | size0,
1969                                       dev->kar + MTHCA_SEND_DOORBELL,
1970                                       MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1971                 }
1972
1973                 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1974                         mthca_err(dev, "SQ %06x full (%u head, %u tail,"
1975                                         " %d max, %d nreq)\n", qp->qpn,
1976                                         qp->sq.head, qp->sq.tail,
1977                                         qp->sq.max, nreq);
1978                         err = -ENOMEM;
1979                         *bad_wr = wr;
1980                         goto out;
1981                 }
1982
1983                 wqe = get_send_wqe(qp, ind);
1984                 prev_wqe = qp->sq.last;
1985                 qp->sq.last = wqe;
1986
1987                 ((struct mthca_next_seg *) wqe)->flags =
1988                         ((wr->send_flags & IB_SEND_SIGNALED) ?
1989                          cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
1990                         ((wr->send_flags & IB_SEND_SOLICITED) ?
1991                          cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
1992                         ((wr->send_flags & IB_SEND_IP_CSUM) ?
1993                          cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) |
1994                         cpu_to_be32(1);
1995                 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
1996                     wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1997                         ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
1998
1999                 wqe += sizeof (struct mthca_next_seg);
2000                 size = sizeof (struct mthca_next_seg) / 16;
2001
2002                 switch (qp->transport) {
2003                 case RC:
2004                         switch (wr->opcode) {
2005                         case IB_WR_ATOMIC_CMP_AND_SWP:
2006                         case IB_WR_ATOMIC_FETCH_AND_ADD:
2007                                 set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
2008                                               wr->wr.atomic.rkey);
2009                                 wqe += sizeof (struct mthca_raddr_seg);
2010
2011                                 set_atomic_seg(wqe, wr);
2012                                 wqe  += sizeof (struct mthca_atomic_seg);
2013                                 size += (sizeof (struct mthca_raddr_seg) +
2014                                          sizeof (struct mthca_atomic_seg)) / 16;
2015                                 break;
2016
2017                         case IB_WR_RDMA_READ:
2018                         case IB_WR_RDMA_WRITE:
2019                         case IB_WR_RDMA_WRITE_WITH_IMM:
2020                                 set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
2021                                               wr->wr.rdma.rkey);
2022                                 wqe  += sizeof (struct mthca_raddr_seg);
2023                                 size += sizeof (struct mthca_raddr_seg) / 16;
2024                                 break;
2025
2026                         default:
2027                                 /* No extra segments required for sends */
2028                                 break;
2029                         }
2030
2031                         break;
2032
2033                 case UC:
2034                         switch (wr->opcode) {
2035                         case IB_WR_RDMA_WRITE:
2036                         case IB_WR_RDMA_WRITE_WITH_IMM:
2037                                 set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
2038                                               wr->wr.rdma.rkey);
2039                                 wqe  += sizeof (struct mthca_raddr_seg);
2040                                 size += sizeof (struct mthca_raddr_seg) / 16;
2041                                 break;
2042
2043                         default:
2044                                 /* No extra segments required for sends */
2045                                 break;
2046                         }
2047
2048                         break;
2049
2050                 case UD:
2051                         set_arbel_ud_seg(wqe, wr);
2052                         wqe  += sizeof (struct mthca_arbel_ud_seg);
2053                         size += sizeof (struct mthca_arbel_ud_seg) / 16;
2054                         break;
2055
2056                 case MLX:
2057                         err = build_mlx_header(dev, to_msqp(qp), ind, wr,
2058                                                wqe - sizeof (struct mthca_next_seg),
2059                                                wqe);
2060                         if (err) {
2061                                 *bad_wr = wr;
2062                                 goto out;
2063                         }
2064                         wqe += sizeof (struct mthca_data_seg);
2065                         size += sizeof (struct mthca_data_seg) / 16;
2066                         break;
2067                 }
2068
2069                 if (wr->num_sge > qp->sq.max_gs) {
2070                         mthca_err(dev, "too many gathers\n");
2071                         err = -EINVAL;
2072                         *bad_wr = wr;
2073                         goto out;
2074                 }
2075
2076                 for (i = 0; i < wr->num_sge; ++i) {
2077                         mthca_set_data_seg(wqe, wr->sg_list + i);
2078                         wqe  += sizeof (struct mthca_data_seg);
2079                         size += sizeof (struct mthca_data_seg) / 16;
2080                 }
2081
2082                 /* Add one more inline data segment for ICRC */
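                /*
                 * (As in the Tavor path: bit 31 marks inline data and
                 * the four zeroed bytes stand in for the ICRC.)
                 */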
2083                 if (qp->transport == MLX) {
2084                         ((struct mthca_data_seg *) wqe)->byte_count =
2085                                 cpu_to_be32((1 << 31) | 4);
2086                         ((u32 *) wqe)[1] = 0;
2087                         wqe += sizeof (struct mthca_data_seg);
2088                         size += sizeof (struct mthca_data_seg) / 16;
2089                 }
2090
2091                 qp->wrid[ind + qp->rq.max] = wr->wr_id;
2092
2093                 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
2094                         mthca_err(dev, "opcode invalid\n");
2095                         err = -EINVAL;
2096                         *bad_wr = wr;
2097                         goto out;
2098                 }
2099
2100                 ((struct mthca_next_seg *) prev_wqe)->nda_op =
2101                         cpu_to_be32(((ind << qp->sq.wqe_shift) +
2102                                      qp->send_wqe_offset) |
2103                                     mthca_opcode[wr->opcode]);
2104                 wmb();
2105                 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
2106                         cpu_to_be32(MTHCA_NEXT_DBD | size |
2107                                     ((wr->send_flags & IB_SEND_FENCE) ?
2108                                      MTHCA_NEXT_FENCE : 0));
2109
2110                 if (!nreq) {
2111                         size0 = size;
2112                         op0   = mthca_opcode[wr->opcode];
2113                         f0    = wr->send_flags & IB_SEND_FENCE ?
2114                                 MTHCA_SEND_DOORBELL_FENCE : 0;
2115                 }
2116
2117                 ++ind;
2118                 if (unlikely(ind >= qp->sq.max))
2119                         ind -= qp->sq.max;
2120         }
2121
2122 out:
2123         if (likely(nreq)) {
2124                 dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0;
2125
2126                 qp->sq.head += nreq;
2127
2128                 /*
2129                  * Make sure that descriptors are written before
2130                  * doorbell record.
2131                  */
2132                 wmb();
2133                 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);
2134
2135                 /*
2136                  * Make sure doorbell record is written before we
2137                  * write MMIO send doorbell.
2138                  */
2139                 wmb();
2140
2141                 mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL,
2142                               MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
2143         }
2144
2145         /*
2146          * Make sure doorbells don't leak out of SQ spinlock and reach
2147          * the HCA out of order:
2148          */
2149         mmiowb();
2150
2151         spin_unlock_irqrestore(&qp->sq.lock, flags);
2152         return err;
2153 }
2154
2155 int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2156                              struct ib_recv_wr **bad_wr)
2157 {
2158         struct mthca_dev *dev = to_mdev(ibqp->device);
2159         struct mthca_qp *qp = to_mqp(ibqp);
2160         unsigned long flags;
2161         int err = 0;
2162         int nreq;
2163         int ind;
2164         int i;
2165         void *wqe;
2166
2167         spin_lock_irqsave(&qp->rq.lock, flags);
2168
2169         /* XXX check that state is OK to post receive */
2170
2171         ind = qp->rq.head & (qp->rq.max - 1);
2172
2173         for (nreq = 0; wr; ++nreq, wr = wr->next) {
2174                 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2175                         mthca_err(dev, "RQ %06x full (%u head, %u tail,"
2176                                         " %d max, %d nreq)\n", qp->qpn,
2177                                         qp->rq.head, qp->rq.tail,
2178                                         qp->rq.max, nreq);
2179                         err = -ENOMEM;
2180                         *bad_wr = wr;
2181                         goto out;
2182                 }
2183
2184                 wqe = get_recv_wqe(qp, ind);
2185
2186                 ((struct mthca_next_seg *) wqe)->flags = 0;
2187
2188                 wqe += sizeof (struct mthca_next_seg);
2189
2190                 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2191                         err = -EINVAL;
2192                         *bad_wr = wr;
2193                         goto out;
2194                 }
2195
2196                 for (i = 0; i < wr->num_sge; ++i) {
2197                         mthca_set_data_seg(wqe, wr->sg_list + i);
2198                         wqe += sizeof (struct mthca_data_seg);
2199                 }
2200
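                /*
                 * Mem-free HCAs scan a fixed-size scatter list, so if
                 * fewer than max_gs SGEs were posted, the next entry
                 * is marked invalid to terminate the list for the
                 * hardware.
                 */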
2201                 if (i < qp->rq.max_gs)
2202                         mthca_set_data_seg_inval(wqe);
2203
2204                 qp->wrid[ind] = wr->wr_id;
2205
2206                 ++ind;
2207                 if (unlikely(ind >= qp->rq.max))
2208                         ind -= qp->rq.max;
2209         }
2210 out:
2211         if (likely(nreq)) {
2212                 qp->rq.head += nreq;
2213
2214                 /*
2215                  * Make sure that descriptors are written before
2216                  * doorbell record.
2217                  */
2218                 wmb();
2219                 *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
2220         }
2221
2222         spin_unlock_irqrestore(&qp->rq.lock, flags);
2223         return err;
2224 }
2225
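/*
 * When a WQE completes in error, the CQ code needs to know whether the
 * WQE owned a doorbell (DBD bit) and what its next-WQE/size word was,
 * so it can keep the hardware's WQE chain consistent; this helper
 * extracts both from the WQE's next segment.
 */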
2226 void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
2227                         int index, int *dbd, __be32 *new_wqe)
2228 {
2229         struct mthca_next_seg *next;
2230
2231         /*
2232          * For SRQs, all receive WQEs generate a CQE, so we're always
2233          * at the end of the doorbell chain.
2234          */
2235         if (qp->ibqp.srq && !is_send) {
2236                 *new_wqe = 0;
2237                 return;
2238         }
2239
2240         if (is_send)
2241                 next = get_send_wqe(qp, index);
2242         else
2243                 next = get_recv_wqe(qp, index);
2244
2245         *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
2246         if (next->ee_nds & cpu_to_be32(0x3f))
2247                 *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
2248                         (next->ee_nds & cpu_to_be32(0x3f));
2249         else
2250                 *new_wqe = 0;
2251 }
2252
2253 int mthca_init_qp_table(struct mthca_dev *dev)
2254 {
2255         int err;
2256         int i;
2257
2258         spin_lock_init(&dev->qp_table.lock);
2259
2260         /*
2261          * We reserve 2 extra QPs per port for the special QPs.  The
2262          * special QP for port 1 has to be even, so round up.
2263          */
2264         dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
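        /*
         * For example, reserved_qps == 5 gives sqp_start == 6, QPs
         * 6..9 hold the SMI/GSI QPs of ports 1 and 2, and ordinary
         * QPs are handed out starting at sqp_start +
         * MTHCA_MAX_PORTS * 2.
         */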
2265         err = mthca_alloc_init(&dev->qp_table.alloc,
2266                                dev->limits.num_qps,
2267                                (1 << 24) - 1,
2268                                dev->qp_table.sqp_start +
2269                                MTHCA_MAX_PORTS * 2);
2270         if (err)
2271                 return err;
2272
2273         err = mthca_array_init(&dev->qp_table.qp,
2274                                dev->limits.num_qps);
2275         if (err) {
2276                 mthca_alloc_cleanup(&dev->qp_table.alloc);
2277                 return err;
2278         }
2279
2280         for (i = 0; i < 2; ++i) {
2281                 err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
2282                                     dev->qp_table.sqp_start + i * 2);
2283                 if (err) {
2284                         mthca_warn(dev, "CONF_SPECIAL_QP returned "
2285                                    "%d, aborting.\n", err);
2286                         goto err_out;
2287                 }
2288         }
2289         return 0;
2290
2291  err_out:
2292         for (i = 0; i < 2; ++i)
2293                 mthca_CONF_SPECIAL_QP(dev, i, 0);
2294
2295         mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
2296         mthca_alloc_cleanup(&dev->qp_table.alloc);
2297
2298         return err;
2299 }
2300
2301 void mthca_cleanup_qp_table(struct mthca_dev *dev)
2302 {
2303         int i;
2304
2305         for (i = 0; i < 2; ++i)
2306                 mthca_CONF_SPECIAL_QP(dev, i, 0);
2307
2308         mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
2309         mthca_alloc_cleanup(&dev->qp_table.alloc);
2310 }