IB/qib: Clear WAIT_SEND flags when setting QP to error state
drivers/infiniband/hw/qib/qib_qp.c
/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "qib.h"

#define BITS_PER_PAGE           (PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)

static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
                              struct qpn_map *map, unsigned off)
{
        return (map - qpt->map) * BITS_PER_PAGE + off;
}
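
/*
 * Illustrative sketch (not part of the driver): mk_qpn() is the inverse
 * of the QPN decomposition used by alloc_qpn() and free_qpn() below,
 * where a QPN selects a bitmap page and a bit within that page:
 *
 *	struct qpn_map *map = qpt->map + qpn / BITS_PER_PAGE;
 *	unsigned off = qpn & BITS_PER_PAGE_MASK;
 *
 * so that mk_qpn(qpt, map, off) == qpn for any in-range QPN.
 */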

static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
                                        struct qpn_map *map, unsigned off,
                                        unsigned r)
{
        if (qpt->mask) {
                off++;
                if ((off & qpt->mask) >> 1 != r)
                        off = ((off & qpt->mask) ?
                                (off | qpt->mask) + 1 : off) | (r << 1);
        } else
                off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
        return off;
}
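
/*
 * Worked example (mask value hypothetical, for exposition only): with
 * qpt->mask == 0x6, the receive context number r occupies QPN bits 2:1.
 * For r == 2, offset 3 decodes to context (3 & 0x6) >> 1 == 1, so the
 * expression above rounds it up to the next offset owned by context 2:
 *
 *	off = ((3 & 0x6) ? (3 | 0x6) + 1 : 3) | (2 << 1);
 *
 * which yields 12, and (12 & 0x6) >> 1 == 2 as required.
 */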

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
        0,                      /* 0 */
        1,                      /* 1 */
        2,                      /* 2 */
        3,                      /* 3 */
        4,                      /* 4 */
        6,                      /* 5 */
        8,                      /* 6 */
        12,                     /* 7 */
        16,                     /* 8 */
        24,                     /* 9 */
        32,                     /* A */
        48,                     /* B */
        64,                     /* C */
        96,                     /* D */
        128,                    /* E */
        192,                    /* F */
        256,                    /* 10 */
        384,                    /* 11 */
        512,                    /* 12 */
        768,                    /* 13 */
        1024,                   /* 14 */
        1536,                   /* 15 */
        2048,                   /* 16 */
        3072,                   /* 17 */
        4096,                   /* 18 */
        6144,                   /* 19 */
        8192,                   /* 1A */
        12288,                  /* 1B */
        16384,                  /* 1C */
        24576,                  /* 1D */
        32768                   /* 1E */
};
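
/*
 * Decode sketch (mirrors qib_get_credit() at the end of this file): the
 * 5-bit credit code carried in an AETH indexes this table, so e.g. code
 * 0x7 grants 12 RWQE credits:
 *
 *	u32 code = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
 *	u32 credits = credit_table[code];
 *
 * The inverse mapping, from available credits to the largest code that
 * does not overstate them, is the binary search in qib_compute_aeth().
 */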

static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
{
        unsigned long page = get_zeroed_page(GFP_KERNEL);

        /*
         * Free the page if someone raced with us installing it.
         */

        spin_lock(&qpt->lock);
        if (map->page)
                free_page(page);
        else
                map->page = (void *)page;
        spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
                     enum ib_qp_type type, u8 port)
{
        u32 i, offset, max_scan, qpn;
        struct qpn_map *map;
        u32 ret;
        int r;

        if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
                unsigned n;

                ret = type == IB_QPT_GSI;
                n = 1 << (ret + 2 * (port - 1));
                spin_lock(&qpt->lock);
                if (qpt->flags & n)
                        ret = -EINVAL;
                else
                        qpt->flags |= n;
                spin_unlock(&qpt->lock);
                goto bail;
        }

        r = smp_processor_id();
        if (r >= dd->n_krcv_queues)
                r %= dd->n_krcv_queues;
        qpn = qpt->last + 1;
        if (qpn >= QPN_MAX)
                qpn = 2;
        if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
                qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
                        (r << 1);
        offset = qpn & BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page))
                                break;
                }
                do {
                        if (!test_and_set_bit(offset, map->page)) {
                                qpt->last = qpn;
                                ret = qpn;
                                goto bail;
                        }
                        offset = find_next_offset(qpt, map, offset, r);
                        qpn = mk_qpn(qpt, map, offset);
                        /*
                         * This test differs from alloc_pidmap().
                         * If find_next_offset() does find a zero
                         * bit, we don't need to check for QPN
                         * wrapping around past our starting QPN.
                         * We just need to be sure we don't loop
                         * forever.
                         */
                } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all the existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                        offset = qpt->mask ? (r << 1) : 0;
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
                        offset = qpt->mask ? (r << 1) : 0;
                } else {
                        map = &qpt->map[0];
                        offset = qpt->mask ? (r << 1) : 2;
                }
                qpn = mk_qpn(qpt, map, offset);
        }

        ret = -ENOMEM;

bail:
        return ret;
}
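
/*
 * Usage sketch (mirrors qib_create_qp() and qib_destroy_qp() below): a
 * QPN is claimed once per QP and must be released on every exit path:
 *
 *	err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
 *			init_attr->port_num);
 *	if (err < 0)
 *		return ERR_PTR(err);
 *	qp->ibqp.qp_num = err;
 *	...
 *	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
 */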

static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
        struct qpn_map *map;

        map = qpt->map + qpn / BITS_PER_PAGE;
        if (map->page)
                clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
        unsigned long flags;

        spin_lock_irqsave(&dev->qpt_lock, flags);

        if (qp->ibqp.qp_num == 0)
                ibp->qp0 = qp;
        else if (qp->ibqp.qp_num == 1)
                ibp->qp1 = qp;
        else {
                qp->next = dev->qp_table[n];
                dev->qp_table[n] = qp;
        }
        atomic_inc(&qp->refcount);

        spin_unlock_irqrestore(&dev->qpt_lock, flags);
}

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        struct qib_qp *q, **qpp;
        unsigned long flags;

        qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];

        spin_lock_irqsave(&dev->qpt_lock, flags);

        if (ibp->qp0 == qp) {
                ibp->qp0 = NULL;
                atomic_dec(&qp->refcount);
        } else if (ibp->qp1 == qp) {
                ibp->qp1 = NULL;
                atomic_dec(&qp->refcount);
        } else
                for (; (q = *qpp) != NULL; qpp = &q->next)
                        if (q == qp) {
                                *qpp = qp->next;
                                qp->next = NULL;
                                atomic_dec(&qp->refcount);
                                break;
                        }

        spin_unlock_irqrestore(&dev->qpt_lock, flags);
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the qlogic_ib device
 *
 * There should not be any QPs still in use.
 * Returns the number of QPs still in use.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
{
        struct qib_ibdev *dev = &dd->verbs_dev;
        unsigned long flags;
        struct qib_qp *qp;
        unsigned n, qp_inuse = 0;

        for (n = 0; n < dd->num_pports; n++) {
                struct qib_ibport *ibp = &dd->pport[n].ibport_data;

                if (!qib_mcast_tree_empty(ibp))
                        qp_inuse++;
                if (ibp->qp0)
                        qp_inuse++;
                if (ibp->qp1)
                        qp_inuse++;
        }

        spin_lock_irqsave(&dev->qpt_lock, flags);
        for (n = 0; n < dev->qp_table_size; n++) {
                qp = dev->qp_table[n];
                dev->qp_table[n] = NULL;

                for (; qp; qp = qp->next)
                        qp_inuse++;
        }
        spin_unlock_irqrestore(&dev->qpt_lock, flags);

        return qp_inuse;
}

/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port on which to look up the QPN
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
        struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
        unsigned long flags;
        struct qib_qp *qp;

        spin_lock_irqsave(&dev->qpt_lock, flags);

        if (qpn == 0)
                qp = ibp->qp0;
        else if (qpn == 1)
                qp = ibp->qp1;
        else
                for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp;
                     qp = qp->next)
                        if (qp->ibqp.qp_num == qpn)
                                break;
        if (qp)
                atomic_inc(&qp->refcount);

        spin_unlock_irqrestore(&dev->qpt_lock, flags);
        return qp;
}
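
/*
 * Caller sketch (the same pattern the driver's receive path uses): the
 * reference taken by qib_lookup_qpn() must be dropped when the caller is
 * done, waking anyone in remove_qp()/qib_destroy_qp() who is waiting for
 * the count to reach zero:
 *
 *	struct qib_qp *qp = qib_lookup_qpn(ibp, qpn);
 *
 *	if (qp) {
 *		... process the packet ...
 *		if (atomic_dec_and_test(&qp->refcount))
 *			wake_up(&qp->wait);
 *	}
 */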

/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
{
        qp->remote_qpn = 0;
        qp->qkey = 0;
        qp->qp_access_flags = 0;
        atomic_set(&qp->s_dma_busy, 0);
        qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
        qp->s_hdrwords = 0;
        qp->s_wqe = NULL;
        qp->s_draining = 0;
        qp->s_next_psn = 0;
        qp->s_last_psn = 0;
        qp->s_sending_psn = 0;
        qp->s_sending_hpsn = 0;
        qp->s_psn = 0;
        qp->r_psn = 0;
        qp->r_msn = 0;
        if (type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
        } else {
                qp->s_state = IB_OPCODE_UC_SEND_LAST;
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        }
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->r_nak_state = 0;
        qp->r_aflags = 0;
        qp->r_flags = 0;
        qp->s_head = 0;
        qp->s_tail = 0;
        qp->s_cur = 0;
        qp->s_acked = 0;
        qp->s_last = 0;
        qp->s_ssn = 1;
        qp->s_lsn = 0;
        qp->s_mig_state = IB_MIG_MIGRATED;
        memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
        if (qp->r_rq.wq) {
                qp->r_rq.wq->head = 0;
                qp->r_rq.wq->tail = 0;
        }
        qp->r_sge.num_sge = 0;
}

static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
{
        unsigned n;

        if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
                while (qp->s_rdma_read_sge.num_sge) {
                        atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
                        if (--qp->s_rdma_read_sge.num_sge)
                                qp->s_rdma_read_sge.sge =
                                        *qp->s_rdma_read_sge.sg_list++;
                }

        while (qp->r_sge.num_sge) {
                atomic_dec(&qp->r_sge.sge.mr->refcount);
                if (--qp->r_sge.num_sge)
                        qp->r_sge.sge = *qp->r_sge.sg_list++;
        }

        if (clr_sends) {
                while (qp->s_last != qp->s_head) {
                        struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
                        unsigned i;

                        for (i = 0; i < wqe->wr.num_sge; i++) {
                                struct qib_sge *sge = &wqe->sg_list[i];

                                atomic_dec(&sge->mr->refcount);
                        }
                        if (qp->ibqp.qp_type == IB_QPT_UD ||
                            qp->ibqp.qp_type == IB_QPT_SMI ||
                            qp->ibqp.qp_type == IB_QPT_GSI)
                                atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
                        if (++qp->s_last >= qp->s_size)
                                qp->s_last = 0;
                }
                if (qp->s_rdma_mr) {
                        atomic_dec(&qp->s_rdma_mr->refcount);
                        qp->s_rdma_mr = NULL;
                }
        }

        if (qp->ibqp.qp_type != IB_QPT_RC)
                return;

        for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
                struct qib_ack_entry *e = &qp->s_ack_queue[n];

                if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
                    e->rdma_sge.mr) {
                        atomic_dec(&e->rdma_sge.mr->refcount);
                        e->rdma_sge.mr = NULL;
                }
        }
}

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if an RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if the last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in the error state, just return.
 */
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
{
        struct qib_ibdev *dev = to_idev(qp->ibqp.device);
        struct ib_wc wc;
        int ret = 0;

        if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
                goto bail;

        qp->state = IB_QPS_ERR;

        if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
                qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
                del_timer(&qp->s_timer);
        }

        if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
                qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

        spin_lock(&dev->pending_lock);
        if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
                qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
                list_del_init(&qp->iowait);
        }
        spin_unlock(&dev->pending_lock);

        if (!(qp->s_flags & QIB_S_BUSY)) {
                qp->s_hdrwords = 0;
                if (qp->s_rdma_mr) {
                        atomic_dec(&qp->s_rdma_mr->refcount);
                        qp->s_rdma_mr = NULL;
                }
                if (qp->s_tx) {
                        qib_put_txreq(qp->s_tx);
                        qp->s_tx = NULL;
                }
        }

        /* Schedule the sending tasklet to drain the send work queue. */
        if (qp->s_last != qp->s_head)
                qib_schedule_send(qp);

        clear_mr_refs(qp, 0);

        memset(&wc, 0, sizeof(wc));
        wc.qp = &qp->ibqp;
        wc.opcode = IB_WC_RECV;

        if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
                wc.wr_id = qp->r_wr_id;
                wc.status = err;
                qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        }
        wc.status = IB_WC_WR_FLUSH_ERR;

        if (qp->r_rq.wq) {
                struct qib_rwq *wq;
                u32 head;
                u32 tail;

                spin_lock(&qp->r_rq.lock);

                /* sanity check pointers before trusting them */
                wq = qp->r_rq.wq;
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                while (tail != head) {
                        wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
                        if (++tail >= qp->r_rq.size)
                                tail = 0;
                        qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
                }
                wq->tail = tail;

                spin_unlock(&qp->r_rq.lock);
        } else if (qp->ibqp.event_handler)
                ret = 1;

bail:
        return ret;
}
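
/*
 * Caller sketch (mirrors the IB_QPS_ERR case in qib_modify_qp() below):
 * when qib_error_qp() returns true, the caller reports the last WQE
 * reached event once the locks are dropped:
 *
 *	lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
 *	...
 *	if (lastwqe) {
 *		struct ib_event ev;
 *
 *		ev.device = qp->ibqp.device;
 *		ev.element.qp = &qp->ibqp;
 *		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
 *		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
 *	}
 */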

/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                  int attr_mask, struct ib_udata *udata)
{
        struct qib_ibdev *dev = to_idev(ibqp->device);
        struct qib_qp *qp = to_iqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct ib_event ev;
        int lastwqe = 0;
        int mig = 0;
        int ret;
        u32 pmtu = 0; /* for gcc warning only */

        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_lock);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask))
                goto inval;

        if (attr_mask & IB_QP_AV) {
                if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
                        goto inval;
                if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
                        goto inval;
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
                        goto inval;
                if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
                        goto inval;
                if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
                        goto inval;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
                        goto inval;

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                if (attr->min_rnr_timer > 31)
                        goto inval;

        if (attr_mask & IB_QP_PORT)
                if (qp->ibqp.qp_type == IB_QPT_SMI ||
                    qp->ibqp.qp_type == IB_QPT_GSI ||
                    attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt)
                        goto inval;

        if (attr_mask & IB_QP_DEST_QPN)
                if (attr->dest_qp_num > QIB_QPN_MASK)
                        goto inval;

        if (attr_mask & IB_QP_RETRY_CNT)
                if (attr->retry_cnt > 7)
                        goto inval;

        if (attr_mask & IB_QP_RNR_RETRY)
                if (attr->rnr_retry > 7)
                        goto inval;

        /*
         * Don't allow invalid path_mtu values.  OK to set greater
         * than the active mtu (or even the max_cap, if we have tuned
         * that to a small mtu).  We'll set qp->path_mtu
         * to the lesser of the requested attribute mtu and the active mtu,
         * for packetizing messages.
         * Note that the QP port has to be set in INIT and MTU in RTR.
         */
        if (attr_mask & IB_QP_PATH_MTU) {
                struct qib_devdata *dd = dd_from_dev(dev);
                int mtu, pidx = qp->port_num - 1;

                mtu = ib_mtu_enum_to_int(attr->path_mtu);
                if (mtu == -1)
                        goto inval;
                if (mtu > dd->pport[pidx].ibmtu) {
                        switch (dd->pport[pidx].ibmtu) {
                        case 4096:
                                pmtu = IB_MTU_4096;
                                break;
                        case 2048:
                                pmtu = IB_MTU_2048;
                                break;
                        case 1024:
                                pmtu = IB_MTU_1024;
                                break;
                        case 512:
                                pmtu = IB_MTU_512;
                                break;
                        case 256:
                                pmtu = IB_MTU_256;
                                break;
                        default:
                                pmtu = IB_MTU_2048;
                        }
                } else
                        pmtu = attr->path_mtu;
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                if (attr->path_mig_state == IB_MIG_REARM) {
                        if (qp->s_mig_state == IB_MIG_ARMED)
                                goto inval;
                        if (new_state != IB_QPS_RTS)
                                goto inval;
                } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
                        if (qp->s_mig_state == IB_MIG_REARM)
                                goto inval;
                        if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
                                goto inval;
                        if (qp->s_mig_state == IB_MIG_ARMED)
                                mig = 1;
                } else
                        goto inval;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
                        goto inval;

        switch (new_state) {
        case IB_QPS_RESET:
                if (qp->state != IB_QPS_RESET) {
                        qp->state = IB_QPS_RESET;
                        spin_lock(&dev->pending_lock);
                        if (!list_empty(&qp->iowait))
                                list_del_init(&qp->iowait);
                        spin_unlock(&dev->pending_lock);
                        qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
                        spin_unlock(&qp->s_lock);
                        spin_unlock_irq(&qp->r_lock);
                        /* Stop the sending work queue and retry timer */
                        cancel_work_sync(&qp->s_work);
                        del_timer_sync(&qp->s_timer);
                        wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
                        if (qp->s_tx) {
                                qib_put_txreq(qp->s_tx);
                                qp->s_tx = NULL;
                        }
                        remove_qp(dev, qp);
                        wait_event(qp->wait, !atomic_read(&qp->refcount));
                        spin_lock_irq(&qp->r_lock);
                        spin_lock(&qp->s_lock);
                        clear_mr_refs(qp, 1);
                        qib_reset_qp(qp, ibqp->qp_type);
                }
                break;

        case IB_QPS_RTR:
                /* Allow event to retrigger if QP set to RTR more than once */
                qp->r_flags &= ~QIB_R_COMM_EST;
                qp->state = new_state;
                break;

        case IB_QPS_SQD:
                qp->s_draining = qp->s_last != qp->s_cur;
                qp->state = new_state;
                break;

        case IB_QPS_SQE:
                if (qp->ibqp.qp_type == IB_QPT_RC)
                        goto inval;
                qp->state = new_state;
                break;

        case IB_QPS_ERR:
                lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                break;

        default:
                qp->state = new_state;
                break;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->s_pkey_index = attr->pkey_index;

        if (attr_mask & IB_QP_PORT)
                qp->port_num = attr->port_num;

        if (attr_mask & IB_QP_DEST_QPN)
                qp->remote_qpn = attr->dest_qp_num;

        if (attr_mask & IB_QP_SQ_PSN) {
                qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
                qp->s_psn = qp->s_next_psn;
                qp->s_sending_psn = qp->s_next_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
                qp->s_sending_hpsn = qp->s_last_psn;
        }

        if (attr_mask & IB_QP_RQ_PSN)
                qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->qp_access_flags = attr->qp_access_flags;

        if (attr_mask & IB_QP_AV) {
                qp->remote_ah_attr = attr->ah_attr;
                qp->s_srate = attr->ah_attr.static_rate;
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                qp->alt_ah_attr = attr->alt_ah_attr;
                qp->s_alt_pkey_index = attr->alt_pkey_index;
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                qp->s_mig_state = attr->path_mig_state;
                if (mig) {
                        qp->remote_ah_attr = qp->alt_ah_attr;
                        qp->port_num = qp->alt_ah_attr.port_num;
                        qp->s_pkey_index = qp->s_alt_pkey_index;
                }
        }

        if (attr_mask & IB_QP_PATH_MTU)
                qp->path_mtu = pmtu;

        if (attr_mask & IB_QP_RETRY_CNT) {
                qp->s_retry_cnt = attr->retry_cnt;
                qp->s_retry = attr->retry_cnt;
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp->s_rnr_retry_cnt = attr->rnr_retry;
                qp->s_rnr_retry = attr->rnr_retry;
        }

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;

        if (attr_mask & IB_QP_TIMEOUT)
                qp->timeout = attr->timeout;

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
                qp->s_max_rd_atomic = attr->max_rd_atomic;

        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);

        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
                insert_qp(dev, qp);

        if (lastwqe) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        if (mig) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_PATH_MIG;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        ret = 0;
        goto bail;

inval:
        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);
        ret = -EINVAL;

bail:
        return ret;
}

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 int attr_mask, struct ib_qp_init_attr *init_attr)
{
        struct qib_qp *qp = to_iqp(ibqp);

        attr->qp_state = qp->state;
        attr->cur_qp_state = attr->qp_state;
        attr->path_mtu = qp->path_mtu;
        attr->path_mig_state = qp->s_mig_state;
        attr->qkey = qp->qkey;
        attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
        attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
        attr->dest_qp_num = qp->remote_qpn;
        attr->qp_access_flags = qp->qp_access_flags;
        attr->cap.max_send_wr = qp->s_size - 1;
        attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
        attr->cap.max_send_sge = qp->s_max_sge;
        attr->cap.max_recv_sge = qp->r_rq.max_sge;
        attr->cap.max_inline_data = 0;
        attr->ah_attr = qp->remote_ah_attr;
        attr->alt_ah_attr = qp->alt_ah_attr;
        attr->pkey_index = qp->s_pkey_index;
        attr->alt_pkey_index = qp->s_alt_pkey_index;
        attr->en_sqd_async_notify = 0;
        attr->sq_draining = qp->s_draining;
        attr->max_rd_atomic = qp->s_max_rd_atomic;
        attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
        attr->min_rnr_timer = qp->r_min_rnr_timer;
        attr->port_num = qp->port_num;
        attr->timeout = qp->timeout;
        attr->retry_cnt = qp->s_retry_cnt;
        attr->rnr_retry = qp->s_rnr_retry_cnt;
        attr->alt_port_num = qp->alt_ah_attr.port_num;
        attr->alt_timeout = qp->alt_timeout;

        init_attr->event_handler = qp->ibqp.event_handler;
        init_attr->qp_context = qp->ibqp.qp_context;
        init_attr->send_cq = qp->ibqp.send_cq;
        init_attr->recv_cq = qp->ibqp.recv_cq;
        init_attr->srq = qp->ibqp.srq;
        init_attr->cap = attr->cap;
        if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
                init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        else
                init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->qp_type = qp->ibqp.qp_type;
        init_attr->port_num = qp->port_num;
        return 0;
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct qib_qp *qp)
{
        u32 aeth = qp->r_msn & QIB_MSN_MASK;

        if (qp->ibqp.srq) {
                /*
                 * Shared receive queues don't generate credits.
                 * Set the credit field to the invalid value.
                 */
                aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
        } else {
                u32 min, max, x;
                u32 credits;
                struct qib_rwq *wq = qp->r_rq.wq;
                u32 head;
                u32 tail;

                /* sanity check pointers before trusting them */
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                /*
                 * Compute the number of credits available (RWQEs).
                 * XXX Not holding the r_rq.lock here so there is a small
                 * chance that the pair of reads are not atomic.
                 */
                credits = head - tail;
                if ((int)credits < 0)
                        credits += qp->r_rq.size;
                /*
                 * Binary search the credit table to find the code to
                 * use.
                 */
                min = 0;
                max = 31;
                for (;;) {
                        x = (min + max) / 2;
                        if (credit_table[x] == credits)
                                break;
                        if (credit_table[x] > credits)
                                max = x;
                        else if (min == x)
                                break;
                        else
                                min = x;
                }
                aeth |= x << QIB_AETH_CREDIT_SHIFT;
        }
        return cpu_to_be32(aeth);
}
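
/*
 * Worked example of the binary search above: with 100 RWQEs available,
 * the loop converges on x == 0xD (credit_table[0xD] == 96), the largest
 * code whose credit count does not exceed what is actually available,
 * so the peer is never promised more receive buffers than exist:
 *
 *	x = 15 (192 > 100) -> max = 15
 *	x =  7 ( 12 < 100) -> min = 7
 *	x = 11 ( 48 < 100) -> min = 11
 *	x = 13 ( 96 < 100) -> min = 13
 *	x = 14 (128 > 100) -> max = 14
 *	x = 13, min == x   -> break
 */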

/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata)
{
        struct qib_qp *qp;
        int err;
        struct qib_swqe *swq = NULL;
        struct qib_ibdev *dev;
        struct qib_devdata *dd;
        size_t sz;
        size_t sg_list_sz;
        struct ib_qp *ret;

        if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
            init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        /* Check receive queue parameters if no SRQ is specified. */
        if (!init_attr->srq) {
                if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
                    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
                        ret = ERR_PTR(-EINVAL);
                        goto bail;
                }
                if (init_attr->cap.max_send_sge +
                    init_attr->cap.max_send_wr +
                    init_attr->cap.max_recv_sge +
                    init_attr->cap.max_recv_wr == 0) {
                        ret = ERR_PTR(-EINVAL);
                        goto bail;
                }
        }

        switch (init_attr->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                if (init_attr->port_num == 0 ||
                    init_attr->port_num > ibpd->device->phys_port_cnt) {
                        ret = ERR_PTR(-EINVAL);
                        goto bail;
                }
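                /* fall through: SMI/GSI QPs share the setup below */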
        case IB_QPT_UC:
        case IB_QPT_RC:
        case IB_QPT_UD:
                sz = sizeof(struct qib_sge) *
                        init_attr->cap.max_send_sge +
                        sizeof(struct qib_swqe);
                swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
                if (swq == NULL) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail;
                }
                sz = sizeof(*qp);
                sg_list_sz = 0;
                if (init_attr->srq) {
                        struct qib_srq *srq = to_isrq(init_attr->srq);

                        if (srq->rq.max_sge > 1)
                                sg_list_sz = sizeof(*qp->r_sg_list) *
                                        (srq->rq.max_sge - 1);
                } else if (init_attr->cap.max_recv_sge > 1)
                        sg_list_sz = sizeof(*qp->r_sg_list) *
                                (init_attr->cap.max_recv_sge - 1);
                qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
                if (!qp) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_swq;
                }
                if (init_attr->srq)
                        sz = 0;
                else {
                        qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
                        qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                        sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                                sizeof(struct qib_rwqe);
                        qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
                                                   qp->r_rq.size * sz);
                        if (!qp->r_rq.wq) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_qp;
                        }
                }

                /*
                 * ib_create_qp() will initialize qp->ibqp
                 * except for qp->ibqp.qp_num.
                 */
                spin_lock_init(&qp->r_lock);
                spin_lock_init(&qp->s_lock);
                spin_lock_init(&qp->r_rq.lock);
                atomic_set(&qp->refcount, 0);
                init_waitqueue_head(&qp->wait);
                init_waitqueue_head(&qp->wait_dma);
                init_timer(&qp->s_timer);
                qp->s_timer.data = (unsigned long)qp;
                INIT_WORK(&qp->s_work, qib_do_send);
                INIT_LIST_HEAD(&qp->iowait);
                INIT_LIST_HEAD(&qp->rspwait);
                qp->state = IB_QPS_RESET;
                qp->s_wq = swq;
                qp->s_size = init_attr->cap.max_send_wr + 1;
                qp->s_max_sge = init_attr->cap.max_send_sge;
                if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
                        qp->s_flags = QIB_S_SIGNAL_REQ_WR;
                dev = to_idev(ibpd->device);
                dd = dd_from_dev(dev);
                err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
                                init_attr->port_num);
                if (err < 0) {
                        ret = ERR_PTR(err);
                        vfree(qp->r_rq.wq);
                        goto bail_qp;
                }
                qp->ibqp.qp_num = err;
                qp->port_num = init_attr->port_num;
                qp->processor_id = smp_processor_id();
                qib_reset_qp(qp, init_attr->qp_type);
                break;

        default:
                /* Don't support raw QPs */
                ret = ERR_PTR(-ENOSYS);
                goto bail;
        }

        init_attr->cap.max_inline_data = 0;

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See qib_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                if (!qp->r_rq.wq) {
                        __u64 offset = 0;

                        err = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_ip;
                        }
                } else {
                        u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;

                        qp->ip = qib_create_mmap_info(dev, s,
                                                      ibpd->uobject->context,
                                                      qp->r_rq.wq);
                        if (!qp->ip) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_ip;
                        }

                        err = ib_copy_to_udata(udata, &(qp->ip->offset),
                                               sizeof(qp->ip->offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_ip;
                        }
                }
        }

        spin_lock(&dev->n_qps_lock);
        if (dev->n_qps_allocated == ib_qib_max_qps) {
                spin_unlock(&dev->n_qps_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        dev->n_qps_allocated++;
        spin_unlock(&dev->n_qps_lock);

        if (qp->ip) {
                spin_lock_irq(&dev->pending_lock);
                list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }

        ret = &qp->ibqp;
        goto bail;

bail_ip:
        if (qp->ip)
                kref_put(&qp->ip->ref, qib_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
        kfree(qp);
bail_swq:
        vfree(swq);
bail:
        return ret;
}
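
/*
 * Consumer sketch (illustrative only; the callback, CQs, and sizes below
 * are hypothetical, but the verbs calls are the standard core API): a
 * kernel ULP reaches this function through ib_create_qp() on a qib
 * device:
 *
 *	struct ib_qp_init_attr ia = {
 *		.event_handler = my_qp_event,
 *		.send_cq = scq,
 *		.recv_cq = rcq,
 *		.cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type = IB_QPT_RC,
 *		.port_num = 1,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &ia);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *	...
 *	ib_destroy_qp(qp);	which lands in qib_destroy_qp() below
 */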

/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
        struct qib_qp *qp = to_iqp(ibqp);
        struct qib_ibdev *dev = to_idev(ibqp->device);

        /* Make sure HW and driver activity is stopped. */
        spin_lock_irq(&qp->s_lock);
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;
                spin_lock(&dev->pending_lock);
                if (!list_empty(&qp->iowait))
                        list_del_init(&qp->iowait);
                spin_unlock(&dev->pending_lock);
                qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
                spin_unlock_irq(&qp->s_lock);
                cancel_work_sync(&qp->s_work);
                del_timer_sync(&qp->s_timer);
                wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
                if (qp->s_tx) {
                        qib_put_txreq(qp->s_tx);
                        qp->s_tx = NULL;
                }
                remove_qp(dev, qp);
                wait_event(qp->wait, !atomic_read(&qp->refcount));
                clear_mr_refs(qp, 1);
        } else
                spin_unlock_irq(&qp->s_lock);

        /* all users cleaned up, mark it available */
        free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
        spin_lock(&dev->n_qps_lock);
        dev->n_qps_allocated--;
        spin_unlock(&dev->n_qps_lock);

        if (qp->ip)
                kref_put(&qp->ip->ref, qib_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        vfree(qp->s_wq);
        kfree(qp);
        return 0;
}

/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @dd: the qlogic_ib device
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
{
        spin_lock_init(&qpt->lock);
        qpt->last = 1;          /* start with QPN 2 */
        qpt->nmaps = 1;
        qpt->mask = dd->qpn_mask;
}

/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
                if (qpt->map[i].page)
                        free_page((unsigned long) qpt->map[i].page);
}

/**
 * qib_get_credit - handle a credit update in an incoming AETH
 * @qp: the QP whose credit limit to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct qib_qp *qp, u32 aeth)
{
        u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

        /*
         * If the credit is invalid, we can send
         * as many packets as we like.  Otherwise, we have to
         * honor the credit field.
         */
        if (credit == QIB_AETH_CREDIT_INVAL) {
                if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
                        qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
                        if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
                                qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
                                qib_schedule_send(qp);
                        }
                }
        } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
                /* Compute new LSN (i.e., MSN + credit) */
                credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
                if (qib_cmp24(credit, qp->s_lsn) > 0) {
                        qp->s_lsn = credit;
                        if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
                                qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
                                qib_schedule_send(qp);
                        }
                }
        }
}