/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"
/*
 * mask field which was present in now deleted qib_qpn_table
 * is not present in rvt_qpn_table. Defining the same field
 * as qpt_mask here instead of adding the mask field to
 * rvt_qpn_table.
 */
static u16 qpt_mask;

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
                              struct rvt_qpn_map *map, unsigned off)
        return (map - qpt->map) * RVT_BITS_PER_PAGE + off;

static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
                                        struct rvt_qpn_map *map, unsigned off,
                                        unsigned n)
        if (((off & qpt_mask) >> 1) >= n)
                off = (off | qpt_mask) + 2;
        off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
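        /*
         * Note on the mask handling above: when the chip steers receive
         * traffic to contexts based on the low bits of the QPN (qpt_mask is
         * non-zero), any offset whose masked value would select a kernel
         * receive queue that does not exist (>= n) is skipped by rounding it
         * up past the masked range, so every QPN handed out maps to a real
         * context.
         */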

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {

static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
                         gfp_t gfp)
        unsigned long page = get_zeroed_page(gfp);

        /*
         * Free the page if someone raced with us installing it.
         */

        spin_lock(&qpt->lock);
        map->page = (void *)page;
        spin_unlock(&qpt->lock);
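        /*
         * The gfp argument is the caller's allocation context: it is
         * GFP_NOIO when the owning QP was created with
         * IB_QP_CREATE_USE_GFP_NOIO (see qib_create_qp() below), so this
         * bitmap page can be allocated from paths that must not recurse
         * into I/O.
         */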

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct rvt_qpn_table *qpt,
                     enum ib_qp_type type, u8 port, gfp_t gfp)
        u32 i, offset, max_scan, qpn;
        struct rvt_qpn_map *map;

        if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
                ret = type == IB_QPT_GSI;
                n = 1 << (ret + 2 * (port - 1));
                spin_lock(&qpt->lock);
                spin_unlock(&qpt->lock);
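                /*
                 * QP0 (SMI) and QP1 (GSI) are fixed, per-port QP numbers;
                 * "ret" selects QP0 vs QP1 and two bits are reserved per
                 * port, so the elided body simply marks the requested
                 * special QPN as in use under qpt->lock instead of scanning
                 * the bitmap below.
                 */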

        if (qpn >= RVT_QPN_MAX)
        if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
                qpn = (qpn | qpt_mask) + 2;
        offset = qpn & RVT_BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map, gfp);
                        if (unlikely(!map->page))
                        if (!test_and_set_bit(offset, map->page)) {
                        offset = find_next_offset(qpt, map, offset,
                        qpn = mk_qpn(qpt, map, offset);
                        /*
                         * This test differs from alloc_pidmap().
                         * If find_next_offset() does find a zero
                         * bit, we don't need to check for QPN
                         * wrapping around past our starting QPN.
                         * We just need to be sure we don't loop
                         * forever.
                         */
                } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all the existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
                        map = &qpt->map[qpt->nmaps++];
                } else if (map < &qpt->map[qpt->nmaps]) {
                qpn = mk_qpn(qpt, map, offset);
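        /*
         * Not shown in this excerpt: when test_and_set_bit() wins a clear
         * bit, the success path records the QPN in qpt->last (so the next
         * allocation resumes just past it) and returns it; otherwise the
         * scan advances to the next map page, growing qpt->nmaps only after
         * every existing page has been tried, per the comment above.
         */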

static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
        struct rvt_qpn_map *map;

        map = qpt->map + qpn / RVT_BITS_PER_PAGE;
        clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);

static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
        return jhash_1word(qpn, dev->qp_rnd) &
               (dev->rdi.qp_dev->qp_table_size - 1);
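/*
 * qp_table_size is expected to be a power of two, so masking the jhash
 * value with (size - 1) yields a valid bucket index; dev->qp_rnd is a
 * per-device value that perturbs the hash so bucket placement is not
 * easily predictable from the QPN alone.
 */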

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

        atomic_inc(&qp->refcount);
        spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);

        if (qp->ibqp.qp_num == 0)
                rcu_assign_pointer(ibp->rvp.qp[0], qp);
        else if (qp->ibqp.qp_num == 1)
                rcu_assign_pointer(ibp->rvp.qp[1], qp);
                qp->next = dev->rdi.qp_dev->qp_table[n];
                rcu_assign_pointer(dev->rdi.qp_dev->qp_table[n], qp);

        spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
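        /*
         * Ordering note: rcu_assign_pointer() publishes the fully
         * initialized QP (including qp->next, set just above) before the
         * bucket head or qp[0]/qp[1] pointer becomes visible, so lockless
         * readers in the receive path always see a consistent chain.  The
         * reference taken above belongs to the hash table and is dropped in
         * remove_qp().
         */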

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
        spinlock_t *qpt_lock_ptr; /* Pointer to make checkpatch happy */

        spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);

        qpt_lock_ptr = &dev->rdi.qp_dev->qpt_lock;
        if (rcu_dereference_protected(ibp->rvp.qp[0],
                        lockdep_is_held(qpt_lock_ptr)) == qp) {
                RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
        } else if (rcu_dereference_protected(ibp->rvp.qp[1],
                        lockdep_is_held(&dev->rdi.qp_dev->qpt_lock)) == qp) {
                RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
                struct rvt_qp __rcu **qpp;

                qpp = &dev->rdi.qp_dev->qp_table[n];
                for (; (q = rcu_dereference_protected(*qpp,
                                lockdep_is_held(qpt_lock_ptr))) != NULL;
                                RCU_INIT_POINTER(*qpp,
                                        rcu_dereference_protected(qp->next,
                                                lockdep_is_held(qpt_lock_ptr)));

        spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);

        atomic_dec(&qp->refcount);
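        /*
         * The atomic_dec() releases the reference taken by insert_qp().
         * Readers that found this QP via qib_lookup_qpn() hold their own
         * reference, so the memory stays valid until qp->refcount drops to
         * zero (see the wait_event() calls in the reset and destroy paths).
         */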

/**
 * qib_free_all_qps - check for QPs still in use
 * @qpt: the QP table to empty
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
        struct qib_ibdev *dev = &dd->verbs_dev;
        unsigned n, qp_inuse = 0;
        spinlock_t *qpt_lock_ptr; /* Pointer to make checkpatch happy */

        for (n = 0; n < dd->num_pports; n++) {
                struct qib_ibport *ibp = &dd->pport[n].ibport_data;

                if (!qib_mcast_tree_empty(ibp))
                if (rcu_dereference(ibp->rvp.qp[0]))
                if (rcu_dereference(ibp->rvp.qp[1]))

        spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);
        qpt_lock_ptr = &dev->rdi.qp_dev->qpt_lock;
        for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) {
                qp = rcu_dereference_protected(dev->rdi.qp_dev->qp_table[n],
                                lockdep_is_held(qpt_lock_ptr));
                RCU_INIT_POINTER(dev->rdi.qp_dev->qp_table[n], NULL);

                for (; qp; qp = rcu_dereference_protected(qp->next,
                                lockdep_is_held(qpt_lock_ptr)))
        spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);

/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct rvt_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
        struct rvt_qp *qp = NULL;

        if (unlikely(qpn <= 1)) {
                        qp = rcu_dereference(ibp->rvp.qp[0]);
                        qp = rcu_dereference(ibp->rvp.qp[1]);
                        atomic_inc(&qp->refcount);
                struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
                unsigned n = qpn_hash(dev, qpn);

                for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp;
                                qp = rcu_dereference(qp->next))
                        if (qp->ibqp.qp_num == qpn) {
                                atomic_inc(&qp->refcount);

/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 */
static void qib_reset_qp(struct rvt_qp *qp, enum ib_qp_type type)
        struct qib_qp_priv *priv = qp->priv;

        qp->qp_access_flags = 0;
        atomic_set(&priv->s_dma_busy, 0);
        qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
        qp->s_sending_psn = 0;
        qp->s_sending_hpsn = 0;
        if (type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
                qp->s_state = IB_OPCODE_UC_SEND_LAST;
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->s_mig_state = IB_MIG_MIGRATED;
        memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
                qp->r_rq.wq->head = 0;
                qp->r_rq.wq->tail = 0;
        qp->r_sge.num_sge = 0;

static void clear_mr_refs(struct rvt_qp *qp, int clr_sends)
        if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
                qib_put_ss(&qp->s_rdma_read_sge);

        qib_put_ss(&qp->r_sge);

                while (qp->s_last != qp->s_head) {
                        struct rvt_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

                        for (i = 0; i < wqe->wr.num_sge; i++) {
                                struct rvt_sge *sge = &wqe->sg_list[i];
                        if (qp->ibqp.qp_type == IB_QPT_UD ||
                            qp->ibqp.qp_type == IB_QPT_SMI ||
                            qp->ibqp.qp_type == IB_QPT_GSI)
                                        &ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
                        if (++qp->s_last >= qp->s_size)
                rvt_put_mr(qp->s_rdma_mr);
                qp->s_rdma_mr = NULL;

        if (qp->ibqp.qp_type != IB_QPT_RC)

        for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
                struct rvt_ack_entry *e = &qp->s_ack_queue[n];

                if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
                        rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
        struct qib_qp_priv *priv = qp->priv;
        struct qib_ibdev *dev = to_idev(qp->ibqp.device);

        if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)

        qp->state = IB_QPS_ERR;

        if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
                qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
                del_timer(&qp->s_timer);

        if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
                qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

        spin_lock(&dev->rdi.pending_lock);
        if (!list_empty(&priv->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
                qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
                list_del_init(&priv->iowait);
        spin_unlock(&dev->rdi.pending_lock);

        if (!(qp->s_flags & QIB_S_BUSY)) {
                        rvt_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                        qib_put_txreq(priv->s_tx);

        /* Schedule the sending tasklet to drain the send work queue. */
        if (qp->s_last != qp->s_head)
                qib_schedule_send(qp);

        clear_mr_refs(qp, 0);

        memset(&wc, 0, sizeof(wc));
        wc.opcode = IB_WC_RECV;

        if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
                wc.wr_id = qp->r_wr_id;
                qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        wc.status = IB_WC_WR_FLUSH_ERR;

                spin_lock(&qp->r_rq.lock);

                /* sanity check pointers before trusting them */
                if (head >= qp->r_rq.size)
                if (tail >= qp->r_rq.size)
                while (tail != head) {
                        wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
                        if (++tail >= qp->r_rq.size)
                        qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
                spin_unlock(&qp->r_rq.lock);
        } else if (qp->ibqp.event_handler)

/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                  int attr_mask, struct ib_udata *udata)
        struct qib_ibdev *dev = to_idev(ibqp->device);
        struct rvt_qp *qp = to_iqp(ibqp);
        struct qib_qp_priv *priv = qp->priv;
        enum ib_qp_state cur_state, new_state;
        u32 pmtu = 0; /* for gcc warning only */

        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_lock);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask, IB_LINK_LAYER_UNSPECIFIED))

        if (attr_mask & IB_QP_AV) {
                if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
                if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))

        if (attr_mask & IB_QP_ALT_PATH) {
                if (attr->alt_ah_attr.dlid >=
                    be16_to_cpu(IB_MULTICAST_LID_BASE))
                if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
                if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))

        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                if (attr->min_rnr_timer > 31)

        if (attr_mask & IB_QP_PORT)
                if (qp->ibqp.qp_type == IB_QPT_SMI ||
                    qp->ibqp.qp_type == IB_QPT_GSI ||
                    attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt)

        if (attr_mask & IB_QP_DEST_QPN)
                if (attr->dest_qp_num > QIB_QPN_MASK)

        if (attr_mask & IB_QP_RETRY_CNT)
                if (attr->retry_cnt > 7)

        if (attr_mask & IB_QP_RNR_RETRY)
                if (attr->rnr_retry > 7)

        /*
         * Don't allow invalid path_mtu values.  OK to set greater
         * than the active mtu (or even the max_cap, if we have tuned
         * that to a small mtu).  We'll set qp->path_mtu
         * to the lesser of requested attribute mtu and active,
         * for packetizing messages.
         * Note that the QP port has to be set in INIT and MTU in RTR.
         */
        if (attr_mask & IB_QP_PATH_MTU) {
                struct qib_devdata *dd = dd_from_dev(dev);
                int mtu, pidx = qp->port_num - 1;

                mtu = ib_mtu_enum_to_int(attr->path_mtu);
                if (mtu > dd->pport[pidx].ibmtu) {
                        switch (dd->pport[pidx].ibmtu) {
                pmtu = attr->path_mtu;

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                if (attr->path_mig_state == IB_MIG_REARM) {
                        if (qp->s_mig_state == IB_MIG_ARMED)
                        if (new_state != IB_QPS_RTS)
                } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
                        if (qp->s_mig_state == IB_MIG_REARM)
                        if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
                        if (qp->s_mig_state == IB_MIG_ARMED)

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)

                if (qp->state != IB_QPS_RESET) {
                        qp->state = IB_QPS_RESET;
                        spin_lock(&dev->rdi.pending_lock);
                        if (!list_empty(&priv->iowait))
                                list_del_init(&priv->iowait);
                        spin_unlock(&dev->rdi.pending_lock);
                        qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
                        spin_unlock(&qp->s_lock);
                        spin_unlock_irq(&qp->r_lock);
                        /* Stop the sending work queue and retry timer */
                        cancel_work_sync(&priv->s_work);
                        del_timer_sync(&qp->s_timer);
                        wait_event(priv->wait_dma,
                                   !atomic_read(&priv->s_dma_busy));
                                qib_put_txreq(priv->s_tx);
                        wait_event(qp->wait, !atomic_read(&qp->refcount));
                        spin_lock_irq(&qp->r_lock);
                        spin_lock(&qp->s_lock);
                        clear_mr_refs(qp, 1);
                        qib_reset_qp(qp, ibqp->qp_type);

                /* Allow event to retrigger if QP set to RTR more than once */
                qp->r_flags &= ~QIB_R_COMM_EST;
                qp->state = new_state;

                qp->s_draining = qp->s_last != qp->s_cur;
                qp->state = new_state;

                if (qp->ibqp.qp_type == IB_QPT_RC)
                qp->state = new_state;

                lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);

                qp->state = new_state;

        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->s_pkey_index = attr->pkey_index;

        if (attr_mask & IB_QP_PORT)
                qp->port_num = attr->port_num;

        if (attr_mask & IB_QP_DEST_QPN)
                qp->remote_qpn = attr->dest_qp_num;

        if (attr_mask & IB_QP_SQ_PSN) {
                qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
                qp->s_psn = qp->s_next_psn;
                qp->s_sending_psn = qp->s_next_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
                qp->s_sending_hpsn = qp->s_last_psn;
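                /*
                 * PSN bookkeeping: s_last_psn is set one behind sq_psn so
                 * the send side starts out looking fully acknowledged, and
                 * s_sending_hpsn mirrors it so no packets appear to be in
                 * flight until the first request is actually posted.
                 */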

        if (attr_mask & IB_QP_RQ_PSN)
                qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->qp_access_flags = attr->qp_access_flags;

        if (attr_mask & IB_QP_AV) {
                qp->remote_ah_attr = attr->ah_attr;
                qp->s_srate = attr->ah_attr.static_rate;

        if (attr_mask & IB_QP_ALT_PATH) {
                qp->alt_ah_attr = attr->alt_ah_attr;
                qp->s_alt_pkey_index = attr->alt_pkey_index;

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                qp->s_mig_state = attr->path_mig_state;
                        qp->remote_ah_attr = qp->alt_ah_attr;
                        qp->port_num = qp->alt_ah_attr.port_num;
                        qp->s_pkey_index = qp->s_alt_pkey_index;

        if (attr_mask & IB_QP_PATH_MTU) {
                qp->pmtu = ib_mtu_enum_to_int(pmtu);

        if (attr_mask & IB_QP_RETRY_CNT) {
                qp->s_retry_cnt = attr->retry_cnt;
                qp->s_retry = attr->retry_cnt;

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp->s_rnr_retry_cnt = attr->rnr_retry;
                qp->s_rnr_retry = attr->rnr_retry;

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;

        if (attr_mask & IB_QP_TIMEOUT) {
                qp->timeout = attr->timeout;
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                                1000UL);
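                /*
                 * Worked example: IBTA encodes the local ACK timeout as
                 * 4.096 usec * 2^timeout.  4096 * 2^timeout is that value
                 * in nanoseconds, so dividing by 1000 gives microseconds
                 * for usecs_to_jiffies().  E.g. timeout = 14 yields
                 * 4096 * 16384 / 1000 = 67108 usec, roughly 67 ms.
                 */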

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
                qp->s_max_rd_atomic = attr->max_rd_atomic;

        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);

        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_PATH_MIG;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);

        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 int attr_mask, struct ib_qp_init_attr *init_attr)
        struct rvt_qp *qp = to_iqp(ibqp);

        attr->qp_state = qp->state;
        attr->cur_qp_state = attr->qp_state;
        attr->path_mtu = qp->path_mtu;
        attr->path_mig_state = qp->s_mig_state;
        attr->qkey = qp->qkey;
        attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
        attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
        attr->dest_qp_num = qp->remote_qpn;
        attr->qp_access_flags = qp->qp_access_flags;
        attr->cap.max_send_wr = qp->s_size - 1;
        attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
        attr->cap.max_send_sge = qp->s_max_sge;
        attr->cap.max_recv_sge = qp->r_rq.max_sge;
        attr->cap.max_inline_data = 0;
        attr->ah_attr = qp->remote_ah_attr;
        attr->alt_ah_attr = qp->alt_ah_attr;
        attr->pkey_index = qp->s_pkey_index;
        attr->alt_pkey_index = qp->s_alt_pkey_index;
        attr->en_sqd_async_notify = 0;
        attr->sq_draining = qp->s_draining;
        attr->max_rd_atomic = qp->s_max_rd_atomic;
        attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
        attr->min_rnr_timer = qp->r_min_rnr_timer;
        attr->port_num = qp->port_num;
        attr->timeout = qp->timeout;
        attr->retry_cnt = qp->s_retry_cnt;
        attr->rnr_retry = qp->s_rnr_retry_cnt;
        attr->alt_port_num = qp->alt_ah_attr.port_num;
        attr->alt_timeout = qp->alt_timeout;

        init_attr->event_handler = qp->ibqp.event_handler;
        init_attr->qp_context = qp->ibqp.qp_context;
        init_attr->send_cq = qp->ibqp.send_cq;
        init_attr->recv_cq = qp->ibqp.recv_cq;
        init_attr->srq = qp->ibqp.srq;
        init_attr->cap = attr->cap;
        if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
                init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
                init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->qp_type = qp->ibqp.qp_type;
        init_attr->port_num = qp->port_num;

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 */
__be32 qib_compute_aeth(struct rvt_qp *qp)
        u32 aeth = qp->r_msn & QIB_MSN_MASK;

                /*
                 * Shared receive queues don't generate credits.
                 * Set the credit field to the invalid value.
                 */
                aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;

                struct rvt_rwq *wq = qp->r_rq.wq;

                /* sanity check pointers before trusting them */
                if (head >= qp->r_rq.size)
                if (tail >= qp->r_rq.size)
                /*
                 * Compute the number of credits available (RWQEs).
                 * XXX Not holding the r_rq.lock here so there is a small
                 * chance that the pair of reads are not atomic.
                 */
                credits = head - tail;
                if ((int)credits < 0)
                        credits += qp->r_rq.size;
                /*
                 * Binary search the credit table to find the code to use.
                 */
                        if (credit_table[x] == credits)
                        if (credit_table[x] > credits)
                aeth |= x << QIB_AETH_CREDIT_SHIFT;
        return cpu_to_be32(aeth);
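
/*
 * AETH recap: the returned word carries the 24-bit MSN in its low bits and
 * a 5-bit credit code in the field selected by QIB_AETH_CREDIT_SHIFT.  The
 * elided binary search picks the credit_table[] code that best approximates
 * the RWQE count computed above; qib_get_credit() below performs the
 * inverse mapping on the sender.
 */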

/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata)
        struct rvt_swqe *swq = NULL;
        struct qib_ibdev *dev;
        struct qib_devdata *dd;
        struct qib_qp_priv *priv;

        if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
            init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
            init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
                return ERR_PTR(-EINVAL);

        /* GFP_NOIO is applicable in RC QPs only */
        if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
            init_attr->qp_type != IB_QPT_RC)
                return ERR_PTR(-EINVAL);

        gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
                GFP_NOIO : GFP_KERNEL;
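
        /*
         * IB_QP_CREATE_USE_GFP_NOIO is requested by consumers that create
         * RC QPs from contexts where paging I/O must not be triggered
         * (such as IPoIB connected mode); every allocation on this path,
         * including the QPN bitmap pages, then uses GFP_NOIO.
         */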

        /* Check receive queue parameters if no SRQ is specified. */
        if (!init_attr->srq) {
                if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
                    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
                        ret = ERR_PTR(-EINVAL);
                if (init_attr->cap.max_send_sge +
                    init_attr->cap.max_send_wr +
                    init_attr->cap.max_recv_sge +
                    init_attr->cap.max_recv_wr == 0) {
                        ret = ERR_PTR(-EINVAL);

        switch (init_attr->qp_type) {
                if (init_attr->port_num == 0 ||
                    init_attr->port_num > ibpd->device->phys_port_cnt) {
                        ret = ERR_PTR(-EINVAL);
                sz = sizeof(struct rvt_sge) *
                        init_attr->cap.max_send_sge +
                        sizeof(struct rvt_swqe);
                swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
                        ret = ERR_PTR(-ENOMEM);
                if (init_attr->srq) {
                        struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

                        if (srq->rq.max_sge > 1)
                                sg_list_sz = sizeof(*qp->r_sg_list) *
                                        (srq->rq.max_sge - 1);
                } else if (init_attr->cap.max_recv_sge > 1)
                        sg_list_sz = sizeof(*qp->r_sg_list) *
                                (init_attr->cap.max_recv_sge - 1);
                qp = kzalloc(sz + sg_list_sz, gfp);
                        ret = ERR_PTR(-ENOMEM);
                RCU_INIT_POINTER(qp->next, NULL);
                priv = kzalloc(sizeof(*priv), gfp);
                        ret = ERR_PTR(-ENOMEM);
                priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
                        ret = ERR_PTR(-ENOMEM);
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                                1000UL);
                qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
                qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                        sizeof(struct rvt_rwqe);
                if (gfp != GFP_NOIO)
                        qp->r_rq.wq = vmalloc_user(
                                        sizeof(struct rvt_rwq) +
                                        qp->r_rq.size * sz);
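                        /*
                         * vmalloc_user() returns a zeroed, page-aligned
                         * buffer suitable for mapping into user space later
                         * (see the rvt mmap_info handling below); the
                         * __vmalloc() branch is only taken for GFP_NOIO
                         * allocations, which come from kernel consumers and
                         * are not expected to be mmapped.
                         */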
                        qp->r_rq.wq = __vmalloc(
                                        sizeof(struct rvt_rwq) +
                        ret = ERR_PTR(-ENOMEM);

                /*
                 * ib_create_qp() will initialize qp->ibqp
                 * except for qp->ibqp.qp_num.
                 */
                spin_lock_init(&qp->r_lock);
                spin_lock_init(&qp->s_lock);
                spin_lock_init(&qp->r_rq.lock);
                atomic_set(&qp->refcount, 0);
                init_waitqueue_head(&qp->wait);
                init_waitqueue_head(&priv->wait_dma);
                init_timer(&qp->s_timer);
                qp->s_timer.data = (unsigned long)qp;
                INIT_WORK(&priv->s_work, qib_do_send);
                INIT_LIST_HEAD(&priv->iowait);
                INIT_LIST_HEAD(&qp->rspwait);
                qp->state = IB_QPS_RESET;
                qp->s_size = init_attr->cap.max_send_wr + 1;
                qp->s_max_sge = init_attr->cap.max_send_sge;
                if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
                        qp->s_flags = QIB_S_SIGNAL_REQ_WR;
                dev = to_idev(ibpd->device);
                dd = dd_from_dev(dev);
                err = alloc_qpn(dd, &dev->rdi.qp_dev->qpn_table,
                                init_attr->qp_type, init_attr->port_num, gfp);
                qp->ibqp.qp_num = err;
                qp->port_num = init_attr->port_num;
                qib_reset_qp(qp, init_attr->qp_type);

                /* Don't support raw QPs */
                ret = ERR_PTR(-ENOSYS);

        init_attr->cap.max_inline_data = 0;

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See qib_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                        err = ib_copy_to_udata(udata, &offset,
                        u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

                        qp->ip = rvt_create_mmap_info(&dev->rdi, s,
                                                      ibpd->uobject->context,
                                ret = ERR_PTR(-ENOMEM);
                        err = ib_copy_to_udata(udata, &(qp->ip->offset),
                                               sizeof(qp->ip->offset));

        spin_lock(&dev->n_qps_lock);
        if (dev->n_qps_allocated == ib_qib_max_qps) {
                spin_unlock(&dev->n_qps_lock);
                ret = ERR_PTR(-ENOMEM);
        dev->n_qps_allocated++;
        spin_unlock(&dev->n_qps_lock);

                spin_lock_irq(&dev->rdi.pending_lock);
                list_add(&qp->ip->pending_mmaps, &dev->rdi.pending_mmaps);
                spin_unlock_irq(&dev->rdi.pending_lock);

                kref_put(&qp->ip->ref, rvt_release_mmap_info);
        free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num);

/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
        struct rvt_qp *qp = to_iqp(ibqp);
        struct qib_ibdev *dev = to_idev(ibqp->device);
        struct qib_qp_priv *priv = qp->priv;

        /* Make sure HW and driver activity is stopped. */
        spin_lock_irq(&qp->s_lock);
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;
                spin_lock(&dev->rdi.pending_lock);
                if (!list_empty(&priv->iowait))
                        list_del_init(&priv->iowait);
                spin_unlock(&dev->rdi.pending_lock);
                qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
                spin_unlock_irq(&qp->s_lock);
                cancel_work_sync(&priv->s_work);
                del_timer_sync(&qp->s_timer);
                wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
                        qib_put_txreq(priv->s_tx);
                wait_event(qp->wait, !atomic_read(&qp->refcount));
                clear_mr_refs(qp, 1);
                spin_unlock_irq(&qp->s_lock);

        /* all users cleaned up, mark it available */
        free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num);
        spin_lock(&dev->n_qps_lock);
        dev->n_qps_allocated--;
        spin_unlock(&dev->n_qps_lock);

                kref_put(&qp->ip->ref, rvt_release_mmap_info);

/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct rvt_qpn_table *qpt)
        spin_lock_init(&qpt->lock);
        qpt->last = 1;          /* start with QPN 2 */
        qpt_mask = dd->qpn_mask;
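        /*
         * dd->qpn_mask is non-zero only when the hardware distributes
         * receive traffic across contexts using low-order QPN bits; caching
         * it in the file-scope qpt_mask lets alloc_qpn() and
         * find_next_offset() above avoid QPNs that would land on a
         * non-existent kernel receive queue.
         */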

/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct rvt_qpn_table *qpt)
        for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
                if (qpt->map[i].page)
                        free_page((unsigned long) qpt->map[i].page);

/**
 * qib_get_credit - handle a credit update contained in an AETH
 * @qp: the QP whose send state should be updated
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
        u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

        /*
         * If the credit is invalid, we can send
         * as many packets as we like.  Otherwise, we have to
         * honor the credit field.
         */
        if (credit == QIB_AETH_CREDIT_INVAL) {
                if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
                        qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
                        if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
                                qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
                                qib_schedule_send(qp);
        } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
                /* Compute new LSN (i.e., MSN + credit) */
                credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
                if (qib_cmp24(credit, qp->s_lsn) > 0) {
                        if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
                                qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
                                qib_schedule_send(qp);
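                        /*
                         * s_lsn acts as a limit sequence number: the sender
                         * may issue new requests while their SSN has not
                         * passed s_lsn.  A valid AETH credit therefore
                         * raises the limit toward MSN + credit_table[credit]
                         * (the elided assignment), and any sender blocked on
                         * QIB_S_WAIT_SSN_CREDIT is rescheduled.
                         */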

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
        struct qib_ibdev *dev;

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
        struct qib_qp_iter *iter;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (qib_qp_iter_next(iter)) {

int qib_qp_iter_next(struct qib_qp_iter *iter)
        struct qib_ibdev *dev = iter->dev;
        struct rvt_qp *pqp = iter->qp;

        for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
                        qp = rcu_dereference(pqp->next);
                        qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);

static const char * const qp_type_str[] = {
        "SMI", "GSI", "RC", "UC", "UD",

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
        struct rvt_swqe *wqe;
        struct rvt_qp *qp = iter->qp;
        struct qib_qp_priv *priv = qp->priv;

        wqe = get_swqe_ptr(qp, qp->s_last);
        seq_printf(s,
                   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
                   qp_type_str[qp->ibqp.qp_type],
                   atomic_read(&priv->s_dma_busy),
                   !list_empty(&priv->iowait),
                   qp->s_psn, qp->s_next_psn,
                   qp->s_sending_psn, qp->s_sending_hpsn,
                   qp->s_last, qp->s_acked, qp->s_cur,
                   qp->s_tail, qp->s_head, qp->s_size,
                   qp->remote_ah_attr.dlid);