/*
 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

/*
 * mask field which was present in now deleted qib_qpn_table
 * is not present in rvt_qpn_table. Defining the same field
 * as qpt_mask here instead of adding the mask field to
 * rvt_qpn_table.
 */
u16 qpt_mask;
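
/* Convert a QPN bitmap page and a bit offset within it back into a QP number. */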
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
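
/*
 * Find the next candidate bit offset in a QPN bitmap page: when qpt_mask
 * is in use, step to the next QPN that maps to a valid kernel receive
 * context; otherwise just find the next clear bit in the page.
 */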
static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	return off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
	12288, 16384, 24576, 32768
};
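
/*
 * Per-opcode posting parameters consumed by rdmavt when a work request
 * is posted: the size of the WR structure to copy and the QP types on
 * which the opcode is allowed.
 */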
const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

};
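
/*
 * Allocate a zeroed bitmap page for a QPN map and install it, unless
 * another CPU beat us to it.
 */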
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */
	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		  enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

/**
 * qib_free_all_qps - check for QPs still in use
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}
	return qp_inuse;
}
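
/* Clear the driver-private SDMA busy count when rdmavt resets the QP. */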
void qib_notify_qp_reset(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	atomic_set(&priv->s_dma_busy, 0);
}
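
/*
 * Called by rdmavt when the QP moves to the error state: take the QP off
 * the iowait list and release any send resources it still holds, provided
 * the send engine is not actively using them.
 */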
void qib_notify_error_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}
}
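
/* Map an MTU in bytes to the corresponding IB_MTU_* enum value. */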
static int mtu_to_enum(u32 mtu)
{
	switch (mtu) {
	case 4096:
		return IB_MTU_4096;
	case 2048:
		return IB_MTU_2048;
	case 1024:
		return IB_MTU_1024;
	case 512:
		return IB_MTU_512;
	case 256:
		return IB_MTU_256;
	default:
		return IB_MTU_2048;
	}
}
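
/*
 * Validate the path MTU requested in a modify QP operation and clamp it
 * to what the physical port can actually carry.
 */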
int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   struct ib_qp_attr *attr)
{
	int mtu, pmtu, pidx = qp->port_num - 1;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	mtu = ib_mtu_enum_to_int(attr->path_mtu);
	if (mtu == -1)
		return -EINVAL;

	if (mtu > dd->pport[pidx].ibmtu)
		pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
	else
		pmtu = attr->path_mtu;
	return pmtu;
}
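
/* rdmavt callbacks converting between byte MTUs and IB_MTU_* enum values. */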
int qib_mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu);
}

u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	return ib_mtu_enum_to_int(pmtu);
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
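
/*
 * Allocate the qib-specific per-QP private data for rdmavt: the send
 * header buffer, the deferred send work item and the iowait/DMA-wait
 * bookkeeping.
 */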
void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), gfp);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	priv->owner = qp;

	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	init_waitqueue_head(&priv->wait_dma);
	INIT_WORK(&priv->s_work, _qib_do_send);
	INIT_LIST_HEAD(&priv->iowait);

	return priv;
}
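
/* Free the per-QP private data allocated by qib_qp_priv_alloc(). */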
void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}
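
/* Stop any deferred send work and the retry timer for this QP. */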
void qib_stop_send_queue(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_work);
	del_timer_sync(&qp->s_timer);
}
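
/* Wait for in-flight SDMA to drain and release any outstanding tx request. */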
void qib_quiesce_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
	if (priv->s_tx) {
		qib_put_txreq(priv->s_tx);
		priv->s_tx = NULL;
	}
}
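
/* Remove the QP from the rdmavt iowait list if it is queued there. */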
void qib_flush_qp_waiters(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait))
		list_del_init(&priv->iowait);
	spin_unlock(&dev->rdi.pending_lock);
}

/**
 * qib_get_credit - handle the credit field of an incoming AETH
 * @qp: the qp whose send credit state to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like. Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}

/**
 * qib_check_send_wqe - validate wr/wqe
 * @qp - The qp
 * @wqe - The built wqe
 *
 * Validate wr/wqe. This is called
 * prior to inserting the wqe into
 * the ring but after the wqe has been
 * set up.
 *
 * Returns 1 to force direct progress, 0 otherwise, -EINVAL on failure
 */
int qib_check_send_wqe(struct rvt_qp *qp,
		       struct rvt_swqe *wqe)
{
	struct rvt_ah *ah;
	int ret = 0;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}

#ifdef CONFIG_DEBUG_FS
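
/* Iterator state for walking all QPs in the rdmavt hash table via debugfs. */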
struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	int n;
};
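
/* Allocate and initialize a QP iterator for the given device. */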
struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;

	return iter;
}
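
/*
 * Advance the iterator to the next allocated QP in the device's QP table.
 * Returns 0 if another QP was found, non-zero when the table is exhausted.
 */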
int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};
void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif