qp->alt_port = attr->alt_port_num;
if (is_sqp(dev, qp))
- store_attrs(to_msqp(qp), attr, attr_mask);
+ store_attrs(qp->sqp, attr, attr_mask);
/*
* If we moved QP0 to RTR, bring the IB link up; if we moved
* QP0 to RESET or ERROR, bring the link back down.
*/
struct ib_qp_cap *cap,
int qpn,
int port,
- struct mthca_sqp *sqp,
+ struct mthca_qp *qp,
struct ib_udata *udata)
{
u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
int err;
- sqp->qp.transport = MLX;
- err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
+ qp->transport = MLX;
+ err = mthca_set_qp_size(dev, cap, pd, qp);
if (err)
return err;
- sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
- sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
- &sqp->header_dma, GFP_KERNEL);
- if (!sqp->header_buf)
+ qp->sqp->header_buf_size = qp->sq.max * MTHCA_UD_HEADER_SIZE;
+ qp->sqp->header_buf =
+ dma_alloc_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
+ &qp->sqp->header_dma, GFP_KERNEL);
+ if (!qp->sqp->header_buf)
return -ENOMEM;
spin_lock_irq(&dev->qp_table.lock);
if (mthca_array_get(&dev->qp_table.qp, mqpn))
err = -EBUSY;
else
- mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
+ mthca_array_set(&dev->qp_table.qp, mqpn, qp);
spin_unlock_irq(&dev->qp_table.lock);
if (err)
goto err_out;
- sqp->qp.port = port;
- sqp->qp.qpn = mqpn;
- sqp->qp.transport = MLX;
+ qp->port = port;
+ qp->qpn = mqpn;
+ qp->transport = MLX;
err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
- send_policy, &sqp->qp, udata);
+ send_policy, qp, udata);
if (err)
goto err_out_free;
mthca_unlock_cqs(send_cq, recv_cq);
- err_out:
- dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
- sqp->header_buf, sqp->header_dma);
-
+err_out:
+ dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
+ qp->sqp->header_buf, qp->sqp->header_dma);
return err;
}
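For orientation, here is the header change this diff pairs with, sketched from the commit's intent: the field lists are abbreviated and the _old/_new suffixes are invented purely for side-by-side comparison. struct mthca_sqp used to embed the QP as its first member, so a struct mthca_sqp * and its embedded qp shared an address and to_msqp() could container_of() between them; after the change, struct mthca_qp instead carries a pointer to separately-allocated special-QP state.

struct mthca_sqp_old {			/* before: QP embedded first */
	struct mthca_qp qp;		/* &sqp->qp == (void *)sqp */
	int		pkey_index;
	u32		qkey;
	u32		send_psn;
	int		header_buf_size;
	void	       *header_buf;
	dma_addr_t	header_dma;
	/* ... ud_header etc. ... */
};

struct mthca_sqp_new {			/* after: reached via qp->sqp */
	int		pkey_index;
	u32		qkey;
	u32		send_psn;
	int		header_buf_size;
	void	       *header_buf;
	dma_addr_t	header_dma;
	/* ... */
};

struct mthca_qp {			/* gains the back-pointer */
	/* ... existing fields ... */
	struct mthca_sqp *sqp;
};

This is also why the QP-table slot in mthca_alloc_sqp() must store qp itself rather than qp->sqp: once the embedding is gone the two pointers no longer alias, and the CQ polling and event code that look QPs up by number expect a struct mthca_qp *.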
if (is_sqp(dev, qp)) {
atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
- dma_free_coherent(&dev->pdev->dev,
- to_msqp(qp)->header_buf_size,
- to_msqp(qp)->header_buf,
- to_msqp(qp)->header_dma);
+ dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
+ qp->sqp->header_buf, qp->sqp->header_dma);
} else
mthca_free(&dev->qp_table.alloc, qp->qpn);
}
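The asymmetric teardown above hinges on is_sqp(), which decides whether this QP owns a header buffer to free. The special QPs occupy a contiguous four-slot window: with mqpn = qpn * 2 + sqp_start + port - 1 from mthca_alloc_sqp(), QP0 on ports 1 and 2 lands on sqp_start + 0 and + 1, and QP1 on sqp_start + 2 and + 3. A sketch of the check matching that window (reproduced from memory of mthca_qp.c, so verify against the tree):

static inline int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
	       qp->qpn <= dev->qp_table.sqp_start + 3;
}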
/* Create UD header for an MLX send and build a data segment for it */
-static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
- int ind, const struct ib_ud_wr *wr,
+static int build_mlx_header(struct mthca_dev *dev, struct mthca_qp *qp, int ind,
+ const struct ib_ud_wr *wr,
struct mthca_mlx_seg *mlx,
struct mthca_data_seg *data)
{
+ struct mthca_sqp *sqp = qp->sqp;
int header_size;
int err;
u16 pkey;
if (err)
return err;
mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
- mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
+ mlx->flags |= cpu_to_be32((!qp->ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
(sqp->ud_header.lrh.destination_lid ==
IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
(sqp->ud_header.lrh.service_level << 8));
return -EINVAL;
}
- sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
+ sqp->ud_header.lrh.virtual_lane = !qp->ibqp.qp_num ? 15 : 0;
if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
- if (!sqp->qp.ibqp.qp_num)
- ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
- sqp->pkey_index, &pkey);
+ if (!qp->ibqp.qp_num)
+ ib_get_cached_pkey(&dev->ib_dev, qp->port, sqp->pkey_index,
+ &pkey);
else
- ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
- wr->pkey_index, &pkey);
+ ib_get_cached_pkey(&dev->ib_dev, qp->port, wr->pkey_index,
+ &pkey);
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
sqp->qkey : wr->remote_qkey);
- sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
+ sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num);
header_size = ib_ud_header_pack(&sqp->ud_header,
sqp->header_buf +
ind * MTHCA_UD_HEADER_SIZE);
data->byte_count = cpu_to_be32(header_size);
- data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
+ data->lkey = cpu_to_be32(to_mpd(qp->ibqp.pd)->ntmr.ibmr.lkey);
data->addr = cpu_to_be64(sqp->header_dma +
ind * MTHCA_UD_HEADER_SIZE);
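Note how the header staging fits together: mthca_alloc_sqp() sized header_buf at sq.max * MTHCA_UD_HEADER_SIZE, one fixed-size slot per send-queue entry, so build_mlx_header() can pack the UD header for WQE index ind into its own slot and point the HCA at the matching DMA address without extra synchronization between in-flight sends. A hypothetical helper (not in the driver) just to make the indexing explicit:

/* CPU-side address of the UD header slot for send WQE index ind;
 * the device-side address is sqp->header_dma + the same offset. */
static void *sqp_header_slot(struct mthca_sqp *sqp, int ind)
{
	return sqp->header_buf + ind * MTHCA_UD_HEADER_SIZE;
}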
break;
case MLX:
- err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
- wqe - sizeof (struct mthca_next_seg),
- wqe);
+ err = build_mlx_header(
+ dev, qp, ind, ud_wr(wr),
+ wqe - sizeof(struct mthca_next_seg), wqe);
if (err) {
*bad_wr = wr;
goto out;
break;
case MLX:
- err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
- wqe - sizeof (struct mthca_next_seg),
- wqe);
+ err = build_mlx_header(
+ dev, qp, ind, ud_wr(wr),
+ wqe - sizeof(struct mthca_next_seg), wqe);
if (err) {
*bad_wr = wr;
goto out;