{
if (!cm_req_get_primary_subnet_local(req_msg)) {
if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
- req_msg->primary_local_lid = ib_slid_be16(wc->slid);
+ req_msg->primary_local_lid = ib_lid_be16(wc->slid);
cm_req_set_primary_sl(req_msg, wc->sl);
}
if (!cm_req_get_alt_subnet_local(req_msg)) {
if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
- req_msg->alt_local_lid = ib_slid_be16(wc->slid);
+ req_msg->alt_local_lid = ib_lid_be16(wc->slid);
cm_req_set_alt_sl(req_msg, wc->sl);
}
packet->mad.hdr.status = 0;
packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len;
packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
- packet->mad.hdr.lid = ib_slid_be16(mad_recv_wc->wc->slid);
+ packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
packet->mad.hdr.sl = mad_recv_wc->wc->sl;
packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;
resp.bad_pkey_cntr = attr.bad_pkey_cntr;
resp.qkey_viol_cntr = attr.qkey_viol_cntr;
resp.pkey_tbl_len = attr.pkey_tbl_len;
+
if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
- resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
+ resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid);
} else {
- resp.lid = (u16)attr.lid;
- resp.sm_lid = (u16)attr.sm_lid;
+ resp.lid = ib_lid_cpu16(attr.lid);
+ resp.sm_lid = ib_lid_cpu16(attr.sm_lid);
}
resp.lmc = attr.lmc;
resp.max_vl_num = attr.max_vl_num;
tmp.wc_flags = wc->wc_flags;
tmp.pkey_index = wc->pkey_index;
if (rdma_cap_opa_ah(ib_dev, wc->port_num))
- tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
+ tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
else
- tmp.slid = ib_slid_cpu16(wc->slid);
+ tmp.slid = ib_lid_cpu16(wc->slid);
tmp.sl = wc->sl;
tmp.dlid_path_bits = wc->dlid_path_bits;
tmp.port_num = wc->port_num;
const struct ib_wc *in_wc)
{
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u16 slid = ib_slid_cpu16(in_wc->slid);
+ u16 slid = ib_lid_cpu16(in_wc->slid);
u16 pkey;
if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));
- guid_info_rec.lid = cpu_to_be16((u16)attr.lid);
+ guid_info_rec.lid = ib_lid_be16(attr.lid);
guid_info_rec.block_num = index;
memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
op_modifier |= 0x4;
- in_modifier |= ib_slid_cpu16(in_wc->slid) << 16;
+ in_modifier |= ib_lid_cpu16(in_wc->slid) << 16;
}
err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
} else {
tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
- tun_mad->hdr.slid_mac_47_32 = ib_slid_be16(wc->slid);
+ tun_mad->hdr.slid_mac_47_32 = ib_lid_be16(wc->slid);
}
ib_dma_sync_single_for_device(&dev->ib_dev,
}
}
- slid = in_wc ? ib_slid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
+ slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
forward_trap(to_mdev(ibdev), port_num, in_mad);
in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
!ib_query_port(ibdev, port_num, &pattr))
- prev_lid = (u16)pattr.lid;
+ prev_lid = ib_lid_cpu16(pattr.lid);
err = mlx4_MAD_IFC(to_mdev(ibdev),
(mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
u16 slid;
int err;
- slid = in_wc ? ib_slid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
+ slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
MTHCA_PUT(inbox, val, MAD_IFC_G_PATH_OFFSET);
- MTHCA_PUT(inbox, ib_slid_cpu16(in_wc->slid), MAD_IFC_RLID_OFFSET);
+ MTHCA_PUT(inbox, ib_lid_cpu16(in_wc->slid), MAD_IFC_RLID_OFFSET);
MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET);
if (in_grh)
op_modifier |= 0x4;
- in_modifier |= ib_slid_cpu16(in_wc->slid) << 16;
+ in_modifier |= ib_lid_cpu16(in_wc->slid) << 16;
}
err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
u16 *out_mad_pkey_index)
{
int err;
- u16 slid = in_wc ? ib_slid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
+ u16 slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
u16 prev_lid = 0;
struct ib_port_attr pattr;
const struct ib_mad *in_mad = (const struct ib_mad *)in;
in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
!ib_query_port(ibdev, port_num, &pattr))
- prev_lid = (u16)pattr.lid;
+ prev_lid = ib_lid_cpu16(pattr.lid);
err = mthca_MAD_IFC(to_mdev(ibdev),
mad_flags & IB_MAD_IGNORE_MKEY,
wc->uqueue[head].src_qp = entry->src_qp;
wc->uqueue[head].wc_flags = entry->wc_flags;
wc->uqueue[head].pkey_index = entry->pkey_index;
- wc->uqueue[head].slid = ib_slid_cpu16(entry->slid);
+ wc->uqueue[head].slid = ib_lid_cpu16(entry->slid);
wc->uqueue[head].sl = entry->sl;
wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
wc->uqueue[head].port_num = entry->port_num;
return RDMA_AH_ATTR_TYPE_IB;
}
-/* Return slid in 16bit CPU encoding */
-static inline u16 ib_slid_cpu16(u32 slid)
+/**
+ * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
+ * In the current implementation the only way to
+ * get the 32bit lid is from other sources for OPA.
+ * For IB, lids will always be 16bits so cast the
+ * value accordingly.
+ *
+ * @lid: A 32bit LID
+ */
+static inline u16 ib_lid_cpu16(u32 lid)
{
- return (u16)slid;
+ WARN_ON_ONCE(lid & 0xFFFF0000);
+ return (u16)lid;
}
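(Illustrative sketch only, not part of the patch: a caller that needs a plain 16-bit CPU-order LID, like the prev_lid checks in the mlx4/mthca hunks above, narrows the 32-bit port attribute through the new helper; ibdev, port_num and pattr are assumed from that surrounding code.)
	struct ib_port_attr pattr;
	u16 prev_lid = 0;

	/* Hypothetical use: narrow the 32-bit attr.lid to 16-bit CPU order. */
	if (!ib_query_port(ibdev, port_num, &pattr))
		prev_lid = ib_lid_cpu16(pattr.lid);	/* WARNs once if the LID does not fit in 16 bits */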
-/* Return slid in 16bit BE encoding */
-static inline u16 ib_slid_be16(u32 slid)
+/**
+ * ib_lid_be16 - Return lid in 16bit BE encoding.
+ *
+ * @lid: A 32bit LID
+ */
+static inline __be16 ib_lid_be16(u32 lid)
{
- return cpu_to_be16((u16)slid);
+ WARN_ON_ONCE(lid & 0xFFFF0000);
+ return cpu_to_be16((u16)lid);
}
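(Likewise a hedged sketch, not part of the patch: wire-format fields stay big endian, so producers such as the cm.c and user_mad.c hunks above feed the 32-bit wc->slid through ib_lid_be16(); wc is assumed to be a valid struct ib_wc pointer.)
	/* Hypothetical use: convert the 32-bit wc->slid into a big-endian wire field. */
	__be16 wire_lid = ib_lid_be16(wc->slid);	/* WARNs once if the LID does not fit in 16 bits */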
/**