struct ib_sa_query {
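+ /* Called with the response MAD, or with mad == NULL when the send fails */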
void (*callback)(struct ib_sa_query *sa_query, int status,
- int num_prs, struct ib_sa_mad *mad);
+ struct ib_sa_mad *mad);
void (*release)(struct ib_sa_query *);
struct ib_sa_client *client;
struct ib_sa_port *port;
u32 seq; /* Local svc request sequence number */
unsigned long timeout; /* Local svc timeout */
u8 path_use; /* How will the pathrecord be used */
-
- /* A separate buffer to save pathrecords of a response, as in cases
- * like IB/netlink, mulptiple pathrecords are supported, so that
- * mad->data is not large enough to hold them
- */
- void *resp_pr_data;
};
#define IB_SA_ENABLE_LOCAL_SERVICE 0x00000001
struct ib_sa_path_query {
void (*callback)(int status, struct sa_path_rec *rec,
- int num_paths, void *context);
+ unsigned int num_paths, void *context);
void *context;
struct ib_sa_query sa_query;
struct sa_path_rec *conv_pr;
.size_bits = 512 },
};
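+/* Upper bound on the number of primary path records taken from one netlink response */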
+#define RDMA_PRIMARY_PATH_MAX_REC_NUM 3
+
static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
const struct nlmsghdr *nlh)
{
- struct ib_path_rec_data *srec, *drec;
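+ /* Records are parsed onto the stack; no heap buffer to allocate or free */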
+ struct sa_path_rec recs[RDMA_PRIMARY_PATH_MAX_REC_NUM];
struct ib_sa_path_query *path_query;
+ struct ib_path_rec_data *rec_data;
struct ib_mad_send_wc mad_send_wc;
const struct nlattr *head, *curr;
struct ib_sa_mad *mad = NULL;
- int len, rem, num_prs = 0;
+ int len, rem, status = -EIO;
+ unsigned int num_prs = 0;
u32 mask = 0;
- int status = -EIO;
if (!query->callback)
goto out;
path_query = container_of(query, struct ib_sa_path_query, sa_query);
mad = query->mad_buf->mad;
- if (!path_query->conv_pr &&
- (be16_to_cpu(mad->mad_hdr.attr_id) == IB_SA_ATTR_PATH_REC)) {
- /* Need a larger buffer for possible multiple PRs */
- query->resp_pr_data = kvcalloc(RDMA_PRIMARY_PATH_MAX_REC_NUM,
- sizeof(*drec), GFP_KERNEL);
- if (!query->resp_pr_data) {
- query->callback(query, -ENOMEM, 0, NULL);
- return;
- }
- }
head = (const struct nlattr *) nlmsg_data(nlh);
len = nlmsg_len(nlh);
break;
}
- drec = (struct ib_path_rec_data *)query->resp_pr_data;
nla_for_each_attr(curr, head, len, rem) {
if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD)
continue;
- srec = nla_data(curr);
- if ((srec->flags & mask) != mask)
+ rec_data = nla_data(curr);
+ if ((rec_data->flags & mask) != mask)
continue;
- status = 0;
- if (!drec) {
- memcpy(mad->data, srec->path_rec,
- sizeof(srec->path_rec));
- num_prs = 1;
- break;
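+ /* OPA and converted-PR queries return a single record through the MAD buffer */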
+ if ((query->flags & IB_SA_QUERY_OPA) ||
+ path_query->conv_pr) {
+ mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
+ memcpy(mad->data, rec_data->path_rec,
+ sizeof(rec_data->path_rec));
+ query->callback(query, 0, mad);
+ goto out;
}
- memcpy(drec, srec, sizeof(*drec));
- drec++;
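+ /* Native IB query: unpack every matching record into the recs array */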
+ status = 0;
+ ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
+ rec_data->path_rec, &recs[num_prs]);
+ recs[num_prs].flags = rec_data->flags;
+ recs[num_prs].rec_type = SA_PATH_REC_TYPE_IB;
+ sa_path_set_dmac_zero(&recs[num_prs]);
+
num_prs++;
if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM)
break;
}
- if (!status)
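+ /* At least one record was parsed: hand the array straight to the path callback */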
+ if (!status) {
mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
-
- query->callback(query, status, num_prs, mad);
- kvfree(query->resp_pr_data);
- query->resp_pr_data = NULL;
+ path_query->callback(status, recs, num_prs,
+ path_query->context);
+ } else {
+ query->callback(query, status, mad);
+ }
out:
mad_send_wc.send_buf = query->mad_buf;
return PR_IB_SUPPORTED;
}
-static void ib_sa_pr_callback_single(struct ib_sa_path_query *query,
- int status, struct ib_sa_mad *mad)
+static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
+ int status, struct ib_sa_mad *mad)
{
+ struct ib_sa_path_query *query =
+ container_of(sa_query, struct ib_sa_path_query, sa_query);
struct sa_path_rec rec = {};
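+ /* No response MAD (send timed out or failed): report the error with no records */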
+ if (!mad) {
+ query->callback(status, NULL, 0, query->context);
+ return;
+ }
+
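+ /* An OPA response carries exactly one path record in the MAD payload */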
+ if (sa_query->flags & IB_SA_QUERY_OPA) {
+ ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
+ mad->data, &rec);
+ rec.rec_type = SA_PATH_REC_TYPE_OPA;
+ query->callback(status, &rec, 1, query->context);
+ return;
+ }
+
ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
mad->data, &rec);
rec.rec_type = SA_PATH_REC_TYPE_IB;
}
}
-/**
- * ib_sa_pr_callback_multiple() - Parse path records then do callback.
- *
- * In a multiple-PR case the PRs are saved in "query->resp_pr_data"
- * (instead of"mad->data") and with "ib_path_rec_data" structure format,
- * so that rec->flags can be set to indicate the type of PR.
- * This is valid only in IB fabric.
- */
-static void ib_sa_pr_callback_multiple(struct ib_sa_path_query *query,
- int status, int num_prs,
- struct ib_path_rec_data *rec_data)
-{
- struct sa_path_rec *rec;
- int i;
-
- rec = kvcalloc(num_prs, sizeof(*rec), GFP_KERNEL);
- if (!rec) {
- query->callback(-ENOMEM, NULL, 0, query->context);
- return;
- }
-
- for (i = 0; i < num_prs; i++) {
- ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
- rec_data[i].path_rec, rec + i);
- rec[i].rec_type = SA_PATH_REC_TYPE_IB;
- sa_path_set_dmac_zero(rec + i);
- rec[i].flags = rec_data[i].flags;
- }
-
- query->callback(status, rec, num_prs, query->context);
- kvfree(rec);
-}
-
-static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
- int status, int num_prs,
- struct ib_sa_mad *mad)
-{
- struct ib_sa_path_query *query =
- container_of(sa_query, struct ib_sa_path_query, sa_query);
- struct sa_path_rec rec;
-
- if (!mad || !num_prs) {
- query->callback(status, NULL, 0, query->context);
- return;
- }
-
- if (sa_query->flags & IB_SA_QUERY_OPA) {
- if (num_prs != 1) {
- query->callback(-EINVAL, NULL, 0, query->context);
- return;
- }
-
- ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
- mad->data, &rec);
- rec.rec_type = SA_PATH_REC_TYPE_OPA;
- query->callback(status, &rec, num_prs, query->context);
- } else {
- if (!sa_query->resp_pr_data)
- ib_sa_pr_callback_single(query, status, mad);
- else
- ib_sa_pr_callback_multiple(query, status, num_prs,
- sa_query->resp_pr_data);
- }
-}
-
static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
struct ib_sa_path_query *query =
unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct sa_path_rec *resp,
- int num_paths, void *context),
+ unsigned int num_paths, void *context),
void *context,
struct ib_sa_query **sa_query)
{
EXPORT_SYMBOL(ib_sa_path_rec_get);
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
- int status, int num_prs,
- struct ib_sa_mad *mad)
+ int status, struct ib_sa_mad *mad)
{
struct ib_sa_mcmember_query *query =
container_of(sa_query, struct ib_sa_mcmember_query, sa_query);
/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
- int status, int num_paths,
- struct ib_sa_mad *mad)
+ int status, struct ib_sa_mad *mad)
{
struct ib_sa_guidinfo_query *query =
container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);
}
static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
- int status, int num_prs,
- struct ib_sa_mad *mad)
+ int status, struct ib_sa_mad *mad)
{
unsigned long flags;
struct ib_sa_classport_info_query *query =
/* No callback -- already got recv */
break;
case IB_WC_RESP_TIMEOUT_ERR:
- query->callback(query, -ETIMEDOUT, 0, NULL);
+ query->callback(query, -ETIMEDOUT, NULL);
break;
case IB_WC_WR_FLUSH_ERR:
- query->callback(query, -EINTR, 0, NULL);
+ query->callback(query, -EINTR, NULL);
break;
default:
- query->callback(query, -EIO, 0, NULL);
+ query->callback(query, -EIO, NULL);
break;
}
if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
query->callback(query,
mad_recv_wc->recv_buf.mad->mad_hdr.status ?
- -EINVAL : 0, 1,
+ -EINVAL : 0,
(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
else
- query->callback(query, -EIO, 0, NULL);
+ query->callback(query, -EIO, NULL);
}
ib_free_recv_mad(mad_recv_wc);