Automatically allocate an MR when registering a MAD agent.
MAD clients are modified to use this updated API.
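For illustration, the client-side change amounts to dropping the
privately allocated MR and taking the lkey from the agent instead
(sketch only, using the names from agent.c in this patch):

	/* before: each client registered its own DMA MR */
	gather_list.lkey = port_priv->mr->lkey;

	/* after: use the MR allocated by ib_register_mad_agent() */
	gather_list.lkey = mad_agent->mr->lkey;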
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
sizeof(mad_priv->mad),
DMA_TO_DEVICE);
gather_list.length = sizeof(mad_priv->mad);
- gather_list.lkey = (*port_priv->mr).lkey;
+ gather_list.lkey = mad_agent->mr->lkey;
send_wr.next = NULL;
send_wr.opcode = IB_WR_SEND;
goto error3;
}
- port_priv->mr = ib_get_dma_mr(port_priv->smp_agent->qp->pd,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(port_priv->mr)) {
- printk(KERN_ERR SPFX "Couldn't get DMA MR\n");
- ret = PTR_ERR(port_priv->mr);
- goto error4;
- }
-
spin_lock_irqsave(&ib_agent_port_list_lock, flags);
list_add_tail(&port_priv->port_list, &ib_agent_port_list);
spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
return 0;
-error4:
- ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
error3:
ib_unregister_mad_agent(port_priv->smp_agent);
error2:
list_del(&port_priv->port_list);
spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
- ib_dereg_mr(port_priv->mr);
-
ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
ib_unregister_mad_agent(port_priv->smp_agent);
kfree(port_priv);
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- * $Id: agent_priv.h 1389 2004-12-27 22:56:47Z roland $
+ * $Id: agent_priv.h 1640 2005-01-24 22:39:02Z halr $
*/
#ifndef __IB_AGENT_PRIV_H__
int port_num;
struct ib_mad_agent *smp_agent; /* SM class */
struct ib_mad_agent *perf_mgmt_agent; /* PerfMgmt class */
- struct ib_mr *mr;
};
#endif /* __IB_AGENT_PRIV_H__ */
ret = ERR_PTR(-ENOMEM);
goto error1;
}
+ memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
+
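+ /* Allocate a per-agent DMA MR; clients use agent.mr->lkey for sends */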
+ mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(mad_agent_priv->agent.mr)) {
+ ret = ERR_PTR(-ENOMEM);
+ goto error2;
+ }
if (mad_reg_req) {
reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
if (!reg_req) {
ret = ERR_PTR(-ENOMEM);
- goto error2;
+ goto error3;
}
/* Make a copy of the MAD registration request */
memcpy(reg_req, mad_reg_req, sizeof *reg_req);
}
/* Now, fill in the various structures */
- memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
mad_agent_priv->reg_req = reg_req;
mad_agent_priv->rmpp_version = rmpp_version;
if (method) {
if (method_in_use(&method,
mad_reg_req))
- goto error3;
+ goto error4;
}
}
ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
if (is_vendor_method_in_use(
vendor_class,
mad_reg_req))
- goto error3;
+ goto error4;
}
}
ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
}
if (ret2) {
ret = ERR_PTR(ret2);
- goto error3;
+ goto error4;
}
}
return &mad_agent_priv->agent;
-error3:
+error4:
spin_unlock_irqrestore(&port_priv->reg_lock, flags);
kfree(reg_req);
-error2:
+error3:
+ ib_dereg_mr(mad_agent_priv->agent.mr);
+error2:
kfree(mad_agent_priv);
error1:
return ret;
}
* MADs, preventing us from queuing additional work
*/
cancel_mads(mad_agent_priv);
-
port_priv = mad_agent_priv->qp_info->port_priv;
-
cancel_delayed_work(&mad_agent_priv->timed_work);
- flush_workqueue(port_priv->wq);
spin_lock_irqsave(&port_priv->reg_lock, flags);
remove_mad_reg_req(mad_agent_priv);
list_del(&mad_agent_priv->agent_list);
spin_unlock_irqrestore(&port_priv->reg_lock, flags);
- /* XXX: Cleanup pending RMPP receives for this agent */
+ flush_workqueue(port_priv->wq);
atomic_dec(&mad_agent_priv->refcount);
wait_event(mad_agent_priv->wait,
if (mad_agent_priv->reg_req)
kfree(mad_agent_priv->reg_req);
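+ /* Deregister the MR acquired in ib_register_mad_agent() */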
+ ib_dereg_mr(mad_agent_priv->agent.mr);
kfree(mad_agent_priv);
}
list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
queue_work(mad_agent_priv->qp_info->port_priv->wq,
- &mad_agent_priv->local_work);
+ &mad_agent_priv->local_work);
ret = 1;
out:
return ret;
struct ib_sa_port {
struct ib_mad_agent *agent;
- struct ib_mr *mr;
struct ib_sa_sm_ah *sm_ah;
struct work_struct update_task;
spinlock_t ah_lock;
sizeof (struct ib_sa_mad),
DMA_TO_DEVICE);
gather_list.length = sizeof (struct ib_sa_mad);
- gather_list.lkey = port->mr->lkey;
+ gather_list.lkey = port->agent->mr->lkey;
pci_unmap_addr_set(query, mapping, gather_list.addr);
ret = ib_post_send_mad(port->agent, &wr, &bad_wr);
sa_dev->end_port = e;
for (i = 0; i <= e - s; ++i) {
- sa_dev->port[i].mr = NULL;
sa_dev->port[i].sm_ah = NULL;
sa_dev->port[i].port_num = i + s;
spin_lock_init(&sa_dev->port[i].ah_lock);
if (IS_ERR(sa_dev->port[i].agent))
goto err;
- sa_dev->port[i].mr = ib_get_dma_mr(sa_dev->port[i].agent->qp->pd,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(sa_dev->port[i].mr)) {
- ib_unregister_mad_agent(sa_dev->port[i].agent);
- goto err;
- }
-
INIT_WORK(&sa_dev->port[i].update_task,
update_sm_ah, &sa_dev->port[i]);
}
return;
err:
- while (--i >= 0) {
- ib_dereg_mr(sa_dev->port[i].mr);
+ while (--i >= 0)
ib_unregister_mad_agent(sa_dev->port[i].agent);
- }
kfree(sa_dev);
struct ib_mad_agent {
struct ib_device *device;
struct ib_qp *qp;
+ struct ib_mr *mr;
ib_mad_recv_handler recv_handler;
ib_mad_send_handler send_handler;
ib_mad_snoop_handler snoop_handler;