Fix a timing race that could result in a panic: interrupt-level code could
still be using dd->ipath_pd while the close/unload/reset paths freed the
port data behind it.  Serialize the two sides with a new spinlock
(ipath_uctxt_lock); none of the affected paths are performance sensitive.
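The fix follows one pattern throughout: detach the pointer from the device
structure while holding ipath_uctxt_lock, so interrupt code sees either the
live data or NULL, never a half-freed structure, and do the actual freeing
only after the unlock.  A minimal sketch of that pattern, using made-up
foo_* names (foo_devdata, foo_portdata, uctxt_lock, nports) rather than the
driver's own structures:

	#include <linux/spinlock.h>
	#include <linux/slab.h>
	#include <linux/pid.h>

	struct foo_portdata {
		struct pid *port_pid;
		int port_cnt;
	};

	struct foo_devdata {
		spinlock_t uctxt_lock;		/* set up with spin_lock_init() at probe */
		struct foo_portdata **pd;	/* also read from interrupt context */
		int nports;
	};

	static void foo_free_ports(struct foo_devdata *dd)
	{
		struct foo_portdata **tmp;
		unsigned long flags;
		int i;

		/* detach the array under the lock; interrupt code now sees NULL */
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		tmp = dd->pd;
		dd->pd = NULL;
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);

		if (!tmp)
			return;

		/* nothing can reach the ports any more; free outside the lock */
		for (i = 0; i < dd->nports; i++)
			kfree(tmp[i]);
		kfree(tmp);
	}

This is what cleanup_device below does with the whole ipath_pd array at
unload time.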
Signed-off-by: Dave Olson <dave.olson@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
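The close and reset paths use the same lock on a per-port basis: the port
slot and its struct pid are handed off under the lock, and put_pid()/free
happen afterwards, exactly once.  Again an illustrative sketch, continuing
the made-up foo_* names above rather than quoting ipath_close itself:

	/* caller has already determined the port was open, so pd is non-NULL */
	static void foo_close_port(struct foo_devdata *dd, unsigned int port)
	{
		struct foo_portdata *pd;
		struct pid *pid;
		unsigned long flags;

		/* take the slot and its pid away atomically w.r.t. interrupt users */
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		pd = dd->pd[port];
		dd->pd[port] = NULL;
		pid = pd->port_pid;
		pd->port_pid = NULL;
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);

		/* safe outside the lock: nothing else can find pd or pid now */
		put_pid(pid);
		kfree(pd);
	}

Doing put_pid() and the free after the unlock keeps the critical section
short; once the slot is NULL, no interrupt-level reader can reach either
object.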
static void __devexit cleanup_device(struct ipath_devdata *dd)
{
int port;
+ struct ipath_portdata **tmp;
+ unsigned long flags;
if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
/* can't do anything more with chip; needs re-init */
/*
* free any resources still in use (usually just kernel ports)
- * at unload; we do for portcnt, not cfgports, because cfgports
- * could have changed while we were loaded.
+ * at unload; we do it for portcnt, because that's what we allocate.
+ * We acquire the lock to be really paranoid that ipath_pd isn't being
+ * accessed from interrupt-related code (that should not happen,
+ * but it's best to be sure).
*/
+ spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
+ tmp = dd->ipath_pd;
+ dd->ipath_pd = NULL;
+ spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
for (port = 0; port < dd->ipath_portcnt; port++) {
- struct ipath_portdata *pd = dd->ipath_pd[port];
- dd->ipath_pd[port] = NULL;
+ struct ipath_portdata *pd = tmp[port];
+ tmp[port] = NULL; /* debugging paranoia */
ipath_free_pddata(dd, pd);
}
- kfree(dd->ipath_pd);
- /*
- * debuggability, in case some cleanup path tries to use it
- * after this
- */
- dd->ipath_pd = NULL;
+ kfree(tmp);
}
int ipath_reset_device(int unit)
{
int ret, i;
struct ipath_devdata *dd = ipath_lookup(unit);
+ unsigned long flags;
if (!dd) {
ret = -ENODEV;
goto bail;
}
+ spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
if (dd->ipath_pd)
for (i = 1; i < dd->ipath_cfgports; i++) {
- if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
- ipath_dbg("unit %u port %d is in use "
- "(PID %u cmd %s), can't reset\n",
- unit, i,
- pid_nr(dd->ipath_pd[i]->port_pid),
- dd->ipath_pd[i]->port_comm);
- ret = -EBUSY;
- goto bail;
- }
+ if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
+ continue;
+ spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
+ ipath_dbg("unit %u port %d is in use "
+ "(PID %u cmd %s), can't reset\n",
+ unit, i,
+ pid_nr(dd->ipath_pd[i]->port_pid),
+ dd->ipath_pd[i]->port_comm);
+ ret = -EBUSY;
+ goto bail;
}
+ spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
teardown_sdma(dd);
{
int i, sub, any = 0;
struct pid *pid;
+ unsigned long flags;
if (!dd->ipath_pd)
return 0;
+
+ spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
for (i = 1; i < dd->ipath_cfgports; i++) {
if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
continue;
any++;
}
}
+ spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
return any;
}
struct ipath_filedata *fd;
struct ipath_portdata *pd;
struct ipath_devdata *dd;
+ unsigned long flags;
unsigned port;
+ struct pid *pid;
ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
(long)in->i_rdev, fp->private_data);
mutex_unlock(&ipath_mutex);
goto bail;
}
+ /* do this early, so there are no interrupt users of the port after this */
+ spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
port = pd->port_port;
-
- if (pd->port_hdrqfull) {
- ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
- "during run\n", pd->port_comm, pid_nr(pd->port_pid),
- pd->port_hdrqfull);
- pd->port_hdrqfull = 0;
- }
+ dd->ipath_pd[port] = NULL;
+ pid = pd->port_pid;
+ pd->port_pid = NULL;
+ spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
if (pd->port_rcvwait_to || pd->port_piowait_to
|| pd->port_rcvnowait || pd->port_pionowait) {
unlock_expected_tids(pd);
ipath_stats.sps_ports--;
ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
- pd->port_comm, pid_nr(pd->port_pid),
+ pd->port_comm, pid_nr(pid),
dd->ipath_unit, port);
}
- put_pid(pd->port_pid);
- pd->port_pid = NULL;
- dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */
+ put_pid(pid);
mutex_unlock(&ipath_mutex);
ipath_free_pddata(dd, pd); /* after releasing the mutex */
spin_lock_init(&dd->ipath_kernel_tid_lock);
spin_lock_init(&dd->ipath_user_tid_lock);
spin_lock_init(&dd->ipath_sendctrl_lock);
+ spin_lock_init(&dd->ipath_uctxt_lock);
spin_lock_init(&dd->ipath_sdma_lock);
spin_lock_init(&dd->ipath_gpio_lock);
spin_lock_init(&dd->ipath_eep_st_lock);
spinlock_t ipath_kernel_tid_lock;
spinlock_t ipath_user_tid_lock;
spinlock_t ipath_sendctrl_lock;
+ /* protects ipath_pd and (user port) port_cnt use (interrupt vs. free) */
+ spinlock_t ipath_uctxt_lock;
/*
 * IPATH_STATUS_*,
 */
/*
 * (see ipath_get_dma_mr and ipath_dma.c).
 */
if (sge->lkey == 0) {
+ /* always a kernel port, no locking needed */
struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
if (pd->user) {
* (see ipath_get_dma_mr and ipath_dma.c).
*/
if (rkey == 0) {
+ /* always a kernel port, no locking needed */
struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
if (pd->user) {
*/
static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
{
+ /* always a kernel port, no locking needed */
struct ipath_portdata *pd = dd->ipath_pd[0];
memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
int i;
int changed = 0;
+ /* always a kernel port, no locking needed */
pd = dd->ipath_pd[0];
for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
}
/**
- * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table
+ * ipath_get_pkey - return the indexed PKEY from the port PKEY table
* @dd: the infinipath device
* @index: the PKEY index
*/
{
unsigned ret;
+ /* always a kernel port, no locking needed */
if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
ret = 0;
else