From 858c21c0f380fb9c78f47f3e372f9baadc54dffe Mon Sep 17 00:00:00 2001
From: Russell King - ARM Linux
Date: Mon, 3 Jan 2011 22:41:34 +0000
Subject: [PATCH] ARM: PL08x: move callback outside spinlock'd region
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Calling the callback handler with spinlocks in the tasklet held leads
to deadlock when dmaengine functions are called:

BUG: spinlock lockup on CPU#0, sh/417, c1870a08
Backtrace:
...
[] (do_raw_spin_lock+0x0/0x154) from [] (_raw_spin_lock_irqsave+0x54/0x60)
[] (_raw_spin_lock_irqsave+0x0/0x60) from [] (pl08x_prep_channel_resources+0x718/0x8b4)
[] (pl08x_prep_channel_resources+0x0/0x8b4) from [] (pl08x_prep_slave_sg+0x120/0x19c)
[] (pl08x_prep_slave_sg+0x0/0x19c) from [] (pl011_dma_tx_refill+0x164/0x224)
[] (pl011_dma_tx_refill+0x0/0x224) from [] (pl011_dma_tx_callback+0x7c/0xc4)
[] (pl011_dma_tx_callback+0x0/0xc4) from [] (pl08x_tasklet+0x60/0x368)
[] (pl08x_tasklet+0x0/0x368) from [] (tasklet_action+0xa0/0x100)

Dan quoted the documentation:

> 2/ Completion callback routines cannot submit new operations.  This
>    results in recursion in the synchronous case and spin_locks being
>    acquired twice in the asynchronous case.

but then followed up to say:

> I should clarify, this is the async_memcpy() api requirement which is
> not used outside of md/raid5.  DMA drivers can and do allow new
> submissions from callbacks, and the ones that do so properly move the
> callback outside of the driver lock.

So let's fix it by moving the callback out of the spinlocked region.

Signed-off-by: Russell King
Acked-by: Linus Walleij
Signed-off-by: Dan Williams
---
 drivers/dma/amba-pl08x.c | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 986c127..660165d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1540,32 +1540,29 @@ static void pl08x_tasklet(unsigned long data)
 {
 	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
 	struct pl08x_driver_data *pl08x = plchan->host;
+	struct pl08x_txd *txd;
+	dma_async_tx_callback callback = NULL;
+	void *callback_param = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&plchan->lock, flags);
 
-	if (plchan->at) {
-		dma_async_tx_callback callback =
-			plchan->at->tx.callback;
-		void *callback_param =
-			plchan->at->tx.callback_param;
+	txd = plchan->at;
+	plchan->at = NULL;
 
-		/*
-		 * Update last completed
-		 */
-		plchan->lc = plchan->at->tx.cookie;
+	if (txd) {
+		callback = txd->tx.callback;
+		callback_param = txd->tx.callback_param;
 
 		/*
-		 * Callback to signal completion
+		 * Update last completed
 		 */
-		if (callback)
-			callback(callback_param);
+		plchan->lc = txd->tx.cookie;
 
 		/*
 		 * Free the descriptor
 		 */
-		pl08x_free_txd(pl08x, plchan->at);
-		plchan->at = NULL;
+		pl08x_free_txd(pl08x, txd);
 	}
 	/*
 	 * If a new descriptor is queued, set it up
@@ -1616,6 +1613,10 @@ static void pl08x_tasklet(unsigned long data)
 	}
 
 	spin_unlock_irqrestore(&plchan->lock, flags);
+
+	/* Callback to signal completion */
+	if (callback)
+		callback(callback_param);
 }
 
 static irqreturn_t pl08x_irq(int irq, void *dev)
-- 
2.7.4
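
For reference, the pattern the patch applies is the usual dmaengine one: snapshot
the callback and its argument while the channel lock is held, do the bookkeeping,
drop the lock, and only then call out, so the callback is free to submit new work
that re-takes the same lock. Below is a minimal standalone C sketch of that
ordering, using pthreads rather than kernel primitives; the demo_chan,
demo_complete and resubmit names are invented for illustration and do not appear
in drivers/dma/amba-pl08x.c.

/* Minimal userspace sketch of "call the callback after dropping the lock".
 * Build with: cc demo.c -pthread */
#include <pthread.h>
#include <stdio.h>

typedef void (*demo_callback)(void *param);

struct demo_chan {
	pthread_mutex_t lock;
	demo_callback callback;		/* set by the client when work is queued */
	void *callback_param;
	int last_completed;		/* bookkeeping updated under the lock */
};

/* Completion path: same ordering as pl08x_tasklet() after the patch. */
static void demo_complete(struct demo_chan *chan, int cookie)
{
	demo_callback cb = NULL;
	void *cb_param = NULL;

	pthread_mutex_lock(&chan->lock);
	/* Snapshot the callback while protected by the lock ... */
	cb = chan->callback;
	cb_param = chan->callback_param;
	/* ... and do the per-channel bookkeeping there too. */
	chan->last_completed = cookie;
	pthread_mutex_unlock(&chan->lock);

	/* Only now call out; the callback may re-acquire chan->lock
	 * (e.g. to queue the next transfer) without deadlocking. */
	if (cb)
		cb(cb_param);
}

/* Example client callback that "submits new work", i.e. takes the lock. */
static void resubmit(void *param)
{
	struct demo_chan *chan = param;

	pthread_mutex_lock(&chan->lock);
	printf("resubmitting after completion %d\n", chan->last_completed);
	pthread_mutex_unlock(&chan->lock);
}

int main(void)
{
	struct demo_chan chan = { .last_completed = 0 };

	pthread_mutex_init(&chan.lock, NULL);
	chan.callback = resubmit;
	chan.callback_param = &chan;

	demo_complete(&chan, 1);

	pthread_mutex_destroy(&chan.lock);
	return 0;
}

Calling the callback before the unlock, as the old code did, deadlocks as soon
as resubmit() tries to take chan->lock again, which is the same shape as the
pl011_dma_tx_callback -> pl08x_prep_slave_sg path in the backtrace above.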