struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
dma_async_tx_callback callback = tx->callback;
dma_cookie_t cookie;
+ unsigned long flags;
- spin_lock_bh(&sh_chan->desc_lock);
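+ /*
+  * tx_submit() can be called from any context, possibly with interrupts
+  * already disabled, so the IRQ state is saved and restored here rather
+  * than relying on bottom-half-only exclusion.
+  */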
+ spin_lock_irqsave(&sh_chan->desc_lock, flags);
cookie = sh_chan->common.cookie;
cookie++;
tx->cookie, &last->async_tx, sh_chan->id,
desc->hw.sar, desc->hw.tcr, desc->hw.dar);
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
return cookie;
}
dmae_init(sh_chan);
}
- spin_lock_bh(&sh_chan->desc_lock);
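+ /*
+  * Descriptor pre-allocation needs no locking: kzalloc(GFP_KERNEL) may
+  * sleep and so cannot run under an IRQ-safe spinlock, and the dmaengine
+  * core is assumed to guarantee that alloc_chan_resources() never runs
+  * concurrently with itself or with free_chan_resources().
+  */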
while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
- spin_unlock_bh(&sh_chan->desc_lock);
desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
- if (!desc) {
- spin_lock_bh(&sh_chan->desc_lock);
+ if (!desc)
break;
- }
dma_async_tx_descriptor_init(&desc->async_tx,
&sh_chan->common);
desc->async_tx.tx_submit = sh_dmae_tx_submit;
desc->mark = DESC_IDLE;
- spin_lock_bh(&sh_chan->desc_lock);
list_add(&desc->node, &sh_chan->ld_free);
sh_chan->descs_allocated++;
}
- spin_unlock_bh(&sh_chan->desc_lock);
if (!sh_chan->descs_allocated) {
ret = -ENOMEM;
clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
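+ /* Do not leave a stale slave pointer behind on the error path */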
+ chan->private = NULL;
pm_runtime_put(sh_chan->dev);
return ret;
}
chan->private = NULL;
}
- spin_lock_bh(&sh_chan->desc_lock);
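+ /*
+  * free_chan_resources() runs in process context with interrupts enabled,
+  * so the non-saving _irq variant is sufficient here.
+  */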
+ spin_lock_irq(&sh_chan->desc_lock);
list_splice_init(&sh_chan->ld_free, &list);
sh_chan->descs_allocated = 0;
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irq(&sh_chan->desc_lock);
if (descs > 0)
pm_runtime_put(sh_chan->dev);
struct sh_desc *first = NULL, *new = NULL /* compiler... */;
LIST_HEAD(tx_list);
int chunks = 0;
+ unsigned long irq_flags;
int i;
if (!sg_len)
(SH_DMA_TCR_MAX + 1);
/* Have to lock the whole loop to protect against concurrent release */
- spin_lock_bh(&sh_chan->desc_lock);
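+ /* Descriptor preparation may run in atomic context, hence _irqsave */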
+ spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);
/*
* Chaining:
/* Put them back on the free list, so, they don't get lost */
list_splice_tail(&tx_list, &sh_chan->ld_free);
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
return &first->async_tx;
new->mark = DESC_IDLE;
list_splice(&tx_list, &sh_chan->ld_free);
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
return NULL;
}
unsigned long arg)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+ unsigned long flags;
/* Only supports DMA_TERMINATE_ALL */
if (cmd != DMA_TERMINATE_ALL)
	return -ENXIO;
if (!chan)
return -EINVAL;
- spin_lock_bh(&sh_chan->desc_lock);
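+ /*
+  * Halting the channel and reading back the partial transfer count must
+  * be atomic with respect to interrupt-context users of desc_lock.
+  */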
+ spin_lock_irqsave(&sh_chan->desc_lock, flags);
dmae_halt(sh_chan);
if (!list_empty(&sh_chan->ld_queue)) {
sh_chan->xmit_shift;
}
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
sh_dmae_chan_ld_cleanup(sh_chan, true);
dma_cookie_t cookie = 0;
dma_async_tx_callback callback = NULL;
void *param = NULL;
+ unsigned long flags;
- spin_lock_bh(&sh_chan->desc_lock);
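+ /* Completion callbacks are only invoked after desc_lock is dropped below */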
+ spin_lock_irqsave(&sh_chan->desc_lock, flags);
list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
struct dma_async_tx_descriptor *tx = &desc->async_tx;
*/
sh_chan->completed_cookie = sh_chan->common.cookie;
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
if (callback)
callback(param);
{
struct sh_desc *desc;
- spin_lock_bh(&sh_chan->desc_lock);
+ spin_lock_irq(&sh_chan->desc_lock);
/* DMA work check */
- if (dmae_is_busy(sh_chan))
- goto sh_chan_xfer_ld_queue_end;
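+ /* Channel still busy: nothing to do, drop the lock and return */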
+ if (dmae_is_busy(sh_chan)) {
+ spin_unlock_irq(&sh_chan->desc_lock);
+ return;
+ }
/* Find the first not transferred descriptor */
list_for_each_entry(desc, &sh_chan->ld_queue, node)
break;
}
-sh_chan_xfer_ld_queue_end:
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irq(&sh_chan->desc_lock);
}
static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
dma_cookie_t last_used;
dma_cookie_t last_complete;
enum dma_status status;
+ unsigned long flags;
sh_dmae_chan_ld_cleanup(sh_chan, false);
BUG_ON(last_complete < 0);
dma_set_tx_state(txstate, last_complete, last_used, 0);
- spin_lock_bh(&sh_chan->desc_lock);
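+ /* The completion check below must not race with descriptor clean-up */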
+ spin_lock_irqsave(&sh_chan->desc_lock, flags);
status = dma_async_is_complete(cookie, last_complete, last_used);
}
}
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
return status;
}
u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
- spin_lock(&sh_chan->desc_lock);
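+ /*
+  * The tasklet runs with interrupts enabled: disable them while holding
+  * desc_lock, matching the _irqsave/_irq users elsewhere in the driver.
+  */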
+ spin_lock_irq(&sh_chan->desc_lock);
list_for_each_entry(desc, &sh_chan->ld_queue, node) {
if (desc->mark == DESC_SUBMITTED &&
((desc->direction == DMA_FROM_DEVICE &&
break;
}
}
- spin_unlock(&sh_chan->desc_lock);
+ spin_unlock_irq(&sh_chan->desc_lock);
/* Next desc */
sh_chan_xfer_ld_queue(sh_chan);