{
	struct dma_multi *multi = &chan->chip->multi;
	u32 val;

	/*
	 * Clear this channel's enable bit. The ch_en_we_shift field is
	 * presumably a per-bit write-enable mask, so only this channel's
	 * bit is updated and the other channels are left untouched —
	 * TODO(review): confirm against the register map.
	 *
	 * NOTE(review): resolved leftover unified-diff markers here; the
	 * patch also removed a readl_poll_timeout_atomic() wait for the
	 * channel to actually stop — confirm no caller relies on the
	 * channel being idle on return.
	 */
	val = axi_dma_ioread32(chan->chip, multi->en.ch_en);
	val &= ~(BIT(chan->id) << multi->en.ch_en_shift);
	val |= BIT(chan->id) << multi->en.ch_en_we_shift;
	axi_dma_iowrite32(chan->chip, multi->en.ch_en, val);
}
static inline void axi_chan_enable(struct axi_dma_chan *chan)
	/*
	 * NOTE(review): this region is a fused patch fragment — the lines
	 * below belong to more than one function (the non-idle guard and
	 * the VIC7100 flush path), and surrounding code is missing from
	 * this view. Diff markers have been resolved per the patch intent;
	 * the missing bodies must be restored from the full file.
	 */
	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));

		/* Do not touch a channel the hardware still owns. */
		return;
	}
	axi_dma_enable(chan->chip);
	/* Flush all the descriptors (StarFive VIC7100 cache workaround). */
#ifdef CONFIG_SOC_STARFIVE_VIC7100
	/*
	 * Fixed: the original had "need_flush = true" — an assignment in
	 * the condition, which is always true and clobbers the flag.
	 */
	if (chan->chip->multi.need_flush) {
		int count = atomic_read(&chan->descs_allocated);
		int i;
			(chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
	}
	/*
		axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
	}
-static void axi_chan_tasklet(struct tasklet_struct *t)
+static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
{
- struct axi_dma_chan *chan = from_tasklet(chan, t, dma_tasklet);
struct virt_dma_desc *vd;
- u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
unsigned long flags;
- u32 val;
- int ret;
-
- ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
- !(val & chan_active), 10, 10000);
- if (ret == -ETIMEDOUT)
- dev_warn(chan2dev(chan),
- "irq %s failed to stop\n", axi_chan_name(chan));
spin_lock_irqsave(&chan->vc.lock, flags);
+ axi_chan_disable(chan);
+
/* The bad descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
- if (!vd) {
- dev_warn(chan2dev(chan),
- "%s vd is null\n", axi_chan_name(chan));
- spin_unlock_irqrestore(&chan->vc.lock, flags);
- return;
- }
-
- if (chan->cyclic) {
- vchan_cyclic_callback(vd);
- axi_chan_enable(chan);
- } else {
- /* Remove the completed descriptor from issued list */
- list_del(&vd->node);
+ /* Remove the completed descriptor from issued list */
+ list_del(&vd->node);
- /* WARN about bad descriptor */
- dev_err(chan2dev(chan),
- "Bad descriptor submitted for %s, cookie: %d\n",
- axi_chan_name(chan), vd->tx.cookie);
- axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));
+ /* WARN about bad descriptor */
+ dev_err(chan2dev(chan),
+ "Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
+ axi_chan_name(chan), vd->tx.cookie, status);
+ axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));
- vchan_cookie_complete(vd);
+ vchan_cookie_complete(vd);
- /* Try to restart the controller */
- axi_chan_start_first_queued(chan);
- }
+ /* Try to restart the controller */
+ axi_chan_start_first_queued(chan);
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
-static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&chan->vc.lock, flags);
-
- if (unlikely(axi_chan_is_hw_enable(chan)))
- axi_chan_disable(chan);
-
- tasklet_schedule(&chan->dma_tasklet);
- spin_unlock_irqrestore(&chan->vc.lock, flags);
-}
-
static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
int count = atomic_read(&chan->descs_allocated);
/* The completed descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
- if (!vd) {
- dev_err(chan2dev(chan),
- "%s vd is null\n", axi_chan_name(chan));
- spin_unlock_irqrestore(&chan->vc.lock, flags);
- return;
- }
if (chan->cyclic) {
desc = vd_to_axi_desc(vd);
axi_chan_disable(chan);
ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
- !(val & chan_active), 1000, 10000);
+ !(val & chan_active), 1000, TIMEOUT_US);
if (ret == -ETIMEDOUT)
dev_warn(dchan2dev(dchan),
"%s failed to stop\n", axi_chan_name(chan));
/*
 * axi_dma_cyclic_stop - stop the channel backing a cyclic transfer.
 * @dchan: dmaengine channel handle
 *
 * Disables the channel under the vc lock. Does not wait for the
 * hardware to report idle and does not complete outstanding
 * descriptors — callers own that cleanup.
 */
void axi_dma_cyclic_stop(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	axi_chan_disable(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
EXPORT_SYMBOL(axi_dma_cyclic_stop);
chan->vc.desc_free = vchan_desc_put;
vchan_init(&chan->vc, &dw->dma);
- tasklet_setup(&chan->dma_tasklet, axi_chan_tasklet);
}
/* Set capabilities */