CR_1849_DMA_515: Stop dma channel
authorWalker Chen <walker.chen@starfivetech.com>
Fri, 9 Sep 2022 14:07:55 +0000 (22:07 +0800)
committerWalker Chen <walker.chen@starfivetech.com>
Thu, 15 Sep 2022 10:16:07 +0000 (18:16 +0800)
DMA: fixed some issues.

1. Removed some invalid functions, such as dma_tasklet.
2. Exported the axi_dma_cyclic_stop function for use by other drivers.
3. Set a 200 ms timeout when polling the DMA channel status
   after a channel is disabled.

Signed-off-by: Walker Chen <walker.chen@starfivetech.com>
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c [changed mode: 0755->0644]
drivers/dma/dw-axi-dmac/dw-axi-dmac.h [changed mode: 0755->0644]

old mode 100755 (executable)
new mode 100644 (file)
index e5e1e6d..836f348
@@ -198,16 +198,11 @@ static inline void axi_chan_disable(struct axi_dma_chan *chan)
 {
        struct dma_multi *multi = &chan->chip->multi;
        u32 val;
-       int ret;
-       u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
 
        val = axi_dma_ioread32(chan->chip, multi->en.ch_en);
        val &= ~(BIT(chan->id) << multi->en.ch_en_shift);
-       val |=   BIT(chan->id) << multi->en.ch_en_we_shift;
+       val |= BIT(chan->id) << multi->en.ch_en_we_shift;
        axi_dma_iowrite32(chan->chip, multi->en.ch_en, val);
-
-       ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
-                                       !(val & chan_active), 10, 100000);
 }
 
 static inline void axi_chan_enable(struct axi_dma_chan *chan)
@@ -397,8 +392,8 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
        if (unlikely(axi_chan_is_hw_enable(chan))) {
                dev_err(chan2dev(chan), "%s is non-idle!\n",
                        axi_chan_name(chan));
-               axi_chan_disable(chan);
-               //return;
+
+               return;
        }
 
        axi_dma_enable(chan->chip);
@@ -474,7 +469,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
 
        /*flush all the desc */
 #ifdef CONFIG_SOC_STARFIVE_VIC7100
-       if (chan->chip->multi.need_flush = true) {
+       if (chan->chip->multi.need_flush == true) {
                int count = atomic_read(&chan->descs_allocated);
                int i;
 
@@ -598,8 +593,6 @@ static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
                        (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
        reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
        lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
-
-       return;
 }
 
 /*
@@ -1070,67 +1063,34 @@ static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
                axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
 }
 
-static void axi_chan_tasklet(struct tasklet_struct *t)
+static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
 {
-       struct axi_dma_chan *chan = from_tasklet(chan, t, dma_tasklet);
        struct virt_dma_desc *vd;
-       u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
        unsigned long flags;
-       u32 val;
-       int ret;
-
-       ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
-                                       !(val & chan_active), 10, 10000);
-       if (ret == -ETIMEDOUT)
-               dev_warn(chan2dev(chan),
-                        "irq %s failed to stop\n", axi_chan_name(chan));
 
        spin_lock_irqsave(&chan->vc.lock, flags);
 
+       axi_chan_disable(chan);
+
        /* The bad descriptor currently is in the head of vc list */
        vd = vchan_next_desc(&chan->vc);
-       if (!vd) {
-               dev_warn(chan2dev(chan),
-                        "%s vd is null\n", axi_chan_name(chan));
-               spin_unlock_irqrestore(&chan->vc.lock, flags);
-               return;
-       }
-
-       if (chan->cyclic) {
-               vchan_cyclic_callback(vd);
-               axi_chan_enable(chan);
-       } else {
-               /* Remove the completed descriptor from issued list */
-               list_del(&vd->node);
+       /* Remove the completed descriptor from issued list */
+       list_del(&vd->node);
 
-               /* WARN about bad descriptor */
-               dev_err(chan2dev(chan),
-                       "Bad descriptor submitted for %s, cookie: %d\n",
-                       axi_chan_name(chan), vd->tx.cookie);
-               axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));
+       /* WARN about bad descriptor */
+       dev_err(chan2dev(chan),
+               "Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
+               axi_chan_name(chan), vd->tx.cookie, status);
+       axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));
 
-               vchan_cookie_complete(vd);
+       vchan_cookie_complete(vd);
 
-               /* Try to restart the controller */
-               axi_chan_start_first_queued(chan);
-       }
+       /* Try to restart the controller */
+       axi_chan_start_first_queued(chan);
 
        spin_unlock_irqrestore(&chan->vc.lock, flags);
 }
 
-static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&chan->vc.lock, flags);
-
-       if (unlikely(axi_chan_is_hw_enable(chan)))
-               axi_chan_disable(chan);
-
-       tasklet_schedule(&chan->dma_tasklet);
-       spin_unlock_irqrestore(&chan->vc.lock, flags);
-}
-
 static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
 {
        int count = atomic_read(&chan->descs_allocated);
@@ -1151,12 +1111,6 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
 
        /* The completed descriptor currently is in the head of vc list */
        vd = vchan_next_desc(&chan->vc);
-       if (!vd) {
-               dev_err(chan2dev(chan),
-                        "%s vd is null\n", axi_chan_name(chan));
-               spin_unlock_irqrestore(&chan->vc.lock, flags);
-               return;
-       }
 
        if (chan->cyclic) {
                desc = vd_to_axi_desc(vd);
@@ -1236,7 +1190,7 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
        axi_chan_disable(chan);
 
        ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
-                                       !(val & chan_active), 1000, 10000);
+                                       !(val & chan_active), 1000, TIMEOUT_US);
        if (ret == -ETIMEDOUT)
                dev_warn(dchan2dev(dchan),
                         "%s failed to stop\n", axi_chan_name(chan));
@@ -1352,15 +1306,11 @@ static int axi_dma_resume(struct axi_dma_chip *chip)
 void axi_dma_cyclic_stop(struct dma_chan *dchan)
 {
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
-
        unsigned long flags;
 
        spin_lock_irqsave(&chan->vc.lock, flags);
-
        axi_chan_disable(chan);
-       
        spin_unlock_irqrestore(&chan->vc.lock, flags);
-
 }
 EXPORT_SYMBOL(axi_dma_cyclic_stop);
 
@@ -1575,7 +1525,6 @@ static int dw_probe(struct platform_device *pdev)
 
                chan->vc.desc_free = vchan_desc_put;
                vchan_init(&chan->vc, &dw->dma);
-               tasklet_setup(&chan->dma_tasklet, axi_chan_tasklet);
        }
 
        /* Set capabilities */
old mode 100755 (executable)
new mode 100644 (file)
index 164a2d6..d229d88
@@ -23,6 +23,8 @@
 #define DMAC_MAX_MASTERS       2
 #define DMAC_MAX_BLK_SIZE      0x200000
 
+#define TIMEOUT_US             200000
+
 struct dma_ch_en {
        u8 ch_en;
        u8 ch_en_shift;
@@ -77,10 +79,8 @@ struct axi_dma_chan {
        enum dma_transfer_direction     direction;
        bool                            fixed_burst_trans_len;
        bool                            cyclic;
-       //bool                          is_err;
        /* these other elements are all protected by vc.lock */
        bool                            is_paused;
-       struct tasklet_struct           dma_tasklet;
 };
 
 struct dw_axi_dma {
@@ -131,11 +131,11 @@ struct axi_dma_hw_desc {
 struct axi_dma_desc {
        struct axi_dma_hw_desc  *hw_desc;
 
-       struct virt_dma_desc            vd;
-       struct axi_dma_chan             *chan;
-       u32                             completed_blocks;
-       u32                             length;
-       u32                             period_len;
+       struct virt_dma_desc    vd;
+       struct axi_dma_chan     *chan;
+       u32                     completed_blocks;
+       u32                     length;
+       u32                     period_len;
 };
 
 static inline struct device *dchan2dev(struct dma_chan *dchan)