WIP: merge_config

diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 5a50423..858bd64 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -256,6 +256,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
                       ATC_SPIP_BOUNDARY(first->boundary));
        channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
                       ATC_DPIP_BOUNDARY(first->boundary));
+       /* Don't allow CPU to reorder channel enable. */
+       wmb();
        dma_writel(atdma, CHER, atchan->mask);
 
        vdbg_dump_regs(atchan);
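
Note: the driver's channel_writel()/dma_writel() accessors are thin MMIO
writes with no ordering guarantees of their own (an assumption about this
driver's register helpers), so without the new wmb() the channel-enable
write to CHER could reach the controller before the descriptor programming
above it, and the engine could start fetching stale state. A minimal sketch
of the pattern, with illustrative register and variable names rather than
the real AT91 layout:

	/* Program the transfer with raw, unordered MMIO writes. */
	__raw_writel(first_desc_phys, ch_regs + DSCR_OFFSET);
	__raw_writel(spip, ch_regs + SPIP_OFFSET);
	__raw_writel(dpip, ch_regs + DPIP_OFFSET);

	wmb();	/* everything above must be visible to the device... */

	/* ...before the enable bit lets the engine start fetching. */
	__raw_writel(chan_mask, base_regs + CHER_OFFSET);
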
@@ -316,7 +318,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
        struct at_desc *desc_first = atc_first_active(atchan);
        struct at_desc *desc;
        int ret;
-       u32 ctrla, dscr, trials;
+       u32 ctrla, dscr;
+       unsigned int i;
 
        /*
         * If the cookie doesn't match to the currently running transfer then
@@ -386,7 +389,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
                dscr = channel_readl(atchan, DSCR);
                rmb(); /* ensure DSCR is read before CTRLA */
                ctrla = channel_readl(atchan, CTRLA);
-               for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+               for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
                        u32 new_dscr;
 
                        rmb(); /* ensure DSCR is read after CTRLA */
@@ -412,7 +415,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
                        rmb(); /* ensure DSCR is read before CTRLA */
                        ctrla = channel_readl(atchan, CTRLA);
                }
-               if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+               if (unlikely(i == ATC_MAX_DSCR_TRIALS))
                        return -ETIMEDOUT;
 
                /* for the first descriptor we can be more accurate */
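
Note: the exit test changes from "trials >= ATC_MAX_DSCR_TRIALS" to
"i == ATC_MAX_DSCR_TRIALS"; the two are equivalent, since the counter can
only leave the loop equal to the bound or via break. The loop itself exists
because DSCR (the current descriptor pointer) and CTRLA (the residue
counter) cannot be sampled atomically as a pair: the channel may advance
between the two reads. The idiom, as a hedged standalone sketch in which
read_reg() stands in for channel_readl():

	dscr = read_reg(DSCR);
	rmb();				/* DSCR before CTRLA */
	ctrla = read_reg(CTRLA);
	for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
		rmb();			/* DSCR again, after CTRLA */
		new_dscr = read_reg(DSCR);
		if (new_dscr == dscr)	/* pair is coherent, use it */
			break;
		dscr = new_dscr;	/* channel moved on: resample */
		rmb();
		ctrla = read_reg(CTRLA);
	}
	if (i == ATC_MAX_DSCR_TRIALS)	/* never stabilized */
		return -ETIMEDOUT;
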
@@ -462,18 +465,6 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
        if (!atc_chan_is_cyclic(atchan))
                dma_cookie_complete(txd);
 
-       /* If the transfer was a memset, free our temporary buffer */
-       if (desc->memset_buffer) {
-               dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
-                             desc->memset_paddr);
-               desc->memset_buffer = false;
-       }
-
-       /* move children to free_list */
-       list_splice_init(&desc->tx_list, &atchan->free_list);
-       /* move myself to free_list */
-       list_move(&desc->desc_node, &atchan->free_list);
-
        spin_unlock_irqrestore(&atchan->lock, flags);
 
        dma_descriptor_unmap(txd);
@@ -483,42 +474,20 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
                dmaengine_desc_get_callback_invoke(txd, NULL);
 
        dma_run_dependencies(txd);
-}
-
-/**
- * atc_complete_all - finish work for all transactions
- * @atchan: channel to complete transactions for
- *
- * Eventually submit queued descriptors if any
- *
- * Assume channel is idle while calling this function
- * Called with atchan->lock held and bh disabled
- */
-static void atc_complete_all(struct at_dma_chan *atchan)
-{
-       struct at_desc *desc, *_desc;
-       LIST_HEAD(list);
-       unsigned long flags;
-
-       dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
 
        spin_lock_irqsave(&atchan->lock, flags);
-
-       /*
-        * Submit queued descriptors ASAP, i.e. before we go through
-        * the completed ones.
-        */
-       if (!list_empty(&atchan->queue))
-               atc_dostart(atchan, atc_first_queued(atchan));
-       /* empty active_list now it is completed */
-       list_splice_init(&atchan->active_list, &list);
-       /* empty queue list by moving descriptors (if any) to active_list */
-       list_splice_init(&atchan->queue, &atchan->active_list);
-
+       /* move children to free_list */
+       list_splice_init(&desc->tx_list, &atchan->free_list);
+       /* add myself to free_list */
+       list_add(&desc->desc_node, &atchan->free_list);
        spin_unlock_irqrestore(&atchan->lock, flags);
 
-       list_for_each_entry_safe(desc, _desc, &list, desc_node)
-               atc_chain_complete(atchan, desc);
+       /* If the transfer was a memset, free our temporary buffer */
+       if (desc->memset_buffer) {
+               dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
+                             desc->memset_paddr);
+               desc->memset_buffer = false;
+       }
 }
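
Note: descriptor recycling (the splice onto free_list) now happens only
after dma_descriptor_unmap() and the client callback have run, so a
concurrent prep call cannot pick the descriptor off free_list while it is
still being completed, and the memset scratch buffer is released last,
outside the critical section. For context, a memset on this controller is
a copy from a small DMA-pool buffer holding the fill pattern; a hedged
sketch of the prep side (abbreviated from the driver, not verbatim):

	void *vaddr;
	dma_addr_t paddr;

	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
	if (!vaddr)
		return NULL;
	*(u32 *)vaddr = value;		/* pattern the engine copies from */

	desc->memset_vaddr = vaddr;	/* the completion path above frees these */
	desc->memset_paddr = paddr;
	desc->memset_buffer = true;
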
 
 /**
@@ -527,26 +496,28 @@ static void atc_complete_all(struct at_dma_chan *atchan)
  */
 static void atc_advance_work(struct at_dma_chan *atchan)
 {
+       struct at_desc *desc;
        unsigned long flags;
-       int ret;
 
        dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
 
        spin_lock_irqsave(&atchan->lock, flags);
-       ret = atc_chan_is_enabled(atchan);
-       spin_unlock_irqrestore(&atchan->lock, flags);
-       if (ret)
-               return;
-
-       if (list_empty(&atchan->active_list) ||
-           list_is_singular(&atchan->active_list))
-               return atc_complete_all(atchan);
+       if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list))
+               return spin_unlock_irqrestore(&atchan->lock, flags);
 
-       atc_chain_complete(atchan, atc_first_active(atchan));
+       desc = atc_first_active(atchan);
+       /* Remove the transfer node from the active list. */
+       list_del_init(&desc->desc_node);
+       spin_unlock_irqrestore(&atchan->lock, flags);
+       atc_chain_complete(atchan, desc);
 
        /* advance work */
        spin_lock_irqsave(&atchan->lock, flags);
-       atc_dostart(atchan, atc_first_active(atchan));
+       if (!list_empty(&atchan->active_list)) {
+               desc = atc_first_queued(atchan);
+               list_move_tail(&desc->desc_node, &atchan->active_list);
+               atc_dostart(atchan, desc);
+       }
        spin_unlock_irqrestore(&atchan->lock, flags);
 }
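
Note: "return spin_unlock_irqrestore(&atchan->lock, flags);" leans on the
compiler accepting a void expression in a return statement (valid C++,
tolerated by GNU C); it is simply shorthand for unlocking and returning:

	if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list)) {
		spin_unlock_irqrestore(&atchan->lock, flags);
		return;
	}

The same idiom appears in atc_issue_pending() below. The larger point of
the hunk: the finished descriptor is unlinked from active_list while the
lock is held, its callback runs with the lock dropped, and only then is
the next queued descriptor promoted and started, again under the lock.
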
 
@@ -558,6 +529,7 @@ static void atc_advance_work(struct at_dma_chan *atchan)
 static void atc_handle_error(struct at_dma_chan *atchan)
 {
        struct at_desc *bad_desc;
+       struct at_desc *desc;
        struct at_desc *child;
        unsigned long flags;
 
@@ -570,13 +542,12 @@ static void atc_handle_error(struct at_dma_chan *atchan)
        bad_desc = atc_first_active(atchan);
        list_del_init(&bad_desc->desc_node);
 
-       /* As we are stopped, take advantage to push queued descriptors
-        * in active_list */
-       list_splice_init(&atchan->queue, atchan->active_list.prev);
-
        /* Try to restart the controller */
-       if (!list_empty(&atchan->active_list))
-               atc_dostart(atchan, atc_first_active(atchan));
+       if (!list_empty(&atchan->active_list)) {
+               desc = atc_first_queued(atchan);
+               list_move_tail(&desc->desc_node, &atchan->active_list);
+               atc_dostart(atchan, desc);
+       }
 
        /*
         * KERN_CRITICAL may seem harsh, but since this only happens
@@ -691,19 +662,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
        spin_lock_irqsave(&atchan->lock, flags);
        cookie = dma_cookie_assign(tx);
 
-       if (list_empty(&atchan->active_list)) {
-               dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
-                               desc->txd.cookie);
-               atc_dostart(atchan, desc);
-               list_add_tail(&desc->desc_node, &atchan->active_list);
-       } else {
-               dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
-                               desc->txd.cookie);
-               list_add_tail(&desc->desc_node, &atchan->queue);
-       }
-
+       list_add_tail(&desc->desc_node, &atchan->queue);
        spin_unlock_irqrestore(&atchan->lock, flags);
 
+       dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
+                desc->txd.cookie);
        return cookie;
 }
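
Note: tx_submit() no longer touches the hardware; it only assigns a cookie
and queues the descriptor, which matches the dmaengine contract that
nothing may start until the client calls dma_async_issue_pending(). A
hedged client-side sketch (my_done_cb and the buffer variables are
hypothetical, not from this driver):

	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = dmaengine_prep_slave_single(chan, buf_dma, len,
					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;
	txd->callback = my_done_cb;	/* runs when the transfer completes */
	cookie = dmaengine_submit(txd);	/* lands in atc_tx_submit() above */
	dma_async_issue_pending(chan);	/* only now may the hardware start */
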
 
@@ -1445,11 +1408,8 @@ static int atc_terminate_all(struct dma_chan *chan)
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
        int                     chan_id = atchan->chan_common.chan_id;
-       struct at_desc          *desc, *_desc;
        unsigned long           flags;
 
-       LIST_HEAD(list);
-
        dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
        /*
@@ -1468,19 +1428,15 @@ static int atc_terminate_all(struct dma_chan *chan)
                cpu_relax();
 
        /* active_list entries will end up before queued entries */
-       list_splice_init(&atchan->queue, &list);
-       list_splice_init(&atchan->active_list, &list);
-
-       spin_unlock_irqrestore(&atchan->lock, flags);
-
-       /* Flush all pending and queued descriptors */
-       list_for_each_entry_safe(desc, _desc, &list, desc_node)
-               atc_chain_complete(atchan, desc);
+       list_splice_tail_init(&atchan->queue, &atchan->free_list);
+       list_splice_tail_init(&atchan->active_list, &atchan->free_list);
 
        clear_bit(ATC_IS_PAUSED, &atchan->status);
        /* if channel dedicated to cyclic operations, free it */
        clear_bit(ATC_IS_CYCLIC, &atchan->status);
 
+       spin_unlock_irqrestore(&atchan->lock, flags);
+
        return 0;
 }
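
Note: terminate_all now recycles everything straight onto free_list under
the channel lock instead of draining through atc_chain_complete(), in line
with the dmaengine expectation that callbacks of terminated descriptors
are not invoked; the status bits are likewise cleared before the lock is
dropped. One subtlety: list_splice_tail_init() empties its source onto the
tail of the destination, and the queue is spliced first, so queued entries
land ahead of the active ones here (the untouched comment above describes
the old head-splice order). Splice semantics, with hypothetical entries:

	LIST_HEAD(free_list);
	/* queue = {q1, q2}, active_list = {a1, a2} before the calls */
	list_splice_tail_init(&queue, &free_list);	  /* free: q1 q2 */
	list_splice_tail_init(&active_list, &free_list); /* free: q1 q2 a1 a2 */
	/* both source lists are re-initialized to empty */
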
 
@@ -1535,20 +1491,26 @@ atc_tx_status(struct dma_chan *chan,
 }
 
 /**
- * atc_issue_pending - try to finish work
+ * atc_issue_pending - takes the first transaction descriptor in the pending
+ * queue and starts the transfer.
  * @chan: target DMA channel
  */
 static void atc_issue_pending(struct dma_chan *chan)
 {
-       struct at_dma_chan      *atchan = to_at_dma_chan(chan);
+       struct at_dma_chan *atchan = to_at_dma_chan(chan);
+       struct at_desc *desc;
+       unsigned long flags;
 
        dev_vdbg(chan2dev(chan), "issue_pending\n");
 
-       /* Not needed for cyclic transfers */
-       if (atc_chan_is_cyclic(atchan))
-               return;
+       spin_lock_irqsave(&atchan->lock, flags);
+       if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue))
+               return spin_unlock_irqrestore(&atchan->lock, flags);
 
-       atc_advance_work(atchan);
+       desc = atc_first_queued(atchan);
+       list_move_tail(&desc->desc_node, &atchan->active_list);
+       atc_dostart(atchan, desc);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 }
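
Note: with tx_submit() only queueing, issue_pending() becomes the single
place where an idle channel is kicked, for scatter-gather and cyclic
transfers alike, hence the removed cyclic early-return. Keeping
atc_chan_is_enabled() and atc_dostart() in one critical section prevents
the completion path from starting the channel between the check and the
start. A hedged client-side sketch for the cyclic case (my_period_cb and
the buffer variables are hypothetical):

	txd = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
					DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!txd)
		return -EINVAL;
	txd->callback = my_period_cb;	/* fires once per period */
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);	/* ends up in atc_issue_pending() */
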
 
 /**
@@ -1966,7 +1928,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
          dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
          plat_dat->nr_channels);
 
-       dma_async_device_register(&atdma->dma_common);
+       err = dma_async_device_register(&atdma->dma_common);
+       if (err) {
+               dev_err(&pdev->dev, "Unable to register: %d.\n", err);
+               goto err_dma_async_device_register;
+       }
 
        /*
         * Do not return an error if the dmac node is not present in order to
@@ -1986,6 +1952,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
 err_of_dma_controller_register:
        dma_async_device_unregister(&atdma->dma_common);
+err_dma_async_device_register:
        dma_pool_destroy(atdma->memset_pool);
 err_memset_pool_create:
        dma_pool_destroy(atdma->dma_desc_pool);
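
Note: the new label slots into probe's existing goto unwind ladder, where
each failure point jumps to the label that tears down exactly what was set
up before it, in reverse order. Compressed shape of the tail of
at_dma_probe() after this change (elisions marked):

	err = dma_async_device_register(&atdma->dma_common);
	if (err) {
		dev_err(&pdev->dev, "Unable to register: %d.\n", err);
		goto err_dma_async_device_register;
	}

	/* ... of_dma_controller_register() when an OF node is present,
	 * jumping to err_of_dma_controller_register on failure ... */

	return 0;

err_of_dma_controller_register:
	dma_async_device_unregister(&atdma->dma_common);
err_dma_async_device_register:
	dma_pool_destroy(atdma->memset_pool);
err_memset_pool_create:
	dma_pool_destroy(atdma->dma_desc_pool);
	/* ... earlier unwind labels continue in the real function ... */
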