dmaengine: ti: k3-udma: Remove dma_sync_single calls for descriptors
author    Peter Ujfalusi <peter.ujfalusi@ti.com>
Tue, 7 Jul 2020 10:23:48 +0000 (13:23 +0300)
committer Vinod Koul <vkoul@kernel.org>
Wed, 15 Jul 2020 06:06:55 +0000 (11:36 +0530)
The descriptors are allocated via either dma_pool or dma_alloc_coherent.

There is no need for the dma_sync_single_* calls.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Reviewed-by: Grygorii Strashko <grygorii.strashko@ti.com>
Link: https://lore.kernel.org/r/20200707102352.28773-2-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
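
For background, memory obtained from a dma_pool or from dma_alloc_coherent() is coherent between CPU and device, so the dma_sync_single_*() ownership hand-offs only apply to streaming mappings created with dma_map_single(). A minimal sketch of the two cases (the helper names below are illustrative, not taken from this driver):

#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/*
 * Coherent descriptor memory: CPU writes are visible to the device and
 * device writes are visible to the CPU without any dma_sync_single_*()
 * maintenance.
 */
static void *alloc_desc(struct dma_pool *pool, dma_addr_t *paddr)
{
	return dma_pool_alloc(pool, GFP_NOWAIT, paddr);
}

/*
 * Streaming mappings are the case that does need the sync calls, e.g.:
 *
 *	paddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... CPU modifies buf again ...
 *	dma_sync_single_for_device(dev, paddr, len, DMA_TO_DEVICE);
 */
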
drivers/dma/ti/k3-udma.c

index 945b7c6..f5c1373 100644
@@ -539,30 +539,6 @@ static bool udma_is_chan_paused(struct udma_chan *uc)
        return false;
 }
 
-static void udma_sync_for_device(struct udma_chan *uc, int idx)
-{
-       struct udma_desc *d = uc->desc;
-
-       if (uc->cyclic && uc->config.pkt_mode) {
-               dma_sync_single_for_device(uc->ud->dev,
-                                          d->hwdesc[idx].cppi5_desc_paddr,
-                                          d->hwdesc[idx].cppi5_desc_size,
-                                          DMA_TO_DEVICE);
-       } else {
-               int i;
-
-               for (i = 0; i < d->hwdesc_count; i++) {
-                       if (!d->hwdesc[i].cppi5_desc_vaddr)
-                               continue;
-
-                       dma_sync_single_for_device(uc->ud->dev,
-                                               d->hwdesc[i].cppi5_desc_paddr,
-                                               d->hwdesc[i].cppi5_desc_size,
-                                               DMA_TO_DEVICE);
-               }
-       }
-}
-
 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
 {
        return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
@@ -593,7 +569,6 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx)
                paddr = udma_curr_cppi5_desc_paddr(d, idx);
 
                wmb(); /* Ensure that writes are not moved over this point */
-               udma_sync_for_device(uc, idx);
        }
 
        return k3_ringacc_ring_push(ring, &paddr);
@@ -628,12 +603,12 @@ static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
        }
 
        if (ring && k3_ringacc_ring_get_occ(ring)) {
-               struct udma_desc *d = NULL;
-
                ret = k3_ringacc_ring_pop(ring, addr);
                if (ret)
                        return ret;
 
+               rmb(); /* Ensure that reads are not moved before this point */
+
                /* Teardown completion */
                if (cppi5_desc_is_tdcm(*addr))
                        return ret;
@@ -641,14 +616,6 @@ static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
                /* Check for flush descriptor */
                if (udma_desc_is_rx_flush(uc, *addr))
                        return -ENOENT;
-
-               d = udma_udma_desc_from_paddr(uc, *addr);
-
-               if (d)
-                       dma_sync_single_for_cpu(uc->ud->dev, *addr,
-                                               d->hwdesc[0].cppi5_desc_size,
-                                               DMA_FROM_DEVICE);
-               rmb(); /* Ensure that reads are not moved before this point */
        }
 
        return ret;
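
With the sync calls gone, ordering is enforced purely by the explicit barriers: wmb() before the descriptor address is pushed to the ring, and (after this patch) rmb() immediately after the pop, before any descriptor field is read. A rough sketch of that pattern with hypothetical ring helpers (my_ring_push()/my_ring_pop() stand in for the real ringacc API):

#include <linux/dma-mapping.h>
#include <asm/barrier.h>

struct my_ring;						/* hypothetical ring object */
int my_ring_push(struct my_ring *ring, void *elem);	/* stand-in for ring push   */
int my_ring_pop(struct my_ring *ring, void *elem);	/* stand-in for ring pop    */

/* Producer: descriptor lives in coherent memory, publish its address. */
static int push_desc(struct my_ring *ring, dma_addr_t paddr)
{
	wmb();	/* descriptor writes must not be reordered past the push */
	return my_ring_push(ring, &paddr);
}

/* Consumer: fetch a completed descriptor address from the ring. */
static int pop_desc(struct my_ring *ring, dma_addr_t *paddr)
{
	int ret = my_ring_pop(ring, paddr);

	if (ret)
		return ret;

	rmb();	/* descriptor reads must not be hoisted above the pop */
	return 0;
}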