dmaengine: xilinx_dma: Introduce xilinx_dma_get_residue
author Nicholas Graumann <nick.graumann@gmail.com>
Tue, 15 Oct 2019 14:48:21 +0000 (20:18 +0530)
committer Vinod Koul <vkoul@kernel.org>
Sun, 20 Oct 2019 14:05:27 +0000 (19:35 +0530)
Introduce a function that can calculate residues for IPs that support it:
AXI DMA and CDMA.

Signed-off-by: Nicholas Graumann <nick.graumann@gmail.com>
Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
Link: https://lore.kernel.org/r/1571150904-3988-5-git-send-email-radhey.shyam.pandey@xilinx.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
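
To see the new residue path from the client side, here is a minimal, hedged sketch. query_residue() and its caller are hypothetical; chan and cookie are assumed to come from an ordinary dma_request_chan()/dmaengine_submit() sequence. dmaengine_tx_status() ends up in xilinx_dma_tx_status() below, which fills state.residue for AXI DMA and CDMA channels.

    #include <linux/dmaengine.h>

    /* Hypothetical client helper: report how many bytes of the transfer
     * identified by @cookie are still outstanding on @chan. */
    static u32 query_residue(struct dma_chan *chan, dma_cookie_t cookie)
    {
            struct dma_tx_state state;
            enum dma_status status;

            status = dmaengine_tx_status(chan, cookie, &state);
            if (status == DMA_COMPLETE)
                    return 0;

            /* Filled by the driver's device_tx_status callback */
            return state.residue;
    }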
drivers/dma/xilinx/xilinx_dma.c

index 41d536c..fe265d9 100644
@@ -785,6 +785,44 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
 }
 
 /**
+ * xilinx_dma_get_residue - Compute residue for a given descriptor
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ *
+ * Return: The number of residue bytes for the descriptor.
+ */
+static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+                                 struct xilinx_dma_tx_descriptor *desc)
+{
+       struct xilinx_cdma_tx_segment *cdma_seg;
+       struct xilinx_axidma_tx_segment *axidma_seg;
+       struct xilinx_cdma_desc_hw *cdma_hw;
+       struct xilinx_axidma_desc_hw *axidma_hw;
+       struct list_head *entry;
+       u32 residue = 0;
+
+       list_for_each(entry, &desc->segments) {
+               if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+                       cdma_seg = list_entry(entry,
+                                             struct xilinx_cdma_tx_segment,
+                                             node);
+                       cdma_hw = &cdma_seg->hw;
+                       residue += (cdma_hw->control - cdma_hw->status) &
+                                  chan->xdev->max_buffer_len;
+               } else {
+                       axidma_seg = list_entry(entry,
+                                               struct xilinx_axidma_tx_segment,
+                                               node);
+                       axidma_hw = &axidma_seg->hw;
+                       residue += (axidma_hw->control - axidma_hw->status) &
+                                  chan->xdev->max_buffer_len;
+               }
+       }
+
+       return residue;
+}
+
+/**
  * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
  * @chan: Driver specific dma channel
  * @desc: dma transaction descriptor
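The per-segment arithmetic above is worth unpacking. A standalone sketch, assuming max_buffer_len is the GENMASK-style mask of the descriptor's buffer-length field that xilinx_dma_probe() sets up: the low bits of hw->control hold the programmed transfer length, the low bits of hw->status hold the bytes already transferred, and the mask discards the unrelated flag bits above the length field (the flag-bit positions below are made up for illustration).

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t max_buffer_len = (1u << 23) - 1;  /* GENMASK(22, 0)-style mask */
            uint32_t control = 0x08000000u | 4096;     /* flag bit + programmed length */
            uint32_t status  = 0x80000000u | 1024;     /* flag bit + bytes transferred */
            uint32_t residue = (control - status) & max_buffer_len;

            printf("residue = %u bytes\n", residue);   /* prints 3072 */
            return 0;
    }

Note that masking after the subtraction also absorbs the borrow when the flag bits differ between the two registers, so only the length-field difference survives.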
@@ -993,8 +1031,6 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
 {
        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
        struct xilinx_dma_tx_descriptor *desc;
-       struct xilinx_axidma_tx_segment *segment;
-       struct xilinx_axidma_desc_hw *hw;
        enum dma_status ret;
        unsigned long flags;
        u32 residue = 0;
@@ -1003,22 +1039,20 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
        if (ret == DMA_COMPLETE || !txstate)
                return ret;
 
-       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
-               spin_lock_irqsave(&chan->lock, flags);
+       spin_lock_irqsave(&chan->lock, flags);
 
-               desc = list_last_entry(&chan->active_list,
-                                      struct xilinx_dma_tx_descriptor, node);
-               if (chan->has_sg) {
-                       list_for_each_entry(segment, &desc->segments, node) {
-                               hw = &segment->hw;
-                               residue += (hw->control - hw->status) &
-                                          chan->xdev->max_buffer_len;
-                       }
-               }
-               spin_unlock_irqrestore(&chan->lock, flags);
+       desc = list_last_entry(&chan->active_list,
+                              struct xilinx_dma_tx_descriptor, node);
+       /*
+        * VDMA and simple mode do not support residue reporting, so the
+        * residue field will always be 0.
+        */
+       if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
+               residue = xilinx_dma_get_residue(chan, desc);
 
-               dma_set_residue(txstate, residue);
-       }
+       spin_unlock_irqrestore(&chan->lock, flags);
+
+       dma_set_residue(txstate, residue);
 
        return ret;
 }
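For context, dma_set_residue() is the small dmaengine helper that stores the computed value into the caller-supplied state. Quoted from memory of include/linux/dmaengine.h, so treat it as a sketch rather than the exact upstream body:

    static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
    {
            if (state)
                    state->residue = residue;
    }

This is why the rewritten xilinx_dma_tx_status() can call it unconditionally: a NULL txstate has already returned early, and a zero residue is the correct answer for VDMA and simple mode.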
@@ -2705,12 +2739,15 @@ static int xilinx_dma_probe(struct platform_device *pdev)
                                          xilinx_dma_prep_dma_cyclic;
                xdev->common.device_prep_interleaved_dma =
                                        xilinx_dma_prep_interleaved;
-               /* Residue calculation is supported by only AXI DMA */
+               /* Residue calculation is supported by only AXI DMA and CDMA */
                xdev->common.residue_granularity =
                                          DMA_RESIDUE_GRANULARITY_SEGMENT;
        } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
                dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
                xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+               /* Residue calculation is supported by only AXI DMA and CDMA */
+               xdev->common.residue_granularity =
+                                       DMA_RESIDUE_GRANULARITY_SEGMENT;
        } else {
                xdev->common.device_prep_interleaved_dma =
                                xilinx_vdma_dma_prep_interleaved;
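
Since only the AXI DMA and CDMA channels now advertise DMA_RESIDUE_GRANULARITY_SEGMENT, a client can check the capability before trusting the reported residue. A hedged sketch; chan_reports_segment_residue() is a hypothetical helper built on the real dma_get_slave_caps() API:

    #include <linux/dmaengine.h>

    /* Hypothetical helper: true if @chan reports residue at segment
     * granularity or finer. */
    static bool chan_reports_segment_residue(struct dma_chan *chan)
    {
            struct dma_slave_caps caps;

            if (dma_get_slave_caps(chan, &caps))
                    return false;

            return caps.residue_granularity >= DMA_RESIDUE_GRANULARITY_SEGMENT;
    }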