dmaengine: xilinx_dma: Free up active list based on descriptor completion bit
author Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
Mon, 7 Aug 2023 05:51:44 +0000 (11:21 +0530)
committer Vinod Koul <vkoul@kernel.org>
Mon, 21 Aug 2023 13:10:37 +0000 (18:40 +0530)
The AXIDMA IP in SG mode sets the completion bit to 1 when a transfer is
completed. Read this bit to move the descriptor from the active list to
the done list. This check is needed when the interrupt delay timeout and
IRQThreshold are enabled, i.e. Dly_IrqEn is triggered without the
interrupt coalescing threshold being reached.

Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
Link: https://lore.kernel.org/r/1691387509-2113129-6-git-send-email-radhey.shyam.pandey@amd.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/xilinx/xilinx_dma.c

index 9bdce8f..5de9d36 100644
 #define XILINX_DMA_CR_COALESCE_SHIFT   16
 #define XILINX_DMA_BD_SOP              BIT(27)
 #define XILINX_DMA_BD_EOP              BIT(26)
+#define XILINX_DMA_BD_COMP_MASK                BIT(31)
 #define XILINX_DMA_COALESCE_MAX                255
 #define XILINX_DMA_NUM_DESCS           512
 #define XILINX_DMA_NUM_APP_WORDS       5
@@ -1708,6 +1709,14 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
                return;
 
        list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+               if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+                       struct xilinx_axidma_tx_segment *seg;
+
+                       seg = list_last_entry(&desc->segments,
+                                             struct xilinx_axidma_tx_segment, node);
+                       if (!(seg->hw.status & XILINX_DMA_BD_COMP_MASK) && chan->has_sg)
+                               break;
+               }
                if (chan->has_sg && chan->xdev->dma_config->dmatype !=
                    XDMA_TYPE_VDMA)
                        desc->residue = xilinx_dma_get_residue(chan, desc);
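
For illustration only, below is a minimal standalone sketch (not kernel code) of the gating logic the hunk adds: descriptors are completed in order, and the walk stops at the first one whose last segment does not yet carry the completion bit. The structures and the complete_descriptors() helper are simplified, hypothetical stand-ins for the driver's types, not the driver itself.

	/* Standalone sketch: move descriptors to "done" only once the
	 * hardware has set the completion bit (BIT(31)) in the status
	 * word of the descriptor's last segment. Simplified types. */
	#include <stdio.h>
	#include <stdint.h>

	#define BD_COMP_MASK (1u << 31)

	struct segment {
		uint32_t status;   /* hardware sets BD_COMP_MASK on completion */
	};

	struct descriptor {
		struct segment last_seg;
		int done;          /* stands in for moving to the done list */
	};

	/* Complete descriptors in order; stop at the first one whose
	 * last segment has not been marked complete by the hardware. */
	static void complete_descriptors(struct descriptor *descs, int n)
	{
		for (int i = 0; i < n; i++) {
			if (!(descs[i].last_seg.status & BD_COMP_MASK))
				break;   /* hardware has not finished this one yet */
			descs[i].done = 1;
		}
	}

	int main(void)
	{
		struct descriptor d[3] = {
			{ .last_seg.status = BD_COMP_MASK }, /* finished */
			{ .last_seg.status = 0 },            /* still in flight */
			{ .last_seg.status = BD_COMP_MASK }, /* finished, but behind a gap */
		};

		complete_descriptors(d, 3);
		for (int i = 0; i < 3; i++)
			printf("desc %d done=%d\n", i, d[i].done);
		/* prints done=1, done=0, done=0: the walk never skips past
		 * an unfinished descriptor, mirroring the break in the patch */
		return 0;
	}

Breaking out of the loop (rather than skipping) matches the hardware model assumed by the patch: AXIDMA processes the buffer-descriptor ring in order, so once an incomplete descriptor is found, nothing after it can be complete either.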