dmaengine: stm32-dma: take address into account when computing max width
author: Amelie Delaunay <amelie.delaunay@st.com>
Fri, 20 Nov 2020 14:33:19 +0000 (15:33 +0100)
committer: Vinod Koul <vkoul@kernel.org>
Fri, 11 Dec 2020 15:43:08 +0000 (21:13 +0530)
DMA_SxPAR or DMA_SxM0AR/M1AR registers have to be aligned on PSIZE or MSIZE
respectively. This means that bus width needs to be forced to 1 byte when
computed width is not aligned with address.

Signed-off-by: Amelie Delaunay <amelie.delaunay@st.com>
Link: https://lore.kernel.org/r/20201120143320.30367-4-amelie.delaunay@st.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/stm32-dma.c

index 62501e5..f54ecb1 100644 (file)
@@ -264,9 +264,11 @@ static int stm32_dma_get_width(struct stm32_dma_chan *chan,
 }
 
 static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
+                                                      dma_addr_t buf_addr,
                                                       u32 threshold)
 {
        enum dma_slave_buswidth max_width;
+       u64 addr = buf_addr;
 
        if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
                max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -277,6 +279,9 @@ static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
               max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
                max_width = max_width >> 1;
 
+       if (do_div(addr, max_width))
+               max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
        return max_width;
 }
 
@@ -707,7 +712,7 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
 static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
                                    enum dma_transfer_direction direction,
                                    enum dma_slave_buswidth *buswidth,
-                                   u32 buf_len)
+                                   u32 buf_len, dma_addr_t buf_addr)
 {
        enum dma_slave_buswidth src_addr_width, dst_addr_width;
        int src_bus_width, dst_bus_width;
@@ -739,7 +744,8 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
                        return dst_burst_size;
 
                /* Set memory data size */
-               src_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
+               src_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
+                                                        fifoth);
                chan->mem_width = src_addr_width;
                src_bus_width = stm32_dma_get_width(chan, src_addr_width);
                if (src_bus_width < 0)
@@ -788,7 +794,8 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
                        return src_burst_size;
 
                /* Set memory data size */
-               dst_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
+               dst_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
+                                                        fifoth);
                chan->mem_width = dst_addr_width;
                dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
                if (dst_bus_width < 0)
@@ -876,7 +883,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
 
        for_each_sg(sgl, sg, sg_len, i) {
                ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
-                                              sg_dma_len(sg));
+                                              sg_dma_len(sg),
+                                              sg_dma_address(sg));
                if (ret < 0)
                        goto err;
 
@@ -944,7 +952,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
                return NULL;
        }
 
-       ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len);
+       ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len,
+                                      buf_addr);
        if (ret < 0)
                return NULL;