dmaengine: jz4780: Support bidirectional I/O on one channel
author     Paul Cercueil <paul@crapouillou.net>  Mon, 6 Dec 2021 17:42:59 +0000 (17:42 +0000)
committer  Vinod Koul <vkoul@kernel.org>         Fri, 17 Dec 2021 16:09:27 +0000 (21:39 +0530)
For some devices with only half-duplex capabilities, it doesn't make
much sense to use one DMA channel per direction, as both channels will
never be active at the same time.

Add support for bidirectional I/O on DMA channels. Client drivers can
then request a "tx-rx" DMA channel, which will be used for both
directions.

Signed-off-by: Paul Cercueil <paul@crapouillou.net>
Link: https://lore.kernel.org/r/20211206174259.68133-7-paul@crapouillou.net
Signed-off-by: Vinod Koul <vkoul@kernel.org>
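
For context, here is a minimal consumer-side sketch (not part of this patch;
the device, FIFO address and buffer mapping are assumptions) of how a
half-duplex client driver could request the single "tx-rx" channel and reuse
it for transfers in either direction:

    #include <linux/dmaengine.h>
    #include <linux/err.h>

    #define EXAMPLE_FIFO_ADDR	0x10000000	/* hypothetical device FIFO address */

    /*
     * Hypothetical half-duplex client: one DMA channel serves both directions.
     * @buf is assumed to be already DMA-mapped for @dir.
     */
    static int example_half_duplex_xfer(struct device *dev, dma_addr_t buf,
    				    size_t len, enum dma_transfer_direction dir)
    {
    	struct dma_slave_config cfg = { };
    	struct dma_async_tx_descriptor *txd;
    	struct dma_chan *chan;
    	int ret;

    	/* One bidirectional channel, e.g. dma-names = "tx-rx" in the DT node. */
    	chan = dma_request_chan(dev, "tx-rx");
    	if (IS_ERR(chan))
    		return PTR_ERR(chan);

    	/* Reconfigure the same channel for whichever direction is active. */
    	if (dir == DMA_MEM_TO_DEV) {
    		cfg.dst_addr = EXAMPLE_FIFO_ADDR;
    		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    	} else {
    		cfg.src_addr = EXAMPLE_FIFO_ADDR;
    		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    	}

    	ret = dmaengine_slave_config(chan, &cfg);
    	if (ret)
    		goto out_release;

    	txd = dmaengine_prep_slave_single(chan, buf, len, dir,
    					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    	if (!txd) {
    		ret = -ENOMEM;
    		goto out_release;
    	}

    	dmaengine_submit(txd);
    	dma_async_issue_pending(chan);
    	/* ... wait for completion; the same channel can then go the other way. */

    out_release:
    	dma_release_channel(chan);
    	return ret;
    }

Only one physical channel is held; the direction passed to the prep call is
what lets the jz4780 driver pick the tx or rx request type per descriptor, as
the diff below shows.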
drivers/dma/dma-jz4780.c

index c8c4bbd..fc513eb 100644
@@ -122,6 +122,7 @@ struct jz4780_dma_desc {
        dma_addr_t desc_phys;
        unsigned int count;
        enum dma_transaction_type type;
+       u32 transfer_type;
        u32 status;
 };
 
@@ -130,7 +131,7 @@ struct jz4780_dma_chan {
        unsigned int id;
        struct dma_pool *desc_pool;
 
-       u32 transfer_type;
+       u32 transfer_type_tx, transfer_type_rx;
        u32 transfer_shift;
        struct dma_slave_config config;
 
@@ -157,7 +158,7 @@ struct jz4780_dma_dev {
 };
 
 struct jz4780_dma_filter_data {
-       u32 transfer_type;
+       u32 transfer_type_tx, transfer_type_rx;
        int channel;
 };
 
@@ -226,9 +227,10 @@ static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
                jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
 }
 
-static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
-       struct jz4780_dma_chan *jzchan, unsigned int count,
-       enum dma_transaction_type type)
+static struct jz4780_dma_desc *
+jz4780_dma_desc_alloc(struct jz4780_dma_chan *jzchan, unsigned int count,
+                     enum dma_transaction_type type,
+                     enum dma_transfer_direction direction)
 {
        struct jz4780_dma_desc *desc;
 
@@ -248,6 +250,12 @@ static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
 
        desc->count = count;
        desc->type = type;
+
+       if (direction == DMA_DEV_TO_MEM)
+               desc->transfer_type = jzchan->transfer_type_rx;
+       else
+               desc->transfer_type = jzchan->transfer_type_tx;
+
        return desc;
 }
 
@@ -361,7 +369,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
        unsigned int i;
        int err;
 
-       desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE);
+       desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE, direction);
        if (!desc)
                return NULL;
 
@@ -410,7 +418,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
 
        periods = buf_len / period_len;
 
-       desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC);
+       desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC, direction);
        if (!desc)
                return NULL;
 
@@ -455,14 +463,14 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
        struct jz4780_dma_desc *desc;
        u32 tsz;
 
-       desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
+       desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY, 0);
        if (!desc)
                return NULL;
 
        tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
                                       &jzchan->transfer_shift);
 
-       jzchan->transfer_type = JZ_DMA_DRT_AUTO;
+       desc->transfer_type = JZ_DMA_DRT_AUTO;
 
        desc->desc[0].dsa = src;
        desc->desc[0].dta = dest;
@@ -528,7 +536,7 @@ static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
 
        /* Set transfer type. */
        jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
-                             jzchan->transfer_type);
+                             jzchan->desc->transfer_type);
 
        /*
         * Set the transfer count. This is redundant for a descriptor-driven
@@ -788,7 +796,8 @@ static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
                return false;
        }
 
-       jzchan->transfer_type = data->transfer_type;
+       jzchan->transfer_type_tx = data->transfer_type_tx;
+       jzchan->transfer_type_rx = data->transfer_type_rx;
 
        return true;
 }
@@ -800,11 +809,17 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
        dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
        struct jz4780_dma_filter_data data;
 
-       if (dma_spec->args_count != 2)
+       if (dma_spec->args_count == 2) {
+               data.transfer_type_tx = dma_spec->args[0];
+               data.transfer_type_rx = dma_spec->args[0];
+               data.channel = dma_spec->args[1];
+       } else if (dma_spec->args_count == 3) {
+               data.transfer_type_tx = dma_spec->args[0];
+               data.transfer_type_rx = dma_spec->args[1];
+               data.channel = dma_spec->args[2];
+       } else {
                return NULL;
-
-       data.transfer_type = dma_spec->args[0];
-       data.channel = dma_spec->args[1];
+       }
 
        if (data.channel > -1) {
                if (data.channel >= jzdma->soc_data->nb_channels) {
@@ -822,7 +837,8 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
                        return NULL;
                }
 
-               jzdma->chan[data.channel].transfer_type = data.transfer_type;
+               jzdma->chan[data.channel].transfer_type_tx = data.transfer_type_tx;
+               jzdma->chan[data.channel].transfer_type_rx = data.transfer_type_rx;
 
                return dma_get_slave_channel(
                        &jzdma->chan[data.channel].vchan.chan);