dma: Added DMA misc driver interface for data transfer
authorsamin <samin.guo@starfivetech.com>
Mon, 22 Nov 2021 10:08:40 +0000 (18:08 +0800)
committersamin <samin.guo@starfivetech.com>
Tue, 23 Nov 2021 03:48:28 +0000 (11:48 +0800)
1) Add an async_memcpy API for kernel space.
2) Add a dma-misc driver for user space.

Signed-off-by: samin <samin.guo@starfivetech.com>
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/dw-axi-dmac-starfive/Makefile [new file with mode: 0644]
drivers/dma/dw-axi-dmac-starfive/dw-axi-dmac-starfive-misc.c [new file with mode: 0644]
drivers/dma/dw-axi-dmac-starfive/starfive_dmaengine_memcpy.c [new file with mode: 0644]

index 80c2c03..1dee309 100644 (file)
@@ -181,6 +181,13 @@ config DW_AXI_DMAC
          NOTE: This driver wasn't tested on 64 bit platform because
          of lack 64 bit platform with Synopsys DW AXI DMAC.
 
+config DW_AXI_DMAC_STARFIVE
+       tristate "Synopsys DesignWare AXI DMA support for StarFive SOC"
+       depends on SOC_STARFIVE_VIC7100
+       help
+         Enable support for Synopsys DesignWare AXI DMA controller.
+         NOTE: It's for StarFive SOC.
+
 config EP93XX_DMA
        bool "Cirrus Logic EP93xx DMA support"
        depends on ARCH_EP93XX || COMPILE_TEST
index 616d926..5d6c354 100644 (file)
@@ -27,6 +27,7 @@ obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
 obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
+obj-$(CONFIG_DW_AXI_DMAC_STARFIVE) += dw-axi-dmac-starfive/
 obj-$(CONFIG_DW_DMAC_CORE) += dw/
 obj-$(CONFIG_DW_EDMA) += dw-edma/
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
diff --git a/drivers/dma/dw-axi-dmac-starfive/Makefile b/drivers/dma/dw-axi-dmac-starfive/Makefile
new file mode 100644 (file)
index 0000000..d497783
--- /dev/null
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DW_AXI_DMAC_STARFIVE) += starfive_dmaengine_memcpy.o dw-axi-dmac-starfive-misc.o
diff --git a/drivers/dma/dw-axi-dmac-starfive/dw-axi-dmac-starfive-misc.c b/drivers/dma/dw-axi-dmac-starfive/dw-axi-dmac-starfive-misc.c
new file mode 100644 (file)
index 0000000..6b339d6
--- /dev/null
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2020 StarFive, Inc <samin.guo@starfivetech.com>
+ *
+ * DW AXI dma driver for StarFive SoC VIC7100.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <asm/uaccess.h>
+#include <linux/dmaengine.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#define DRIVER_NAME                    "dwdma"
+#define AXIDMA_IOC_MAGIC               'A'
+#define AXIDMA_IOCGETCHN               _IO(AXIDMA_IOC_MAGIC, 0)
+#define AXIDMA_IOCCFGANDSTART          _IO(AXIDMA_IOC_MAGIC, 1)
+#define AXIDMA_IOCGETSTATUS            _IO(AXIDMA_IOC_MAGIC, 2)
+#define AXIDMA_IOCRELEASECHN           _IO(AXIDMA_IOC_MAGIC, 3)
+
+#define AXI_DMA_MAX_CHANS              20
+
+#define DMA_CHN_UNUSED                 0
+#define DMA_CHN_USED                   1
+#define DMA_STATUS_UNFINISHED          0
+#define DMA_STATUS_FINISHED            1
+#define DMA_MAX_TIMEOUT_MS             20000
+
+static DECLARE_WAIT_QUEUE_HEAD(wq);
+
+/* Channel configuration block exchanged with user space through ioctl. */
+struct axidma_chncfg {
+       unsigned long src_addr; /* DMA (physical) source address */
+       unsigned long dst_addr; /* DMA (physical) destination address */
+       unsigned long virt_src; /* mmap'ed source address in user space */
+       unsigned long virt_dst; /* mmap'ed destination address in user space */
+       unsigned long phys;     /* descriptor physical address */
+       unsigned int len;       /* transfer length in bytes */
+       int mem_fd;             /* fd backing the mmap'ed buffers */
+       unsigned char chn_num;  /* index into the driver's channel table */
+       unsigned char status;   /* DMA_STATUS_UNFINISHED / DMA_STATUS_FINISHED */
+};
+
+/* Per-slot bookkeeping for the misc device's channel table. */
+struct axidma_chns {
+       struct dma_chan *dma_chan;      /* NULL while no engine channel is held */
+       unsigned char used;             /* DMA_CHN_USED / DMA_CHN_UNUSED */
+       unsigned char status;           /* DMA_STATUS_* progress flag */
+       unsigned char reserve[2];       /* padding */
+};
+
+/*
+ * File-local channel table; was missing "static", leaking the symbol into
+ * the kernel's global namespace.
+ * NOTE(review): there is no locking around this table, so concurrent
+ * ioctl() callers can race on slot allocation — confirm single-user usage
+ * or add a mutex.
+ */
+static struct axidma_chns channels[AXI_DMA_MAX_CHANS];
+
+/* open(): nothing to set up — all state lives in the channel table. */
+static int axidma_open(struct inode *inode, struct file *file)
+{
+       /*Open: do nothing*/
+       return 0;
+}
+
+/*
+ * release(): nothing torn down here; channels are freed explicitly via
+ * AXIDMA_IOCRELEASECHN.  NOTE(review): a process that exits without the
+ * release ioctl leaks its channel — confirm this is acceptable.
+ */
+static int axidma_release(struct inode *inode, struct file *file)
+{
+       /* Release: do nothing */
+       return 0;
+}
+
+/* write(): accepted but discarded; all control flows through ioctl. */
+static ssize_t axidma_write(struct file *file, const char __user *data,
+                       size_t len, loff_t *ppos)
+{
+       /* Write: do nothing */
+       return 0;
+}
+
+/* DMA completion callback: mark the channel's status byte finished and
+ * wake any ioctl waiter sleeping on the queue. */
+static void dma_complete_func(void *status)
+{
+	unsigned char *chan_status = status;
+
+	*chan_status = DMA_STATUS_FINISHED;
+	wake_up_interruptible(&wq);
+}
+
+static long axidma_unlocked_ioctl(struct file *file, unsigned int cmd,
+                               unsigned long arg)
+{
+       int i, ret;
+       dma_cap_mask_t mask;
+       dma_cookie_t cookie;
+       struct dma_device *dma_dev;
+       struct axidma_chncfg chncfg;
+       struct dma_async_tx_descriptor *tx;
+       enum dma_status status;
+
+       memset(&chncfg, 0, sizeof(struct axidma_chncfg));
+
+       switch(cmd) {
+       case AXIDMA_IOCGETCHN:
+               for (i = 0; i < AXI_DMA_MAX_CHANS; i++) {
+                       if(DMA_CHN_UNUSED == channels[i].used)
+                               break;
+               }
+
+               if (AXI_DMA_MAX_CHANS == i) {
+                       pr_err("Get dma chn failed, because no idle channel\n");
+                       goto error;
+               } else {
+                       channels[i].used = DMA_CHN_USED;
+                       channels[i].status = DMA_STATUS_UNFINISHED;
+                       chncfg.status = DMA_STATUS_UNFINISHED;
+                       chncfg.chn_num = i;
+               }
+
+               dma_cap_zero(mask);
+               dma_cap_set(DMA_MEMCPY, mask);
+
+               channels[i].dma_chan = dma_request_channel(mask, NULL, NULL);
+               if (!channels[i].dma_chan) {
+                       pr_err("dma request channel failed\n");
+                       channels[i].used = DMA_CHN_UNUSED;
+                       goto error;
+               }
+
+               ret = copy_to_user((void __user *)arg, &chncfg,
+                               sizeof(struct axidma_chncfg));
+               if (ret) {
+                       pr_err("Copy to user failed\n");
+                       goto error;
+               }
+               break;
+
+       case AXIDMA_IOCCFGANDSTART:
+               ret = copy_from_user(&chncfg, (void __user *)arg,
+                                    sizeof(struct axidma_chncfg));
+               if (ret) {
+                       pr_err("Copy from user failed\n");
+                       goto error;
+               }
+
+               if ((chncfg.chn_num >= AXI_DMA_MAX_CHANS) ||
+                  (!channels[chncfg.chn_num].dma_chan)) {
+                       pr_err("chn_num[%d] is invalid\n", chncfg.chn_num);
+                       goto error;
+               }
+               dma_dev = channels[chncfg.chn_num].dma_chan->device;
+
+#ifdef CONFIG_SOC_STARFIVE_VIC7100
+               starfive_flush_dcache(chncfg.src_addr, chncfg.len);
+#endif
+               tx = dma_dev->device_prep_dma_memcpy(
+                       channels[chncfg.chn_num].dma_chan,
+                       chncfg.dst_addr, chncfg.src_addr, chncfg.len,
+                       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+               if (!tx) {
+                       pr_err("Failed to prepare DMA memcpy\n");
+                       goto error;
+               }
+
+               channels[chncfg.chn_num].status = DMA_STATUS_UNFINISHED;
+
+               tx->callback_param = &channels[chncfg.chn_num].status;
+               tx->callback = dma_complete_func;
+               cookie = tx->tx_submit(tx);
+
+               if (dma_submit_error(cookie)) {
+                       pr_err("Failed to dma tx_submit\n");
+                       goto error;
+               }
+
+               dma_async_issue_pending(channels[chncfg.chn_num].dma_chan);
+
+               break;
+
+       case AXIDMA_IOCGETSTATUS:
+               ret = copy_from_user(&chncfg, (void __user *)arg,
+                       sizeof(struct axidma_chncfg));
+               if (ret) {
+                       pr_info("Copy from user failed\n");
+                       goto error;
+               }
+
+               if (chncfg.chn_num >= AXI_DMA_MAX_CHANS) {
+                       pr_info("chn_num[%d] is invalid\n", chncfg.chn_num);
+                       goto error;
+               }
+
+               wait_event_interruptible_timeout(wq,
+                               &channels[chncfg.chn_num].status,
+                               msecs_to_jiffies(DMA_MAX_TIMEOUT_MS));
+#ifdef CONFIG_SOC_STARFIVE_VIC7100
+               /*flush dcache*/
+               starfive_flush_dcache(chncfg.dst_addr, chncfg.len);
+#endif
+               status = dma_async_is_tx_complete(channels[chncfg.chn_num].dma_chan,
+                                               cookie, NULL, NULL);
+               if (status != DMA_COMPLETE) {
+                       pr_err("dma: not complete! status:%d \n", status);
+                       dmaengine_terminate_sync(channels[chncfg.chn_num].dma_chan);
+
+                       channels[chncfg.chn_num].used = DMA_CHN_UNUSED;
+                       channels[chncfg.chn_num].status = DMA_STATUS_UNFINISHED;
+                       return -EBUSY;
+               }
+
+               chncfg.status = channels[chncfg.chn_num].status;
+
+               ret = copy_to_user((void __user *)arg, &chncfg,
+                                  sizeof(struct axidma_chncfg));
+               if(ret) {
+                       pr_info("Copy to user failed\n");
+                       goto error;
+               }
+               break;
+
+       case AXIDMA_IOCRELEASECHN:
+               ret = copy_from_user(&chncfg, (void __user *)arg,
+                                    sizeof(struct axidma_chncfg));
+               if(ret) {
+                       pr_info("Copy from user failed\n");
+                       goto error;
+               }
+
+               if((chncfg.chn_num >= AXI_DMA_MAX_CHANS) ||
+                  (!channels[chncfg.chn_num].dma_chan)) {
+                       pr_info("chn_num[%d] is invalid\n", chncfg.chn_num);
+                       goto error;
+               }
+
+               dma_release_channel(channels[chncfg.chn_num].dma_chan);
+               channels[chncfg.chn_num].used = DMA_CHN_UNUSED;
+               channels[chncfg.chn_num].status = DMA_STATUS_UNFINISHED;
+               break;
+
+       default:
+               pr_info("Don't support cmd [%d]\n", cmd);
+               break;
+       }
+       return 0;
+
+error:
+       return -EFAULT;
+}
+
+/*
+ *	Kernel Interfaces
+ */
+/* const: the ops table is never modified at runtime (checkpatch warns
+ * on a non-const struct file_operations). */
+static const struct file_operations axidma_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.write		= axidma_write,
+	.unlocked_ioctl	= axidma_unlocked_ioctl,
+	.open		= axidma_open,
+	.release	= axidma_release,
+};
+
+/* Registered as /dev/dwdma with a dynamically assigned minor. */
+static struct miscdevice axidma_miscdev = {
+	.minor		= MISC_DYNAMIC_MINOR,
+	.name		= DRIVER_NAME,
+	.fops		= &axidma_fops,
+};
+
+/*
+ * Module init: zero the channel table BEFORE registering the misc device —
+ * the original registered first, leaving a window where an open()/ioctl()
+ * could observe uninitialized state.  Also use pr_err(); the original
+ * called pr_info(KERN_ERR ...), which embeds a second log level into the
+ * format string.
+ */
+static int __init axidma_init(void)
+{
+	int ret;
+
+	memset(&channels, 0, sizeof(channels));
+
+	ret = misc_register(&axidma_miscdev);
+	if (ret) {
+		pr_err("cannot register miscdev (err=%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Module exit: unregister the misc device (no other teardown needed). */
+static void __exit axidma_exit(void)
+{
+       misc_deregister(&axidma_miscdev);
+}
+
+module_init(axidma_init);
+module_exit(axidma_exit);
+
+MODULE_AUTHOR("samin <samin.guo@starfivetech.com>");
+MODULE_DESCRIPTION("DW Axi Dmac Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/dw-axi-dmac-starfive/starfive_dmaengine_memcpy.c b/drivers/dma/dw-axi-dmac-starfive/starfive_dmaengine_memcpy.c
new file mode 100644 (file)
index 0000000..7fb3332
--- /dev/null
@@ -0,0 +1,246 @@
+/*
+ * Copyright 2021 StarFive, Inc <samin.guo@starfivetech.com>
+ *
+ * API|test for dma memcopy.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/acpi_iort.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+
+#include <soc/starfive/vic7100.h>
+
+#define DMATEST_MAX_TIMEOUT_MS         20000
+
+static DECLARE_WAIT_QUEUE_HEAD(wq);
+
+/* Completion tracking shared between submitter and callback. */
+struct dmatest_done {
+       int     timeout;        /* wait budget in milliseconds */
+       bool    done;           /* set true by the DMA completion callback */
+};
+
+/*
+ * Buffers and addresses for one coherent-memory memcpy test run.
+ * NOTE(review): embedding a bare struct device (rather than referencing a
+ * registered one) sidesteps the driver model's refcounting — confirm this
+ * is intentional for the self-test path.
+ */
+typedef struct async_dma_parm_t {
+       struct device dev;
+       dma_addr_t src_dma;
+       dma_addr_t dst_dma;
+       void *src;
+       void *dst;
+       size_t size;
+} async_dma_parm_t;
+
+/*
+ * Translate a kernel virtual address to its physical (DMA) address.
+ *
+ * Fix: the original computed offset_in_page(pg) — the low bits of the
+ * struct page POINTER — instead of the offset of vaddr within its page,
+ * yielding a wrong physical address for essentially every input.
+ */
+dma_addr_t dw_virt_to_phys(void *vaddr)
+{
+	struct page *pg = virt_to_page(vaddr);
+	unsigned long pa_off = offset_in_page(vaddr);
+
+	return page_to_phys(pg) + pa_off;
+}
+EXPORT_SYMBOL(dw_virt_to_phys);
+
+/* Translate a physical (DMA) address back to its kernel virtual alias. */
+void *dw_phys_to_virt(dma_addr_t phys)
+{
+	struct page *pg = phys_to_page(phys);
+
+	return page_to_virt(pg) + offset_in_page(phys);
+}
+EXPORT_SYMBOL(dw_phys_to_virt);
+
+/* DMA completion callback: flag the transfer done and wake the waiter. */
+static void tx_callback(void *arg)
+{
+	((struct dmatest_done *)arg)->done = true;
+	wake_up_interruptible(&wq);
+}
+
+/*
+ * Allocate the coherent src/dst buffers for one test run.
+ * Returns 0 on success, -ENOMEM on failure (nothing left allocated).
+ *
+ * Fix: the original error labels fell through so that BOTH failure paths
+ * ended up calling dma_free_coherent() on the never-allocated (NULL) src
+ * buffer with an indeterminate dma handle.
+ */
+static int async_dma_alloc_buf(async_dma_parm_t *dma_parm)
+{
+	struct device *dev = &dma_parm->dev;
+
+	dev->bus = NULL;
+	dev->coherent_dma_mask = 0xffffffff;
+	arch_setup_dma_ops(dev, dma_parm->dst_dma, 0, NULL, true);
+
+	dma_parm->dst = dma_alloc_coherent(dev, dma_parm->size,
+					&dma_parm->dst_dma, GFP_KERNEL);
+	if (!dma_parm->dst)
+		return -ENOMEM;
+
+	dma_parm->src = dma_alloc_coherent(dev, dma_parm->size,
+					&dma_parm->src_dma, GFP_KERNEL);
+	if (!dma_parm->src) {
+		/* Only dst was allocated; free exactly that. */
+		dma_free_coherent(dev, dma_parm->size, dma_parm->dst,
+				dma_parm->dst_dma);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* Release both coherent buffers obtained by async_dma_alloc_buf(). */
+static int async_dma_free_buf(async_dma_parm_t *dma_parm)
+{
+	struct device *dev = &dma_parm->dev;
+
+	dma_free_coherent(dev, dma_parm->size, dma_parm->src, dma_parm->src_dma);
+	dma_free_coherent(dev, dma_parm->size, dma_parm->dst, dma_parm->dst_dma);
+
+	return 0;
+}
+
+/* Prime the buffers: src gets a 0x5a pattern, dst is cleared. */
+static void async_dma_prebuf(void *dst, void *src, size_t size)
+{
+	memset(src, 0x5a, size);
+	memset(dst, 0x00, size);
+}
+
+/* Return 0 when dst matches src over the full transfer size. */
+static int async_dma_check_data(void *dst, void *src, size_t size)
+{
+	return memcmp(dst, src, size);
+}
+
+/*
+* phys addr for dma.
+*/
+int async_memcpy_single(dma_addr_t dst_dma, dma_addr_t src_dma, size_t size)
+{
+       struct dma_async_tx_descriptor *tx;
+       struct dma_chan *chan;
+       struct dmatest_done done;
+       dma_cap_mask_t mask;
+       dma_cookie_t cookie;
+       enum dma_status status;
+
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_MEMCPY, mask);
+       chan = dma_request_channel(mask, NULL, NULL);
+       if (!chan) {
+               pr_err("dma request channel failed\n");
+               return -EBUSY;
+       }
+
+       tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma, size,
+                               DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+
+       if (!tx) {
+               pr_err("Failed to prepare DMA memcpy\n");
+               dma_release_channel(chan);
+               return -EIO;
+       }
+
+       pr_debug("dmatest: dma_src=%#llx dma_dst=%#llx size:%#lx\n",
+                                       src_dma, dst_dma, size);
+       done.done = false;
+       done.timeout = DMATEST_MAX_TIMEOUT_MS;
+       tx->callback_param = &done;
+       tx->callback = tx_callback;
+
+       cookie = tx->tx_submit(tx);
+       if (dma_submit_error(cookie)) {
+               pr_err("Failed to dma tx_submit\n");
+               return -EBUSY;
+       }
+
+       dma_async_issue_pending(chan);
+       wait_event_interruptible_timeout(wq, done.done,
+                       msecs_to_jiffies(done.timeout));
+
+#ifdef CONFIG_SOC_STARFIVE_VIC7100
+       starfive_flush_dcache(src_dma, size);
+       starfive_flush_dcache(dst_dma, size);
+#endif
+       status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+       if (status != DMA_COMPLETE) {
+               pr_err("dma: not complete! status:%d \n", status);
+               dmaengine_terminate_sync(chan);
+               return -EBUSY;
+       }
+
+       dma_release_channel(chan);
+       return 0;
+}
+EXPORT_SYMBOL(async_memcpy_single);
+
+/*
+*virtl addr for cpu.
+*/
+/*
+ * Kernel-virtual-address front end: translate both buffers to DMA
+ * (physical) addresses and run a single memcpy transfer between them.
+ */
+int async_memcpy_single_virt(void *dst, void *src, size_t size)
+{
+	return async_memcpy_single(dw_virt_to_phys(dst),
+				   dw_virt_to_phys(src), size);
+}
+EXPORT_SYMBOL(async_memcpy_single_virt);
+
+/*
+ * Self-test: allocate coherent src/dst buffers, DMA-copy a known pattern,
+ * and verify the destination matches.  Returns 0 on success.
+ *
+ * Fixes: (1) "size < 0" on an unsigned size_t was always false — reject
+ * size == 0 instead; (2) kzalloc() returns NULL on failure, never an
+ * ERR_PTR, so the IS_ERR()/PTR_ERR() check let allocation failure through
+ * (PTR_ERR(NULL) == 0, i.e. "success").
+ */
+int async_memcpy_test(size_t size)
+{
+	async_dma_parm_t *dma_parm;
+	int ret = 0;
+
+	if (size == 0) {
+		pr_warn("dmatest: no size input yet.\n");
+		return -EINVAL;
+	}
+
+	dma_parm = kzalloc(sizeof(*dma_parm), GFP_KERNEL);
+	if (!dma_parm)
+		return -ENOMEM;
+
+	dma_parm->size = size;
+	ret = async_dma_alloc_buf(dma_parm);
+	if (ret)
+		goto _ERR_DMA_ALLOC_MEM;
+
+	pr_debug("dmatest: src=%#llx, dst=%#llx\n", (u64)dma_parm->src,
+						(u64)dma_parm->dst);
+	pr_debug("dmatest: dma_src=%#llx dma_dst=%#llx\n", dma_parm->src_dma,
+						dma_parm->dst_dma);
+
+	async_dma_prebuf(dma_parm->dst, dma_parm->src, size);
+	ret = async_memcpy_single(dma_parm->dst_dma, dma_parm->src_dma, size);
+	if (ret) {
+		pr_err("dmatest: async_memcpy test failed. status:%d\n", ret);
+		goto _ERR_DMA_MEMCPY;
+	}
+	ret = async_dma_check_data(dma_parm->dst, dma_parm->src, size);
+	if (ret)
+		pr_err("dmatest: check data error.\n");
+
+_ERR_DMA_MEMCPY:
+	async_dma_free_buf(dma_parm);
+_ERR_DMA_ALLOC_MEM:
+	kfree(dma_parm);
+
+	return ret;
+}
+EXPORT_SYMBOL(async_memcpy_test);