/*
 * Copyright 2021 StarFive, Inc <samin.guo@starfivetech.com>
 *
 * API test for DMA memcpy.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
16 #include <linux/acpi_iort.h>
17 #include <linux/cdev.h>
18 #include <linux/device.h>
19 #include <linux/dmaengine.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dma-map-ops.h>
23 #include <linux/init.h>
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/slab.h>
27 #include <linux/string.h>
28 #include <linux/wait.h>
30 #include <soc/starfive/vic7100.h>
/* Upper bound (ms) to wait for a single DMA transfer to complete. */
#define DMATEST_MAX_TIMEOUT_MS	20000

/* Wait queue the DMA completion callback signals. */
static DECLARE_WAIT_QUEUE_HEAD(wq);
41 typedef struct async_dma_parm_t {
50 dma_addr_t dw_virt_to_phys(void *vaddr)
52 struct page *pg = virt_to_page(vaddr);
53 unsigned long pa_off = offset_in_page(pg);
56 return page_to_phys(pg) + pa_off;
58 EXPORT_SYMBOL(dw_virt_to_phys);
60 void *dw_phys_to_virt(dma_addr_t phys)
62 struct page *pg = phys_to_page(phys);
63 unsigned long pa_off = offset_in_page(phys);
65 return page_to_virt(pg) + pa_off;
67 EXPORT_SYMBOL(dw_phys_to_virt);
69 static void tx_callback(void *arg)
71 struct dmatest_done *done = arg;
74 wake_up_interruptible(&wq);
77 static int async_dma_alloc_buf(async_dma_parm_t *dma_parm)
79 struct device *dev = &dma_parm->dev;
82 dev->coherent_dma_mask = 0xffffffff;
83 arch_setup_dma_ops(dev, dma_parm->dst_dma, 0, NULL, true);
85 dma_parm->dst = dma_alloc_coherent(dev, dma_parm->size,
86 &dma_parm->dst_dma, GFP_KERNEL);
88 goto _FAILED_ALLOC_DST;
90 dma_parm->src = dma_alloc_coherent(dev, dma_parm->size,
91 &dma_parm->src_dma, GFP_KERNEL);
93 goto _FAILED_ALLOC_SRC;
98 dma_free_coherent(dev, dma_parm->size, dma_parm->dst, dma_parm->dst_dma);
100 dma_free_coherent(dev, dma_parm->size, dma_parm->src, dma_parm->src_dma);
104 static int async_dma_free_buf(async_dma_parm_t *dma_parm)
106 struct device *dev = &dma_parm->dev;
108 dma_free_coherent(dev, dma_parm->size, dma_parm->dst, dma_parm->dst_dma);
109 dma_free_coherent(dev, dma_parm->size, dma_parm->src, dma_parm->src_dma);
/*
 * async_dma_prebuf - initialize test patterns before a copy.
 * @dst: destination buffer, cleared to 0x00
 * @src: source buffer, filled with the 0x5a marker pattern
 * @size: length of both buffers in bytes
 *
 * The casts to (u8 *) in the original were redundant: memset() takes
 * void * directly.
 */
static void async_dma_prebuf(void *dst, void *src, size_t size)
{
	memset(dst, 0x00, size);
	memset(src, 0x5a, size);
}
/*
 * async_dma_check_data - verify the copied data matches the source.
 * @dst: destination buffer
 * @src: source buffer
 * @size: number of bytes to compare
 *
 * Returns 0 when the buffers are identical, non-zero otherwise
 * (memcmp() semantics).
 */
static int async_dma_check_data(void *dst, void *src, size_t size)
{
	return memcmp(dst, src, size);
}
128 int async_memcpy_single(dma_addr_t dst_dma, dma_addr_t src_dma, size_t size)
130 struct dma_async_tx_descriptor *tx;
131 struct dma_chan *chan;
132 struct dmatest_done done;
135 enum dma_status status;
138 dma_cap_set(DMA_MEMCPY, mask);
139 chan = dma_request_channel(mask, NULL, NULL);
141 pr_err("dma request channel failed\n");
145 tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma, size,
146 DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
149 pr_err("Failed to prepare DMA memcpy\n");
150 dma_release_channel(chan);
154 pr_debug("dmatest: dma_src=%#llx dma_dst=%#llx size:%#lx\n",
155 src_dma, dst_dma, size);
157 done.timeout = DMATEST_MAX_TIMEOUT_MS;
158 tx->callback_param = &done;
159 tx->callback = tx_callback;
161 cookie = tx->tx_submit(tx);
162 if (dma_submit_error(cookie)) {
163 pr_err("Failed to dma tx_submit\n");
167 dma_async_issue_pending(chan);
168 wait_event_interruptible_timeout(wq, done.done,
169 msecs_to_jiffies(done.timeout));
171 #ifdef CONFIG_SOC_STARFIVE_VIC7100
172 starfive_flush_dcache(src_dma, size);
173 starfive_flush_dcache(dst_dma, size);
175 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
176 if (status != DMA_COMPLETE) {
177 pr_err("dma: not complete! status:%d \n", status);
178 dmaengine_terminate_sync(chan);
182 dma_release_channel(chan);
185 EXPORT_SYMBOL(async_memcpy_single);
190 int async_memcpy_single_virt(void *dst, void *src, size_t size)
192 dma_addr_t src_dma, dst_dma;
195 src_dma = dw_virt_to_phys(src);
196 dst_dma = dw_virt_to_phys(dst);
198 ret = async_memcpy_single(dst_dma, src_dma, size);
201 EXPORT_SYMBOL(async_memcpy_single_virt);
203 int async_memcpy_test(size_t size)
205 async_dma_parm_t *dma_parm;
209 pr_warn("dmatest: no size input yet.\n");
213 dma_parm = kzalloc(sizeof(*dma_parm), GFP_KERNEL);
214 if (IS_ERR(dma_parm))
215 return PTR_ERR(dma_parm);
217 dma_parm->size = size;
218 ret = async_dma_alloc_buf(dma_parm);
221 goto _ERR_DMA_ALLOC_MEM;
224 pr_debug("dmatest: src=%#llx, dst=%#llx\n", (u64)dma_parm->src,
226 pr_debug("dmatest: dma_src=%#llx dma_dst=%#llx\n", dma_parm->src_dma,
229 async_dma_prebuf(dma_parm->dst, dma_parm->src, size);
230 ret = async_memcpy_single(dma_parm->dst_dma, dma_parm->src_dma, size);
232 pr_err("dmatest: async_memcpy test failed. status:%d\n", ret);
233 goto _ERR_DMA_MEMCPY;
235 ret = async_dma_check_data(dma_parm->dst, dma_parm->src, size);
237 pr_err("dmatest: check data error.\n");
240 async_dma_free_buf(dma_parm);
246 EXPORT_SYMBOL(async_memcpy_test);