dma: add DMA misc driver interface for data transfer
[platform/kernel/linux-starfive.git] drivers/dma/dw-axi-dmac-starfive/starfive_dmaengine_memcpy.c
/*
 * Copyright 2021 StarFive, Inc <samin.guo@starfivetech.com>
 *
 * API and self-test helpers for DMA memcpy.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/acpi_iort.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>

#include <soc/starfive/vic7100.h>

#define DMATEST_MAX_TIMEOUT_MS		20000

static DECLARE_WAIT_QUEUE_HEAD(wq);

struct dmatest_done {
	int	timeout;
	bool	done;
};

typedef struct async_dma_parm_t {
	struct device dev;
	dma_addr_t src_dma;
	dma_addr_t dst_dma;
	void *src;
	void *dst;
	size_t size;
} async_dma_parm_t;
/*
 * Translate a kernel virtual address from the linear mapping into a
 * physical address usable as a DMA address (what dma_map_page() would
 * hand back on a device without an IOMMU).
 */
dma_addr_t dw_virt_to_phys(void *vaddr)
{
	struct page *pg = virt_to_page(vaddr);
	unsigned long pa_off = offset_in_page(vaddr);

	return page_to_phys(pg) + pa_off;
}
EXPORT_SYMBOL(dw_virt_to_phys);

void *dw_phys_to_virt(dma_addr_t phys)
{
	struct page *pg = phys_to_page(phys);
	unsigned long pa_off = offset_in_page(phys);

	return page_to_virt(pg) + pa_off;
}
EXPORT_SYMBOL(dw_phys_to_virt);
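
/*
 * Usage sketch (illustrative, not part of the driver): both helpers are
 * only valid for addresses in the kernel linear mapping, so a caller
 * should reject anything else first. The buffer name below is
 * hypothetical.
 *
 *	void *buf = kmalloc(SZ_4K, GFP_KERNEL);	// linear-map memory
 *	dma_addr_t pa = 0;
 *
 *	if (buf && virt_addr_valid(buf))	// vmalloc() memory would fail here
 *		pa = dw_virt_to_phys(buf);
 */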

/* DMA engine completion callback: flag completion and wake the waiter. */
static void tx_callback(void *arg)
{
	struct dmatest_done *done = arg;

	done->done = true;
	wake_up_interruptible(&wq);
}

static int async_dma_alloc_buf(async_dma_parm_t *dma_parm)
{
	struct device *dev = &dma_parm->dev;

	dev->bus = NULL;
	dev->coherent_dma_mask = DMA_BIT_MASK(32);
	/* No dma-range restriction and no IOMMU; treat the device as coherent. */
	arch_setup_dma_ops(dev, 0, 0, NULL, true);

	dma_parm->dst = dma_alloc_coherent(dev, dma_parm->size,
					&dma_parm->dst_dma, GFP_KERNEL);
	if (!dma_parm->dst)
		return -ENOMEM;

	dma_parm->src = dma_alloc_coherent(dev, dma_parm->size,
					&dma_parm->src_dma, GFP_KERNEL);
	if (!dma_parm->src)
		goto _FAILED_ALLOC_SRC;

	return 0;

_FAILED_ALLOC_SRC:
	dma_free_coherent(dev, dma_parm->size, dma_parm->dst, dma_parm->dst_dma);
	return -ENOMEM;
}

static int async_dma_free_buf(async_dma_parm_t *dma_parm)
{
	struct device *dev = &dma_parm->dev;

	dma_free_coherent(dev, dma_parm->size, dma_parm->dst, dma_parm->dst_dma);
	dma_free_coherent(dev, dma_parm->size, dma_parm->src, dma_parm->src_dma);

	return 0;
}

/* Fill the source with a 0x5a test pattern and clear the destination. */
static void async_dma_prebuf(void *dst, void *src, size_t size)
{
	memset(dst, 0x00, size);
	memset(src, 0x5a, size);
}

static int async_dma_check_data(void *dst, void *src, size_t size)
{
	return memcmp(dst, src, size);
}

/*
 * Single memcpy where both addresses are physical (DMA) addresses.
 */
int async_memcpy_single(dma_addr_t dst_dma, dma_addr_t src_dma, size_t size)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	struct dmatest_done done;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;
	enum dma_status status;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan) {
		pr_err("dma request channel failed\n");
		return -EBUSY;
	}

	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, size,
				DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!tx) {
		pr_err("Failed to prepare DMA memcpy\n");
		dma_release_channel(chan);
		return -EIO;
	}

	pr_debug("dmatest: dma_src=%#llx dma_dst=%#llx size:%#zx\n",
					(u64)src_dma, (u64)dst_dma, size);
	done.done = false;
	done.timeout = DMATEST_MAX_TIMEOUT_MS;
	tx->callback_param = &done;
	tx->callback = tx_callback;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		pr_err("Failed to dma tx_submit\n");
		dma_release_channel(chan);
		return -EBUSY;
	}

	dma_async_issue_pending(chan);
	wait_event_interruptible_timeout(wq, done.done,
			msecs_to_jiffies(done.timeout));

#ifdef CONFIG_SOC_STARFIVE_VIC7100
	/* The VIC7100 DMA is not cache-coherent; flush both buffers. */
	starfive_flush_dcache(src_dma, size);
	starfive_flush_dcache(dst_dma, size);
#endif
	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
	if (status != DMA_COMPLETE) {
		pr_err("dma: not complete! status:%d\n", status);
		dmaengine_terminate_sync(chan);
		ret = -EBUSY;
	}

	dma_release_channel(chan);
	return ret;
}
EXPORT_SYMBOL(async_memcpy_single);
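
/*
 * Usage sketch (illustrative only): feed async_memcpy_single() with
 * streaming mappings obtained from a real device. "parent" is a
 * hypothetical struct device with valid DMA ops; error handling is
 * elided.
 *
 *	dma_addr_t s = dma_map_single(parent, src, len, DMA_TO_DEVICE);
 *	dma_addr_t d = dma_map_single(parent, dst, len, DMA_FROM_DEVICE);
 *	int err = async_memcpy_single(d, s, len);
 *
 *	dma_unmap_single(parent, d, len, DMA_FROM_DEVICE);
 *	dma_unmap_single(parent, s, len, DMA_TO_DEVICE);
 */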

/*
 * Single memcpy where both addresses are kernel virtual addresses from
 * the linear mapping (e.g. kmalloc()); vmalloc() addresses will not work.
 */
int async_memcpy_single_virt(void *dst, void *src, size_t size)
{
	dma_addr_t src_dma, dst_dma;
	int ret;

	src_dma = dw_virt_to_phys(src);
	dst_dma = dw_virt_to_phys(dst);

	ret = async_memcpy_single(dst_dma, src_dma, size);
	return ret;
}
EXPORT_SYMBOL(async_memcpy_single_virt);
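
/*
 * Usage sketch (hypothetical buffers): both pointers must come from the
 * linear map, e.g. kmalloc():
 *
 *	void *src = kmalloc(SZ_4K, GFP_KERNEL);
 *	void *dst = kmalloc(SZ_4K, GFP_KERNEL);
 *	int err = (src && dst) ? async_memcpy_single_virt(dst, src, SZ_4K)
 *			       : -ENOMEM;
 */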

int async_memcpy_test(size_t size)
{
	async_dma_parm_t *dma_parm;
	int ret = 0;

	if (!size) {
		pr_warn("dmatest: invalid size 0.\n");
		return -EINVAL;
	}

	dma_parm = kzalloc(sizeof(*dma_parm), GFP_KERNEL);
	if (!dma_parm)
		return -ENOMEM;

	dma_parm->size = size;
	ret = async_dma_alloc_buf(dma_parm);
	if (ret)
		goto _ERR_DMA_ALLOC_MEM;

	pr_debug("dmatest: src=%#llx, dst=%#llx\n", (u64)dma_parm->src,
						(u64)dma_parm->dst);
	pr_debug("dmatest: dma_src=%#llx dma_dst=%#llx\n", (u64)dma_parm->src_dma,
						(u64)dma_parm->dst_dma);

	async_dma_prebuf(dma_parm->dst, dma_parm->src, size);
	ret = async_memcpy_single(dma_parm->dst_dma, dma_parm->src_dma, size);
	if (ret) {
		pr_err("dmatest: async_memcpy test failed. status:%d\n", ret);
		goto _ERR_DMA_MEMCPY;
	}
	ret = async_dma_check_data(dma_parm->dst, dma_parm->src, size);
	if (ret)
		pr_err("dmatest: check data error.\n");

_ERR_DMA_MEMCPY:
	async_dma_free_buf(dma_parm);
_ERR_DMA_ALLOC_MEM:
	kfree(dma_parm);

	return ret;
}
EXPORT_SYMBOL(async_memcpy_test);
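
/*
 * Self-test sketch (assumed module glue, not part of this file): run a
 * 4 KiB copy at module load and fail the load on error.
 *
 *	static int __init dma_memcpy_selftest_init(void)
 *	{
 *		return async_memcpy_test(SZ_4K);
 *	}
 *	module_init(dma_memcpy_selftest_init);
 */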