// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
 *
 * Copyright (C) 2014 Atmel Corporation
 *
 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
 */

#include <asm/barrier.h>
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

#include "dmaengine.h"

/* Global registers */
#define AT_XDMAC_GTYPE		0x00	/* Global Type Register */
#define		AT_XDMAC_NB_CH(i)	(((i) & 0x1F) + 1)		/* Number of Channels Minus One */
#define		AT_XDMAC_FIFO_SZ(i)	(((i) >> 5) & 0x7FF)		/* Number of Bytes */
#define		AT_XDMAC_NB_REQ(i)	((((i) >> 16) & 0x3F) + 1)	/* Number of Peripheral Requests Minus One */
#define AT_XDMAC_GCFG		0x04	/* Global Configuration Register */
#define		AT_XDMAC_WRHP(i)	(((i) & 0xF) << 4)
#define		AT_XDMAC_WRMP(i)	(((i) & 0xF) << 8)
#define		AT_XDMAC_WRLP(i)	(((i) & 0xF) << 12)
#define		AT_XDMAC_RDHP(i)	(((i) & 0xF) << 16)
#define		AT_XDMAC_RDMP(i)	(((i) & 0xF) << 20)
#define		AT_XDMAC_RDLP(i)	(((i) & 0xF) << 24)
#define		AT_XDMAC_RDSG(i)	(((i) & 0xF) << 28)
#define AT_XDMAC_GCFG_M2M	(AT_XDMAC_RDLP(0xF) | AT_XDMAC_WRLP(0xF))
#define AT_XDMAC_GCFG_P2M	(AT_XDMAC_RDSG(0x1) | AT_XDMAC_RDHP(0x3) | \
				AT_XDMAC_WRHP(0x5))
#define AT_XDMAC_GWAC		0x08	/* Global Weighted Arbiter Configuration Register */
#define		AT_XDMAC_PW0(i)		(((i) & 0xF) << 0)
#define		AT_XDMAC_PW1(i)		(((i) & 0xF) << 4)
#define		AT_XDMAC_PW2(i)		(((i) & 0xF) << 8)
#define		AT_XDMAC_PW3(i)		(((i) & 0xF) << 12)
#define AT_XDMAC_GWAC_M2M	0
#define AT_XDMAC_GWAC_P2M	(AT_XDMAC_PW0(0xF) | AT_XDMAC_PW2(0xF))

#define AT_XDMAC_GIE		0x0C	/* Global Interrupt Enable Register */
#define AT_XDMAC_GID		0x10	/* Global Interrupt Disable Register */
#define AT_XDMAC_GIM		0x14	/* Global Interrupt Mask Register */
#define AT_XDMAC_GIS		0x18	/* Global Interrupt Status Register */
#define AT_XDMAC_GE		0x1C	/* Global Channel Enable Register */
#define AT_XDMAC_GD		0x20	/* Global Channel Disable Register */
#define AT_XDMAC_GS		0x24	/* Global Channel Status Register */
#define AT_XDMAC_VERSION	0xFFC	/* XDMAC Version Register */

/* Channel relative registers offsets */
#define AT_XDMAC_CIE		0x00	/* Channel Interrupt Enable Register */
#define		AT_XDMAC_CIE_BIE	BIT(0)	/* End of Block Interrupt Enable Bit */
#define		AT_XDMAC_CIE_LIE	BIT(1)	/* End of Linked List Interrupt Enable Bit */
#define		AT_XDMAC_CIE_DIE	BIT(2)	/* End of Disable Interrupt Enable Bit */
#define		AT_XDMAC_CIE_FIE	BIT(3)	/* End of Flush Interrupt Enable Bit */
#define		AT_XDMAC_CIE_RBEIE	BIT(4)	/* Read Bus Error Interrupt Enable Bit */
#define		AT_XDMAC_CIE_WBEIE	BIT(5)	/* Write Bus Error Interrupt Enable Bit */
#define		AT_XDMAC_CIE_ROIE	BIT(6)	/* Request Overflow Interrupt Enable Bit */
#define AT_XDMAC_CID		0x04	/* Channel Interrupt Disable Register */
#define		AT_XDMAC_CID_BID	BIT(0)	/* End of Block Interrupt Disable Bit */
#define		AT_XDMAC_CID_LID	BIT(1)	/* End of Linked List Interrupt Disable Bit */
#define		AT_XDMAC_CID_DID	BIT(2)	/* End of Disable Interrupt Disable Bit */
#define		AT_XDMAC_CID_FID	BIT(3)	/* End of Flush Interrupt Disable Bit */
#define		AT_XDMAC_CID_RBEID	BIT(4)	/* Read Bus Error Interrupt Disable Bit */
#define		AT_XDMAC_CID_WBEID	BIT(5)	/* Write Bus Error Interrupt Disable Bit */
#define		AT_XDMAC_CID_ROID	BIT(6)	/* Request Overflow Interrupt Disable Bit */
#define AT_XDMAC_CIM		0x08	/* Channel Interrupt Mask Register */
#define		AT_XDMAC_CIM_BIM	BIT(0)	/* End of Block Interrupt Mask Bit */
#define		AT_XDMAC_CIM_LIM	BIT(1)	/* End of Linked List Interrupt Mask Bit */
#define		AT_XDMAC_CIM_DIM	BIT(2)	/* End of Disable Interrupt Mask Bit */
#define		AT_XDMAC_CIM_FIM	BIT(3)	/* End of Flush Interrupt Mask Bit */
#define		AT_XDMAC_CIM_RBEIM	BIT(4)	/* Read Bus Error Interrupt Mask Bit */
#define		AT_XDMAC_CIM_WBEIM	BIT(5)	/* Write Bus Error Interrupt Mask Bit */
#define		AT_XDMAC_CIM_ROIM	BIT(6)	/* Request Overflow Interrupt Mask Bit */
#define AT_XDMAC_CIS		0x0C	/* Channel Interrupt Status Register */
#define		AT_XDMAC_CIS_BIS	BIT(0)	/* End of Block Interrupt Status Bit */
#define		AT_XDMAC_CIS_LIS	BIT(1)	/* End of Linked List Interrupt Status Bit */
#define		AT_XDMAC_CIS_DIS	BIT(2)	/* End of Disable Interrupt Status Bit */
#define		AT_XDMAC_CIS_FIS	BIT(3)	/* End of Flush Interrupt Status Bit */
#define		AT_XDMAC_CIS_RBEIS	BIT(4)	/* Read Bus Error Interrupt Status Bit */
#define		AT_XDMAC_CIS_WBEIS	BIT(5)	/* Write Bus Error Interrupt Status Bit */
#define		AT_XDMAC_CIS_ROIS	BIT(6)	/* Request Overflow Interrupt Status Bit */
#define AT_XDMAC_CSA		0x10	/* Channel Source Address Register */
#define AT_XDMAC_CDA		0x14	/* Channel Destination Address Register */
#define AT_XDMAC_CNDA		0x18	/* Channel Next Descriptor Address Register */
#define		AT_XDMAC_CNDA_NDAIF(i)	((i) & 0x1)		/* Channel x Next Descriptor Interface */
#define		AT_XDMAC_CNDA_NDA(i)	((i) & 0xfffffffc)	/* Channel x Next Descriptor Address */
#define AT_XDMAC_CNDC		0x1C	/* Channel Next Descriptor Control Register */
#define		AT_XDMAC_CNDC_NDE		(0x1 << 0)	/* Channel x Next Descriptor Enable */
#define		AT_XDMAC_CNDC_NDSUP		(0x1 << 1)	/* Channel x Next Descriptor Source Update */
#define		AT_XDMAC_CNDC_NDDUP		(0x1 << 2)	/* Channel x Next Descriptor Destination Update */
#define		AT_XDMAC_CNDC_NDVIEW_MASK	GENMASK(28, 27)
#define		AT_XDMAC_CNDC_NDVIEW_NDV0	(0x0 << 3)	/* Channel x Next Descriptor View 0 */
#define		AT_XDMAC_CNDC_NDVIEW_NDV1	(0x1 << 3)	/* Channel x Next Descriptor View 1 */
#define		AT_XDMAC_CNDC_NDVIEW_NDV2	(0x2 << 3)	/* Channel x Next Descriptor View 2 */
#define		AT_XDMAC_CNDC_NDVIEW_NDV3	(0x3 << 3)	/* Channel x Next Descriptor View 3 */
#define AT_XDMAC_CUBC		0x20	/* Channel Microblock Control Register */
#define AT_XDMAC_CBC		0x24	/* Channel Block Control Register */
#define AT_XDMAC_CC		0x28	/* Channel Configuration Register */
#define		AT_XDMAC_CC_TYPE	(0x1 << 0)	/* Channel Transfer Type */
#define			AT_XDMAC_CC_TYPE_MEM_TRAN	(0x0 << 0)	/* Memory to Memory Transfer */
#define			AT_XDMAC_CC_TYPE_PER_TRAN	(0x1 << 0)	/* Peripheral to Memory or Memory to Peripheral Transfer */
#define		AT_XDMAC_CC_MBSIZE_MASK		(0x3 << 1)
#define			AT_XDMAC_CC_MBSIZE_SINGLE	(0x0 << 1)
#define			AT_XDMAC_CC_MBSIZE_FOUR		(0x1 << 1)
#define			AT_XDMAC_CC_MBSIZE_EIGHT	(0x2 << 1)
#define			AT_XDMAC_CC_MBSIZE_SIXTEEN	(0x3 << 1)
#define		AT_XDMAC_CC_DSYNC	(0x1 << 4)	/* Channel Synchronization */
#define			AT_XDMAC_CC_DSYNC_PER2MEM	(0x0 << 4)
#define			AT_XDMAC_CC_DSYNC_MEM2PER	(0x1 << 4)
#define		AT_XDMAC_CC_PROT	(0x1 << 5)	/* Channel Protection */
#define			AT_XDMAC_CC_PROT_SEC		(0x0 << 5)
#define			AT_XDMAC_CC_PROT_UNSEC		(0x1 << 5)
#define		AT_XDMAC_CC_SWREQ	(0x1 << 6)	/* Channel Software Request Trigger */
#define			AT_XDMAC_CC_SWREQ_HWR_CONNECTED	(0x0 << 6)
#define			AT_XDMAC_CC_SWREQ_SWR_CONNECTED	(0x1 << 6)
#define		AT_XDMAC_CC_MEMSET	(0x1 << 7)	/* Channel Fill Block of memory */
#define			AT_XDMAC_CC_MEMSET_NORMAL_MODE	(0x0 << 7)
#define			AT_XDMAC_CC_MEMSET_HW_MODE	(0x1 << 7)
#define		AT_XDMAC_CC_CSIZE(i)	((0x7 & (i)) << 8)	/* Channel Chunk Size */
#define		AT_XDMAC_CC_DWIDTH_OFFSET	11
#define		AT_XDMAC_CC_DWIDTH_MASK	(0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
#define		AT_XDMAC_CC_DWIDTH(i)	((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET)	/* Channel Data Width */
#define			AT_XDMAC_CC_DWIDTH_BYTE		0x0
#define			AT_XDMAC_CC_DWIDTH_HALFWORD	0x1
#define			AT_XDMAC_CC_DWIDTH_WORD		0x2
#define			AT_XDMAC_CC_DWIDTH_DWORD	0x3
#define		AT_XDMAC_CC_SIF(i)	((0x1 & (i)) << 13)	/* Channel Source Interface Identifier */
#define		AT_XDMAC_CC_DIF(i)	((0x1 & (i)) << 14)	/* Channel Destination Interface Identifier */
#define		AT_XDMAC_CC_SAM_MASK	(0x3 << 16)	/* Channel Source Addressing Mode */
#define			AT_XDMAC_CC_SAM_FIXED_AM	(0x0 << 16)
#define			AT_XDMAC_CC_SAM_INCREMENTED_AM	(0x1 << 16)
#define			AT_XDMAC_CC_SAM_UBS_AM		(0x2 << 16)
#define			AT_XDMAC_CC_SAM_UBS_DS_AM	(0x3 << 16)
#define		AT_XDMAC_CC_DAM_MASK	(0x3 << 18)	/* Channel Destination Addressing Mode */
#define			AT_XDMAC_CC_DAM_FIXED_AM	(0x0 << 18)
#define			AT_XDMAC_CC_DAM_INCREMENTED_AM	(0x1 << 18)
#define			AT_XDMAC_CC_DAM_UBS_AM		(0x2 << 18)
#define			AT_XDMAC_CC_DAM_UBS_DS_AM	(0x3 << 18)
#define		AT_XDMAC_CC_INITD	(0x1 << 21)	/* Channel Initialization Terminated (read only) */
#define			AT_XDMAC_CC_INITD_TERMINATED	(0x0 << 21)
#define			AT_XDMAC_CC_INITD_IN_PROGRESS	(0x1 << 21)
#define		AT_XDMAC_CC_RDIP	(0x1 << 22)	/* Read in Progress (read only) */
#define			AT_XDMAC_CC_RDIP_DONE		(0x0 << 22)
#define			AT_XDMAC_CC_RDIP_IN_PROGRESS	(0x1 << 22)
#define		AT_XDMAC_CC_WRIP	(0x1 << 23)	/* Write in Progress (read only) */
#define			AT_XDMAC_CC_WRIP_DONE		(0x0 << 23)
#define			AT_XDMAC_CC_WRIP_IN_PROGRESS	(0x1 << 23)
#define		AT_XDMAC_CC_PERID(i)	((0x7f & (i)) << 24)	/* Channel Peripheral Identifier */
#define AT_XDMAC_CDS_MSP	0x2C	/* Channel Data Stride Memory Set Pattern */
#define AT_XDMAC_CSUS		0x30	/* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS		0x34	/* Channel Destination Microblock Stride */

/* Microblock control members */
#define AT_XDMAC_MBR_UBC_UBLEN_MAX	0xFFFFFFUL	/* Maximum Microblock Length */
#define AT_XDMAC_MBR_UBC_NDE		(0x1 << 24)	/* Next Descriptor Enable */
#define AT_XDMAC_MBR_UBC_NSEN		(0x1 << 25)	/* Next Descriptor Source Update */
#define AT_XDMAC_MBR_UBC_NDEN		(0x1 << 26)	/* Next Descriptor Destination Update */
#define AT_XDMAC_MBR_UBC_NDV0		(0x0 << 27)	/* Next Descriptor View 0 */
#define AT_XDMAC_MBR_UBC_NDV1		(0x1 << 27)	/* Next Descriptor View 1 */
#define AT_XDMAC_MBR_UBC_NDV2		(0x2 << 27)	/* Next Descriptor View 2 */
#define AT_XDMAC_MBR_UBC_NDV3		(0x3 << 27)	/* Next Descriptor View 3 */

#define AT_XDMAC_MAX_CHAN	0x20
#define AT_XDMAC_MAX_CSIZE	16	/* 16 data */
#define AT_XDMAC_MAX_DWIDTH	8	/* 64 bits */
#define AT_XDMAC_RESIDUE_MAX_RETRIES	5

#define AT_XDMAC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum atc_status {
	AT_XDMAC_CHAN_IS_CYCLIC = 0,
	AT_XDMAC_CHAN_IS_PAUSED,
	AT_XDMAC_CHAN_IS_PAUSED_INTERNAL,
};

struct at_xdmac_layout {
	/* Global Channel Read Suspend Register */
	u8				grs;
	/* Global Write Suspend Register */
	u8				gws;
	/* Global Channel Read Write Suspend Register */
	u8				grws;
	/* Global Channel Read Write Resume Register */
	u8				grwr;
	/* Global Channel Software Request Register */
	u8				gswr;
	/* Global channel Software Request Status Register */
	u8				gsws;
	/* Global Channel Software Flush Request Register */
	u8				gswf;
	/* Channel reg base */
	u8				chan_cc_reg_base;
	/* Source/Destination Interface must be specified or not */
	bool				sdif;
	/* AXI queue priority configuration supported */
	bool				axi_config;
};

/* ----- Channels ----- */
struct at_xdmac_chan {
	struct dma_chan			chan;
	void __iomem			*ch_regs;
	u32				mask;		/* Channel Mask */
	u32				cfg;		/* Channel Configuration Register */
	u8				perid;		/* Peripheral ID */
	u8				perif;		/* Peripheral Interface */
	u8				memif;		/* Memory Interface */
	u32				save_cc;
	u32				save_cim;
	u32				save_cnda;
	u32				save_cndc;
	u32				irq_status;
	unsigned long			status;
	struct tasklet_struct		tasklet;
	struct dma_slave_config		sconfig;

	spinlock_t			lock;

	struct list_head		xfers_list;
	struct list_head		free_descs_list;
};

/* ----- Controller ----- */
struct at_xdmac {
	struct dma_device	dma;
	void __iomem		*regs;
	struct device		*dev;
	int			irq;
	struct clk		*clk;
	u32			save_gim;
	struct dma_pool		*at_xdmac_desc_pool;
	const struct at_xdmac_layout	*layout;
	struct at_xdmac_chan	chan[];
};

/* ----- Descriptors ----- */

/* Linked List Descriptor */
struct at_xdmac_lld {
	u32 mbr_nda;	/* Next Descriptor Member */
	u32 mbr_ubc;	/* Microblock Control Member */
	u32 mbr_sa;	/* Source Address Member */
	u32 mbr_da;	/* Destination Address Member */
	u32 mbr_cfg;	/* Configuration Register */
	u32 mbr_bc;	/* Block Control Register */
	u32 mbr_ds;	/* Data Stride Register */
	u32 mbr_sus;	/* Source Microblock Stride Register */
	u32 mbr_dus;	/* Destination Microblock Stride Register */
};

/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
struct at_xdmac_desc {
	struct at_xdmac_lld		lld;
	enum dma_transfer_direction	direction;
	struct dma_async_tx_descriptor	tx_dma_desc;
	struct list_head		desc_node;
	/* Following members are only used by the first descriptor */
	bool				active_xfer;
	unsigned int			xfer_size;
	struct list_head		descs_list;
	struct list_head		xfer_node;
} __aligned(sizeof(u64));

static const struct at_xdmac_layout at_xdmac_sama5d4_layout = {
	.grs = 0x28,
	.gws = 0x2C,
	.grws = 0x30,
	.grwr = 0x34,
	.gswr = 0x38,
	.gsws = 0x3C,
	.gswf = 0x40,
	.chan_cc_reg_base = 0x50,
	.sdif = true,
	.axi_config = false,
};

static const struct at_xdmac_layout at_xdmac_sama7g5_layout = {
	.grs = 0x30,
	.gws = 0x38,
	.grws = 0x40,
	.grwr = 0x44,
	.gswr = 0x48,
	.gsws = 0x4C,
	.gswf = 0x50,
	.chan_cc_reg_base = 0x60,
	.sdif = false,
	.axi_config = true,
};

static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
	return atxdmac->regs + (atxdmac->layout->chan_cc_reg_base + chan_nb * 0x40);
}

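/*
 * Illustration: each channel owns a 0x40-byte register window starting at the
 * variant's chan_cc_reg_base. With the sama5d4 layout above
 * (chan_cc_reg_base = 0x50), channel 2's window starts at
 * 0x50 + 2 * 0x40 = 0xD0 from the controller base.
 */
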
#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
#define at_xdmac_write(atxdmac, reg, value) \
	writel_relaxed((value), (atxdmac)->regs + (reg))

#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))

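/*
 * Note: these helpers use the _relaxed MMIO accessors, which do not imply
 * ordering against normal memory accesses. Explicit barriers are therefore
 * added where descriptor memory must be visible before a register write
 * (see at_xdmac_start_xfer() and the residue read sequence in
 * at_xdmac_tx_status()).
 */
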
static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct at_xdmac_chan, chan);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
{
	return container_of(ddev, struct at_xdmac, dma);
}

static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
}

static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
}

static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
}

static inline int at_xdmac_chan_is_paused_internal(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
}

static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
{
	return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
}

static inline u8 at_xdmac_get_dwidth(u32 cfg)
{
	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
}

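/*
 * Example: a cfg word with AT_XDMAC_CC_DWIDTH(AT_XDMAC_CC_DWIDTH_WORD) set
 * (0x2 << 11) makes at_xdmac_get_dwidth() return 2, i.e. transfers of
 * 1 << 2 = 4 bytes; the byte count of a microblock is then ublen << 2.
 */
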
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

static void at_xdmac_runtime_suspend_descriptors(struct at_xdmac_chan *atchan)
{
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc	*desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
		if (!desc->active_xfer)
			continue;

		pm_runtime_mark_last_busy(atxdmac->dev);
		pm_runtime_put_autosuspend(atxdmac->dev);
	}
}

static int at_xdmac_runtime_resume_descriptors(struct at_xdmac_chan *atchan)
{
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc	*desc, *_desc;
	int			ret;

	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
		if (!desc->active_xfer)
			continue;

		ret = pm_runtime_resume_and_get(atxdmac->dev);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
{
	struct at_xdmac	*atxdmac = to_at_xdmac(atchan->chan.device);
	int ret;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return false;

	ret = !!(at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask);

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	return ret;
}

static void at_xdmac_off(struct at_xdmac *atxdmac, bool suspend_descriptors)
{
	struct dma_chan		*chan, *_chan;
	struct at_xdmac_chan	*atchan;
	int			ret;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return;

	at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);

	/* Wait until all channels are disabled. */
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
		cpu_relax();

	at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);

	/* Decrement runtime PM ref counter for each active descriptor. */
	if (!list_empty(&atxdmac->dma.channels) && suspend_descriptors) {
		list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels,
					 device_node) {
			atchan = to_at_xdmac_chan(chan);
			at_xdmac_runtime_suspend_descriptors(atchan);
		}
	}

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);
}

/* Called with atchan->lock held. */
static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *first)
{
	struct at_xdmac	*atxdmac = to_at_xdmac(atchan->chan.device);
	u32		reg;
	int		ret;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return;

	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);

	/* Set transfer as active to not try to start it again. */
	first->active_xfer = true;

	/* Tell xdmac where to get the first descriptor. */
	reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys);
	if (atxdmac->layout->sdif)
		reg |= AT_XDMAC_CNDA_NDAIF(atchan->memif);

	at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);

	/*
	 * When doing non-cyclic transfers we need to use the next
	 * descriptor view 2 since some fields of the configuration register
	 * depend on transfer size and src/dest addresses.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
	else if ((first->lld.mbr_ubc &
		  AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3)
		reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
	else
		reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
	/*
	 * Even if the register will be updated from the configuration in the
	 * descriptor when using view 2 or higher, the PROT bit won't be set
	 * properly. This bit can be modified only by using the channel
	 * configuration register.
	 */
	at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);

	reg |= AT_XDMAC_CNDC_NDDUP
	       | AT_XDMAC_CNDC_NDSUP
	       | AT_XDMAC_CNDC_NDE;
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
	/*
	 * Request Overflow Error is only for peripheral synchronized transfers
	 */
	if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
		reg |= AT_XDMAC_CIE_ROIE;

	/*
	 * There is no end of list when doing cyclic DMA, so we need to get
	 * an interrupt after each period.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_BIE);
	else
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_LIE);
	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
	wmb();
	at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
}

static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_xdmac_desc	*desc = txd_to_at_desc(tx);
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		irqflags;

	spin_lock_irqsave(&atchan->lock, irqflags);
	cookie = dma_cookie_assign(tx);

	list_add_tail(&desc->xfer_node, &atchan->xfers_list);
	spin_unlock_irqrestore(&atchan->lock, irqflags);

	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
		 __func__, atchan, desc);

	return cookie;
}

static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
						 gfp_t gfp_flags)
{
	struct at_xdmac_desc	*desc;
	struct at_xdmac		*atxdmac = to_at_xdmac(chan->device);
	dma_addr_t		phys;

	desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
	if (desc) {
		INIT_LIST_HEAD(&desc->descs_list);
		dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
		desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
		desc->tx_dma_desc.phys = phys;
	}

	return desc;
}

static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
{
	memset(&desc->lld, 0, sizeof(desc->lld));
	INIT_LIST_HEAD(&desc->descs_list);
	desc->direction = DMA_TRANS_NONE;
	desc->xfer_size = 0;
	desc->active_xfer = false;
}

/* Call must be protected by lock. */
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;

	if (list_empty(&atchan->free_descs_list)) {
		desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
	} else {
		desc = list_first_entry(&atchan->free_descs_list,
					struct at_xdmac_desc, desc_node);
		list_del(&desc->desc_node);
		at_xdmac_init_used_desc(desc);
	}

	return desc;
}

static void at_xdmac_queue_desc(struct dma_chan *chan,
				struct at_xdmac_desc *prev,
				struct at_xdmac_desc *desc)
{
	if (!prev || !desc)
		return;

	prev->lld.mbr_nda = desc->tx_dma_desc.phys;
	prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;

	dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
		__func__, prev, &prev->lld.mbr_nda);
}

static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
						  struct at_xdmac_desc *desc)
{
	if (!desc)
		return;

	desc->lld.mbr_bc++;

	dev_dbg(chan2dev(chan),
		"%s: incrementing the block count of the desc 0x%p\n",
		__func__, desc);
}

static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *of_dma)
{
	struct at_xdmac		*atxdmac = of_dma->of_dma_data;
	struct at_xdmac_chan	*atchan;
	struct dma_chan		*chan;
	struct device		*dev = atxdmac->dma.dev;

	if (dma_spec->args_count != 1) {
		dev_err(dev, "dma phandle args: bad number of args\n");
		return NULL;
	}

	chan = dma_get_any_slave_channel(&atxdmac->dma);
	if (!chan) {
		dev_err(dev, "can't get a dma channel\n");
		return NULL;
	}

	atchan = to_at_xdmac_chan(chan);
	atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
	atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
	atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
	dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
		atchan->memif, atchan->perif, atchan->perid);

	return chan;
}

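/*
 * A consumer node encodes memif/perif/perid in the single DT cell decoded
 * above. A hypothetical binding, for illustration only (values made up):
 *	dmas = <&xdmac0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
 *			 AT91_XDMAC_DT_PERID(11))>;
 * using the helper macros from <dt-bindings/dma/at91.h> included above.
 */
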
static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
				      enum dma_transfer_direction direction)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	int			csize, dwidth;

	if (direction == DMA_DEV_TO_MEM) {
		atchan->cfg =
			AT91_XDMAC_DT_PERID(atchan->perid)
			| AT_XDMAC_CC_DAM_INCREMENTED_AM
			| AT_XDMAC_CC_SAM_FIXED_AM
			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
			| AT_XDMAC_CC_DSYNC_PER2MEM
			| AT_XDMAC_CC_MBSIZE_SIXTEEN
			| AT_XDMAC_CC_TYPE_PER_TRAN;
		if (atxdmac->layout->sdif)
			atchan->cfg |= AT_XDMAC_CC_DIF(atchan->memif) |
				       AT_XDMAC_CC_SIF(atchan->perif);

		csize = ffs(atchan->sconfig.src_maxburst) - 1;
		if (csize < 0) {
			dev_err(chan2dev(chan), "invalid src maxburst value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
		dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
		if (dwidth < 0) {
			dev_err(chan2dev(chan), "invalid src addr width value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
	} else if (direction == DMA_MEM_TO_DEV) {
		atchan->cfg =
			AT91_XDMAC_DT_PERID(atchan->perid)
			| AT_XDMAC_CC_DAM_FIXED_AM
			| AT_XDMAC_CC_SAM_INCREMENTED_AM
			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
			| AT_XDMAC_CC_DSYNC_MEM2PER
			| AT_XDMAC_CC_MBSIZE_SIXTEEN
			| AT_XDMAC_CC_TYPE_PER_TRAN;
		if (atxdmac->layout->sdif)
			atchan->cfg |= AT_XDMAC_CC_DIF(atchan->perif) |
				       AT_XDMAC_CC_SIF(atchan->memif);

		csize = ffs(atchan->sconfig.dst_maxburst) - 1;
		if (csize < 0) {
			dev_err(chan2dev(chan), "invalid dst maxburst value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
		dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
		if (dwidth < 0) {
			dev_err(chan2dev(chan), "invalid dst addr width value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
	}

	dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);

	return 0;
}

/*
 * Only check that maxburst and addr width values are supported by
 * the controller but not that the configuration is good to perform the
 * transfer since we don't know the direction at this stage.
 */
static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
{
	if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
	    || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
		return -EINVAL;

	if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
	    || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
		return -EINVAL;

	return 0;
}

static int at_xdmac_set_slave_config(struct dma_chan *chan,
				     struct dma_slave_config *sconfig)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

	if (at_xdmac_check_slave_config(sconfig)) {
		dev_err(chan2dev(chan), "invalid slave configuration\n");
		return -EINVAL;
	}

	memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));

	return 0;
}

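/*
 * A client driver typically reaches this path by filling a dma_slave_config
 * and calling dmaengine_slave_config(), which lands here through the
 * device_config callback. A minimal sketch, with made-up values:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = periph_rx_fifo,	// hypothetical FIFO address
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,		// must be <= AT_XDMAC_MAX_CSIZE
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */
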
static struct dma_async_tx_descriptor *
at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction direction,
		       unsigned long flags, void *context)
{
	struct at_xdmac_chan		*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc		*first = NULL, *prev = NULL;
	struct scatterlist		*sg;
	int				i;
	unsigned int			xfer_size = 0;
	unsigned long			irqflags;
	struct dma_async_tx_descriptor	*ret = NULL;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
		__func__, sg_len,
		direction == DMA_MEM_TO_DEV ? "to device" : "from device",
		flags);

	/* Protect dma_sconfig field that can be modified by set_slave_conf. */
	spin_lock_irqsave(&atchan->lock, irqflags);

	if (at_xdmac_compute_chan_conf(chan, direction))
		goto spin_unlock;

	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		struct at_xdmac_desc	*desc = NULL;
		u32			len, mem, dwidth, fixed_dwidth;

		len = sg_dma_len(sg);
		mem = sg_dma_address(sg);
		if (unlikely(!len)) {
			dev_err(chan2dev(chan), "sg data length is zero\n");
			goto spin_unlock;
		}
		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
			__func__, i, len, mem);

		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_tail_init(&first->descs_list,
						      &atchan->free_descs_list);
			goto spin_unlock;
		}

		/* Linked list descriptor setup. */
		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->sconfig.src_addr;
			desc->lld.mbr_da = mem;
		} else {
			desc->lld.mbr_sa = mem;
			desc->lld.mbr_da = atchan->sconfig.dst_addr;
		}
		dwidth = at_xdmac_get_dwidth(atchan->cfg);
		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
			       ? dwidth
			       : AT_XDMAC_CC_DWIDTH_BYTE;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2	/* next descriptor view */
			| AT_XDMAC_MBR_UBC_NDEN			/* next descriptor dst parameter update */
			| AT_XDMAC_MBR_UBC_NSEN			/* next descriptor src parameter update */
			| (len >> fixed_dwidth);		/* microblock length */
		desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
				    AT_XDMAC_CC_DWIDTH(fixed_dwidth);
		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
		xfer_size += len;
	}

	first->tx_dma_desc.flags = flags;
	first->xfer_size = xfer_size;
	first->direction = direction;
	ret = &first->tx_dma_desc;

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, irqflags);
	return ret;
}

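/*
 * Client-side usage sketch (simplified and hypothetical): map a scatterlist,
 * then prepare and submit through the generic dmaengine API;
 * dmaengine_prep_slave_sg() ends up in the function above.
 *
 *	nents = dma_map_sg(dev, sgl, sg_len, DMA_FROM_DEVICE);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *				      DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */
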
static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			 size_t buf_len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*first = NULL, *prev = NULL;
	unsigned int		periods = buf_len / period_len;
	int			i;
	unsigned long		irqflags;

	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
		__func__, &buf_addr, buf_len, period_len,
		direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
		dev_err(chan2dev(chan), "channel currently used\n");
		return NULL;
	}

	if (at_xdmac_compute_chan_conf(chan, direction))
		return NULL;

	for (i = 0; i < periods; i++) {
		struct at_xdmac_desc	*desc = NULL;

		spin_lock_irqsave(&atchan->lock, irqflags);
		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_tail_init(&first->descs_list,
						      &atchan->free_descs_list);
			spin_unlock_irqrestore(&atchan->lock, irqflags);
			return NULL;
		}
		spin_unlock_irqrestore(&atchan->lock, irqflags);
		dev_dbg(chan2dev(chan),
			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
			__func__, desc, &desc->tx_dma_desc.phys);

		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->sconfig.src_addr;
			desc->lld.mbr_da = buf_addr + i * period_len;
		} else {
			desc->lld.mbr_sa = buf_addr + i * period_len;
			desc->lld.mbr_da = atchan->sconfig.dst_addr;
		}
		desc->lld.mbr_cfg = atchan->cfg;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);

		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}

	at_xdmac_queue_desc(chan, prev, first);
	first->tx_dma_desc.flags = flags;
	first->xfer_size = buf_len;
	first->direction = direction;

	return &first->tx_dma_desc;
}

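/*
 * Cyclic transfers are typically requested by audio or UART-RX clients via
 * dmaengine_prep_dma_cyclic(). For example, a 4 KiB ring split into four
 * 1 KiB periods yields periods = buf_len / period_len = 4 descriptors; the
 * final at_xdmac_queue_desc(chan, prev, first) call above links the last
 * descriptor back to the first, so the list never terminates.
 */
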
static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
{
	u32 width;

	/*
	 * Check address alignment to select the greater data width we
	 * can use.
	 *
	 * Some XDMAC implementations don't provide dword transfer, in
	 * this case selecting dword has the same behavior as
	 * selecting word transfers.
	 */
	if (!(addr & 7)) {
		width = AT_XDMAC_CC_DWIDTH_DWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
	} else if (!(addr & 3)) {
		width = AT_XDMAC_CC_DWIDTH_WORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
	} else if (!(addr & 1)) {
		width = AT_XDMAC_CC_DWIDTH_HALFWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
	} else {
		width = AT_XDMAC_CC_DWIDTH_BYTE;
		dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
	}

	return width;
}

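/*
 * Worked example: callers OR together the addresses (and sometimes the
 * length) before calling this helper. For src = 0x20000004,
 * dst = 0x30000008 and len = 0x100, the OR is 0x3000010C: bit 2 is set so
 * 8-byte (dword) alignment fails, but (0x3000010C & 3) == 0, so 32-bit
 * word width (AT_XDMAC_CC_DWIDTH_WORD) is selected.
 */
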
static struct at_xdmac_desc *
at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
				struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *prev,
				dma_addr_t src, dma_addr_t dst,
				struct dma_interleaved_template *xt,
				struct data_chunk *chunk)
{
	struct at_xdmac_desc	*desc;
	u32			dwidth;
	unsigned long		flags;
	size_t			ublen;
	/*
	 * WARNING: The channel configuration is set here since there is no
	 * dmaengine_slave_config call in this case. Moreover, we don't know
	 * the direction, which means we can't dynamically set the source and
	 * dest interface, so we have to use the same one. Only interface 0
	 * allows EBI access. Hopefully we can access DDR through both ports
	 * (at least on SAMA5D4x), so we can use the same interface for source
	 * and dest, which solves the fact that we don't know the direction.
	 * ERRATA: Even if useless for memory transfers, the PERID must not
	 * match that of another channel. If it does, it could lead to
	 * spurious flag status.
	 * For the SAMA7G5x case, the SIF and DIF fields are no longer used.
	 * Thus, there is no need to have the SIF/DIF interfaces here.
	 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
	 * zero.
	 */
	u32			chan_cc = AT_XDMAC_CC_PERID(0x7f)
					| AT_XDMAC_CC_MBSIZE_SIXTEEN
					| AT_XDMAC_CC_TYPE_MEM_TRAN;

	dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
	if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
		dev_dbg(chan2dev(chan),
			"%s: chunk too big (%zu, max size %lu)...\n",
			__func__, chunk->size,
			AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
		return NULL;
	}

	if (prev)
		dev_dbg(chan2dev(chan),
			"Adding items at the end of desc 0x%p\n", prev);

	if (xt->src_inc) {
		if (xt->src_sgl)
			chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
		else
			chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
	}

	if (xt->dst_inc) {
		if (xt->dst_sgl)
			chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
		else
			chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
	}

	spin_lock_irqsave(&atchan->lock, flags);
	desc = at_xdmac_get_desc(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
	if (!desc) {
		dev_err(chan2dev(chan), "can't get descriptor\n");
		return NULL;
	}

	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

	ublen = chunk->size >> dwidth;

	desc->lld.mbr_sa = src;
	desc->lld.mbr_da = dst;
	desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
	desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);

	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
		| AT_XDMAC_MBR_UBC_NDEN
		| AT_XDMAC_MBR_UBC_NSEN
		| ublen;
	desc->lld.mbr_cfg = chan_cc;

	dev_dbg(chan2dev(chan),
		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
		desc->lld.mbr_ubc, desc->lld.mbr_cfg);

	/* Chain lld. */
	if (prev)
		at_xdmac_queue_desc(chan, prev, desc);

	return desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_interleaved(struct dma_chan *chan,
			  struct dma_interleaved_template *xt,
			  unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*prev = NULL, *first = NULL;
	dma_addr_t		dst_addr, src_addr;
	size_t			src_skip = 0, dst_skip = 0, len = 0;
	struct data_chunk	*chunk;
	int			i;

	if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
		return NULL;

	/*
	 * TODO: Handle the case where we have to repeat a chain of
	 * descriptors...
	 */
	if ((xt->numf > 1) && (xt->frame_size > 1))
		return NULL;

	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
		__func__, &xt->src_start, &xt->dst_start, xt->numf,
		xt->frame_size, flags);

	src_addr = xt->src_start;
	dst_addr = xt->dst_start;

	if (xt->numf > 1) {
		first = at_xdmac_interleaved_queue_desc(chan, atchan,
							NULL,
							src_addr, dst_addr,
							xt, xt->sgl);
		if (!first)
			return NULL;

		/* Length of the block is (BLEN+1) microblocks. */
		for (i = 0; i < xt->numf - 1; i++)
			at_xdmac_increment_block_count(chan, first);

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, first, first);
		list_add_tail(&first->desc_node, &first->descs_list);
	} else {
		for (i = 0; i < xt->frame_size; i++) {
			size_t src_icg = 0, dst_icg = 0;
			struct at_xdmac_desc *desc;

			chunk = xt->sgl + i;

			dst_icg = dmaengine_get_dst_icg(xt, chunk);
			src_icg = dmaengine_get_src_icg(xt, chunk);

			src_skip = chunk->size + src_icg;
			dst_skip = chunk->size + dst_icg;

			dev_dbg(chan2dev(chan),
				"%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
				__func__, chunk->size, src_icg, dst_icg);

			desc = at_xdmac_interleaved_queue_desc(chan, atchan,
							       prev,
							       src_addr, dst_addr,
							       xt, chunk);
			if (!desc) {
				if (first)
					list_splice_tail_init(&first->descs_list,
							      &atchan->free_descs_list);
				return NULL;
			}

			if (!first)
				first = desc;

			dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
				__func__, desc, first);
			list_add_tail(&desc->desc_node, &first->descs_list);

			if (xt->src_sgl)
				src_addr += src_skip;

			if (xt->dst_sgl)
				dst_addr += dst_skip;

			len += chunk->size;
			prev = desc;
		}
	}

	first->tx_dma_desc.cookie = -EBUSY;
	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*first = NULL, *prev = NULL;
	size_t			remaining_size = len, xfer_size = 0, ublen;
	dma_addr_t		src_addr = src, dst_addr = dest;
	u32			dwidth;
	/*
	 * WARNING: We don't know the direction, which means we can't
	 * dynamically set the source and dest interface, so we have to use
	 * the same one. Only interface 0 allows EBI access. Hopefully we can
	 * access DDR through both ports (at least on SAMA5D4x), so we can use
	 * the same interface for source and dest, which solves the fact that
	 * we don't know the direction.
	 * ERRATA: Even if useless for memory transfers, the PERID must not
	 * match that of another channel. If it does, it could lead to
	 * spurious flag status.
	 * For the SAMA7G5x case, the SIF and DIF fields are no longer used.
	 * Thus, there is no need to have the SIF/DIF interfaces here.
	 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
	 * zero.
	 */
	u32			chan_cc = AT_XDMAC_CC_PERID(0x7f)
					| AT_XDMAC_CC_DAM_INCREMENTED_AM
					| AT_XDMAC_CC_SAM_INCREMENTED_AM
					| AT_XDMAC_CC_MBSIZE_SIXTEEN
					| AT_XDMAC_CC_TYPE_MEM_TRAN;
	unsigned long		irqflags;

	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
		__func__, &src, &dest, len, flags);

	if (unlikely(!len))
		return NULL;

	dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);

	/* Prepare descriptors. */
	while (remaining_size) {
		struct at_xdmac_desc	*desc = NULL;

		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);

		spin_lock_irqsave(&atchan->lock, irqflags);
		desc = at_xdmac_get_desc(atchan);
		spin_unlock_irqrestore(&atchan->lock, irqflags);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_tail_init(&first->descs_list,
						      &atchan->free_descs_list);
			return NULL;
		}

		/* Update src and dest addresses. */
		src_addr += xfer_size;
		dst_addr += xfer_size;

		if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
			xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
		else
			xfer_size = remaining_size;

		dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);

		/* Check remaining length and change data width if needed. */
		dwidth = at_xdmac_align_width(chan,
					      src_addr | dst_addr | xfer_size);
		chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
		chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

		ublen = xfer_size >> dwidth;
		remaining_size -= xfer_size;

		desc->lld.mbr_sa = src_addr;
		desc->lld.mbr_da = dst_addr;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| ublen;
		desc->lld.mbr_cfg = chan_cc;

		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}

	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}

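/*
 * Chunking example: with dword width (dwidth = 3) a single microblock can
 * move at most AT_XDMAC_MBR_UBC_UBLEN_MAX << 3 = 0xFFFFFF * 8 bytes
 * (roughly 128 MiB), so a larger memcpy is split by the loop above into
 * several linked descriptors, each covering up to that amount.
 */
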
static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
							 struct at_xdmac_chan *atchan,
							 dma_addr_t dst_addr,
							 size_t len,
							 int value)
{
	struct at_xdmac_desc	*desc;
	unsigned long		flags;
	size_t			ublen;
	u32			dwidth;
	char			pattern;
	/*
	 * WARNING: The channel configuration is set here since there is no
	 * dmaengine_slave_config call in this case. Moreover, we don't know
	 * the direction, which means we can't dynamically set the source and
	 * dest interface, so we have to use the same one. Only interface 0
	 * allows EBI access. Hopefully we can access DDR through both ports
	 * (at least on SAMA5D4x), so we can use the same interface for source
	 * and dest, which solves the fact that we don't know the direction.
	 * ERRATA: Even if useless for memory transfers, the PERID must not
	 * match that of another channel. If it does, it could lead to
	 * spurious flag status.
	 * For the SAMA7G5x case, the SIF and DIF fields are no longer used.
	 * Thus, there is no need to have the SIF/DIF interfaces here.
	 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
	 * zero.
	 */
	u32			chan_cc = AT_XDMAC_CC_PERID(0x7f)
					| AT_XDMAC_CC_DAM_UBS_AM
					| AT_XDMAC_CC_SAM_INCREMENTED_AM
					| AT_XDMAC_CC_MBSIZE_SIXTEEN
					| AT_XDMAC_CC_MEMSET_HW_MODE
					| AT_XDMAC_CC_TYPE_MEM_TRAN;

	dwidth = at_xdmac_align_width(chan, dst_addr);

	if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
		dev_err(chan2dev(chan),
			"%s: Transfer too large, aborting...\n",
			__func__);
		return NULL;
	}

	spin_lock_irqsave(&atchan->lock, flags);
	desc = at_xdmac_get_desc(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
	if (!desc) {
		dev_err(chan2dev(chan), "can't get descriptor\n");
		return NULL;
	}

	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

	/* Only the first byte of value is to be used according to dmaengine */
	pattern = (char)value;

	ublen = len >> dwidth;

	desc->lld.mbr_da = dst_addr;
	desc->lld.mbr_ds = (pattern << 24) |
			   (pattern << 16) |
			   (pattern << 8) |
			   pattern;
	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
		| AT_XDMAC_MBR_UBC_NDEN
		| AT_XDMAC_MBR_UBC_NSEN
		| ublen;
	desc->lld.mbr_cfg = chan_cc;

	dev_dbg(chan2dev(chan),
		"%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
		desc->lld.mbr_cfg);

	return desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
			 size_t len, unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*desc;

	dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
		__func__, &dest, len, value, flags);

	if (!len)
		return NULL;

	desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
	if (!desc)
		return NULL;

	list_add_tail(&desc->desc_node, &desc->descs_list);

	desc->tx_dma_desc.cookie = -EBUSY;
	desc->tx_dma_desc.flags = flags;
	desc->xfer_size = len;

	return &desc->tx_dma_desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len, int value,
			    unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*desc, *pdesc = NULL,
				*ppdesc = NULL, *first = NULL;
	struct scatterlist	*sg, *psg = NULL, *ppsg = NULL;
	size_t			stride = 0, pstride = 0, len = 0;
	int			i;

	if (!sgl)
		return NULL;

	dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
		__func__, sg_len, value, flags);

	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
			__func__, &sg_dma_address(sg), sg_dma_len(sg),
			value, flags);
		desc = at_xdmac_memset_create_desc(chan, atchan,
						   sg_dma_address(sg),
						   sg_dma_len(sg),
						   value);
		if (!desc) {
			if (first)
				list_splice_tail_init(&first->descs_list,
						      &atchan->free_descs_list);
			return NULL;
		}

		if (!first)
			first = desc;

		/* Update our strides */
		pstride = stride;
		if (psg)
			stride = sg_dma_address(sg) -
				(sg_dma_address(psg) + sg_dma_len(psg));

		/*
		 * The scatterlist API gives us only the address and
		 * length of each element.
		 *
		 * Unfortunately, we don't have the stride, which we
		 * will need to compute.
		 *
		 * That makes us end up in a situation like this one:
		 *    len    stride    len    stride    len
		 * +-------+        +-------+        +-------+
		 * |  N-2  |        |  N-1  |        |   N   |
		 * +-------+        +-------+        +-------+
		 *
		 * We need all these three elements (N-2, N-1 and N)
		 * to actually take the decision on whether we need to
		 * queue N-1 or reuse N-2.
		 *
		 * We will only consider N if it is the last element.
		 */
		if (ppdesc && pdesc) {
			if ((stride == pstride) &&
			    (sg_dma_len(ppsg) == sg_dma_len(psg))) {
				dev_dbg(chan2dev(chan),
					"%s: desc 0x%p can be merged with desc 0x%p\n",
					__func__, pdesc, ppdesc);

				/*
				 * Increment the block count of the
				 * N-2 descriptor
				 */
				at_xdmac_increment_block_count(chan, ppdesc);
				ppdesc->lld.mbr_dus = stride;

				/*
				 * Put back the N-1 descriptor in the
				 * free descriptor list
				 */
				list_add_tail(&pdesc->desc_node,
					      &atchan->free_descs_list);

				/*
				 * Make our N-1 descriptor pointer
				 * point to the N-2 since they were
				 * actually merged.
				 */
				pdesc = ppdesc;

			/*
			 * Rule out the case where we don't have
			 * pstride computed yet (our second sg
			 * element)
			 *
			 * We also want to catch the case where there
			 * would be a negative stride,
			 */
			} else if (pstride ||
				   sg_dma_address(sg) < sg_dma_address(psg)) {
				/*
				 * Queue the N-1 descriptor after the
				 * N-2
				 */
				at_xdmac_queue_desc(chan, ppdesc, pdesc);

				/*
				 * Add the N-1 descriptor to the list
				 * of the descriptors used for this
				 * transfer
				 */
				list_add_tail(&desc->desc_node,
					      &first->descs_list);
				dev_dbg(chan2dev(chan),
					"%s: add desc 0x%p to descs_list 0x%p\n",
					__func__, desc, first);
			}

			/*
			 * If we are the last element, just see if we have the
			 * same size as the previous element.
			 *
			 * If so, we can merge it with the previous descriptor
			 * since we don't care about the stride anymore.
			 */
			if ((i == (sg_len - 1)) &&
			    sg_dma_len(psg) == sg_dma_len(sg)) {
				dev_dbg(chan2dev(chan),
					"%s: desc 0x%p can be merged with desc 0x%p\n",
					__func__, desc, pdesc);

				/*
				 * Increment the block count of the N-1
				 * descriptor
				 */
				at_xdmac_increment_block_count(chan, pdesc);
				pdesc->lld.mbr_dus = stride;

				/*
				 * Put back the N descriptor in the
				 * free descriptor list
				 */
				list_add_tail(&desc->desc_node,
					      &atchan->free_descs_list);
			}
		}

		/* Update our descriptors */
		ppdesc = pdesc;
		pdesc = desc;

		/* Update our scatter pointers */
		ppsg = psg;
		psg = sg;

		len += sg_dma_len(sg);
	}

	first->tx_dma_desc.cookie = -EBUSY;
	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}

static enum dma_status
at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc	*desc, *_desc, *iter;
	struct list_head	*descs_list;
	enum dma_status		ret;
	int			residue, retry, pm_status;
	u32			cur_nda, check_nda, cur_ubc, mask, value;
	u8			dwidth = 0;
	unsigned long		flags;
	bool			initd;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	pm_status = pm_runtime_resume_and_get(atxdmac->dev);
	if (pm_status < 0)
		return DMA_ERROR;

	spin_lock_irqsave(&atchan->lock, flags);

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);

	/*
	 * If the transfer has not been started yet, there is no need to
	 * compute the residue: it is simply the transfer length.
	 */
	if (!desc->active_xfer) {
		dma_set_residue(txstate, desc->xfer_size);
		goto spin_unlock;
	}

	residue = desc->xfer_size;
	/*
	 * Flush FIFO: only relevant when the transfer is source peripheral
	 * synchronized. Flush is needed before reading CUBC because data in
	 * the FIFO are not reported by CUBC. Reporting a residue of the
	 * transfer length while we have data in FIFO can cause issues.
	 * Usecase: the Atmel USART has a timeout which means characters have
	 * been received but no more characters arrive for a while. On
	 * timeout, it requests the residue. If the data are in the DMA FIFO,
	 * we will return a residue of the transfer length. It means no data
	 * were received. If an application is waiting for these data, it will
	 * hang since we won't have another USART timeout without receiving
	 * new data.
	 */
	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
	if ((desc->lld.mbr_cfg & mask) == value) {
		at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
			cpu_relax();
	}

	/*
	 * The easiest way to compute the residue should be to pause the DMA
	 * but doing this can lead to missing some data as some devices don't
	 * have FIFO.
	 * We need to read several registers because:
	 * - DMA is running therefore a descriptor change is possible while
	 *   reading these registers
	 * - When the block transfer is done, the value of the CUBC register
	 *   is set to its initial value until the fetch of the next descriptor.
	 *   This value will corrupt the residue calculation so we have to skip
	 *   it.
	 *
	 * INITD --------                    ------------
	 *              |____________________|
	 *       _______________________  _______________
	 * NDA       @desc2             \/   @desc3
	 *       _______________________/\_______________
	 *       __________  ___________  _______________
	 * CUBC       0    \/ MAX desc1 \/  MAX desc2
	 *       __________/\___________/\_______________
	 *
	 * Since descriptors are aligned on 64 bits, we can assume that
	 * the update of NDA and CUBC is atomic.
	 * Memory barriers are used to ensure the read order of the registers.
	 * A max number of retries is set because, although unlikely, this
	 * could otherwise never end.
	 */
	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
		rmb();
		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
		rmb();
		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
		rmb();
		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
		rmb();

		if ((check_nda == cur_nda) && initd)
			break;
	}

	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
		ret = DMA_ERROR;
		goto spin_unlock;
	}

	/*
	 * Flush FIFO: only relevant when the transfer is source peripheral
	 * synchronized. Another flush is needed here because CUBC is updated
	 * when the controller sends the data write command. It can lead to
	 * reporting data that are not written in the memory or the device.
	 * The FIFO flush ensures that data are really written.
	 */
	if ((desc->lld.mbr_cfg & mask) == value) {
		at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
			cpu_relax();
	}

	/*
	 * Remove size of all microblocks already transferred and the current
	 * one. Then add the remaining size to transfer of the current
	 * microblock.
	 */
	descs_list = &desc->descs_list;
	list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
		dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
		residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
		if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
			desc = iter;
			break;
		}
	}
	residue += cur_ubc << dwidth;
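	/*
	 * Worked example (illustrative numbers): for a 64 KiB transfer split
	 * into two 32 KiB microblocks with dwidth = 2 (4-byte words), once
	 * the loop has walked past both descriptors and CUBC reads 0x1000
	 * words remaining in the current one, the result is
	 * residue = 0x10000 - 0x8000 - 0x8000 + (0x1000 << 2) = 0x4000 bytes.
	 */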
	dma_set_residue(txstate, residue);

	dev_dbg(chan2dev(chan),
		"%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
		__func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, flags);
	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	return ret;
}

static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;

	/*
	 * If the channel is enabled, do nothing: advance_work will be
	 * triggered after the interrupt.
	 */
	if (at_xdmac_chan_is_enabled(atchan) || list_empty(&atchan->xfers_list))
		return;

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
				xfer_node);
	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
	if (!desc->active_xfer)
		at_xdmac_start_xfer(atchan, desc);
}

static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc		*desc;
	struct dma_async_tx_descriptor	*txd;

	spin_lock_irq(&atchan->lock);
	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
		__func__, atchan->irq_status);
	if (list_empty(&atchan->xfers_list)) {
		spin_unlock_irq(&atchan->lock);
		return;
	}
	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
				xfer_node);
	spin_unlock_irq(&atchan->lock);
	txd = &desc->tx_dma_desc;
	if (txd->flags & DMA_PREP_INTERRUPT)
		dmaengine_desc_get_callback_invoke(txd, NULL);
}

/* Called with atchan->lock held. */
static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
{
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc	*bad_desc;
	int			ret;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to continue with other
	 * descriptors queued (if any).
	 */
	if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
		dev_err(chan2dev(&atchan->chan), "read bus error!!!");
	if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
		dev_err(chan2dev(&atchan->chan), "write bus error!!!");
	if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
		dev_err(chan2dev(&atchan->chan), "request overflow error!!!");

	/* Channel must be disabled first as it's not done automatically */
	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
		cpu_relax();

	bad_desc = list_first_entry(&atchan->xfers_list,
				    struct at_xdmac_desc,
				    xfer_node);

	/* Print bad descriptor's details if needed */
	dev_dbg(chan2dev(&atchan->chan),
		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
		__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
		bad_desc->lld.mbr_ubc);

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	/* Then continue with usual descriptor management */
}

static void at_xdmac_tasklet(struct tasklet_struct *t)
{
	struct at_xdmac_chan		*atchan = from_tasklet(atchan, t, tasklet);
	struct at_xdmac			*atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc		*desc;
	struct dma_async_tx_descriptor	*txd;
	u32				error_mask;

	if (at_xdmac_chan_is_cyclic(atchan))
		return at_xdmac_handle_cyclic(atchan);

	error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS |
		AT_XDMAC_CIS_ROIS;

	spin_lock_irq(&atchan->lock);

	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
		__func__, atchan->irq_status);

	if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
	    !(atchan->irq_status & error_mask)) {
		spin_unlock_irq(&atchan->lock);
		return;
	}

	if (atchan->irq_status & error_mask)
		at_xdmac_handle_error(atchan);

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
				xfer_node);
	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
	if (!desc->active_xfer) {
		dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
		spin_unlock_irq(&atchan->lock);
		return;
	}

	txd = &desc->tx_dma_desc;
	dma_cookie_complete(txd);
	/* Remove the transfer from the transfer list. */
	list_del(&desc->xfer_node);
	spin_unlock_irq(&atchan->lock);

	if (txd->flags & DMA_PREP_INTERRUPT)
		dmaengine_desc_get_callback_invoke(txd, NULL);

	dma_run_dependencies(txd);

	spin_lock_irq(&atchan->lock);
	/* Move the xfer descriptors into the free descriptors list. */
	list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
	at_xdmac_advance_work(atchan);
	spin_unlock_irq(&atchan->lock);

	/*
	 * Decrement runtime PM ref counter incremented in
	 * at_xdmac_start_xfer().
	 */
	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);
}

static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
{
	struct at_xdmac		*atxdmac = (struct at_xdmac *)dev_id;
	struct at_xdmac_chan	*atchan;
	u32			imr, status, pending;
	u32			chan_imr, chan_status;
	int			i, ret = IRQ_NONE;

	do {
		imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
		status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
		pending = status & imr;

		dev_vdbg(atxdmac->dma.dev,
			 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
			 __func__, status, imr, pending);

		if (!pending)
			break;

		/* We have to find which channel has generated the interrupt. */
		for (i = 0; i < atxdmac->dma.chancnt; i++) {
			if (!((1 << i) & pending))
				continue;

			atchan = &atxdmac->chan[i];
			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
			atchan->irq_status = chan_status & chan_imr;
			dev_vdbg(atxdmac->dma.dev,
				 "%s: chan%d: imr=0x%x, status=0x%x\n",
				 __func__, i, chan_imr, chan_status);
			dev_vdbg(chan2dev(&atchan->chan),
				 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
				 __func__,
				 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

			if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);

			tasklet_schedule(&atchan->tasklet);
			ret = IRQ_HANDLED;
		}
	} while (pending);

	return ret;
}

static void at_xdmac_issue_pending(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	unsigned long flags;

	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}
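
/*
 * A typical slave consumer drives the hooks below through the generic
 * dmaengine client API. Illustrative sketch only, not part of this driver;
 * the "rx" channel name and the FIFO address are made up:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= periph_fifo_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.src_maxburst	= 1,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);		// ->device_config()
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);			// ->device_issue_pending()
 */

/*
 * dmaengine ->device_config() hook: validate and cache the slave
 * configuration under the channel lock; it applies to transfers prepared
 * after this call.
 */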
static int at_xdmac_device_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	int			ret;
	unsigned long		flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	ret = at_xdmac_set_slave_config(chan, config);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return ret;
}
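
/*
 * Request a channel pause through the layout's global read/write suspend
 * register (grws) and busy-wait until any in-progress read/write
 * transaction has drained (CC.WRIP and CC.RDIP both clear).
 */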
static void at_xdmac_device_pause_set(struct at_xdmac *atxdmac,
				      struct at_xdmac_chan *atchan)
{
	at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC) &
	       (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
		cpu_relax();
}

static void at_xdmac_device_pause_internal(struct at_xdmac_chan *atchan)
{
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	set_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
	at_xdmac_device_pause_set(atxdmac, atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}
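
/*
 * dmaengine ->device_pause() hook. Idempotent thanks to the IS_PAUSED bit.
 * The per-descriptor runtime PM references taken in at_xdmac_start_xfer()
 * are dropped while paused so the controller may autosuspend.
 */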
static int at_xdmac_device_pause(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long		flags;
	int			ret;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
		return 0;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_device_pause_set(atxdmac, atchan);
	/* Decrement runtime PM ref counter for each active descriptor. */
	at_xdmac_runtime_suspend_descriptors(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	return 0;
}

static void at_xdmac_device_resume_internal(struct at_xdmac_chan *atchan)
{
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
	clear_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
	spin_unlock_irqrestore(&atchan->lock, flags);
}
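
/*
 * dmaengine ->device_resume() hook, the counterpart of
 * at_xdmac_device_pause(): re-takes the per-descriptor runtime PM
 * references, then clears the pause request via the global read/write
 * resume register (grwr).
 */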
static int at_xdmac_device_resume(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long		flags;
	int			ret;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&atchan->lock, flags);
	if (!at_xdmac_chan_is_paused(atchan))
		goto unlock;

	/* Increment runtime PM ref counter for each active descriptor. */
	ret = at_xdmac_runtime_resume_descriptors(atchan);
	if (ret < 0)
		goto unlock;

	at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);

unlock:
	spin_unlock_irqrestore(&atchan->lock, flags);
	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	return ret;
}
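
/*
 * dmaengine ->device_terminate_all() hook: disable the channel, wait for
 * the controller to quiesce, then return every queued descriptor to the
 * free list, dropping the runtime PM reference that the active descriptor
 * was holding.
 */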
static int at_xdmac_device_terminate_all(struct dma_chan *chan)
{
	struct at_xdmac_desc	*desc, *_desc;
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long		flags;
	int			ret;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
		cpu_relax();

	/* Cancel all pending transfers. */
	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
		list_del(&desc->xfer_node);
		list_splice_tail_init(&desc->descs_list,
				      &atchan->free_descs_list);
		/*
		 * We incremented the runtime PM reference count on
		 * at_xdmac_start_xfer() for this descriptor. Now it's time
		 * to release it.
		 */
		if (desc->active_xfer) {
			pm_runtime_mark_last_busy(atxdmac->dev);
			pm_runtime_put_autosuspend(atxdmac->dev);
		}
	}

	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
	spin_unlock_irqrestore(&atchan->lock, flags);

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	return 0;
}
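
/*
 * dmaengine ->device_alloc_chan_resources() hook: pre-allocate
 * init_nr_desc_per_channel descriptors from the DMA pool. Returns the
 * number of descriptors actually allocated, or -EIO if the channel is
 * still enabled/in use or no descriptor at all could be allocated.
 */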
static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*desc;
	int			i;

	if (at_xdmac_chan_is_enabled(atchan)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel enabled)\n");
		return -EIO;
	}

	if (!list_empty(&atchan->free_descs_list)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel not free from a previous use)\n");
		return -EIO;
	}

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
		if (!desc) {
			if (i == 0) {
				dev_warn(chan2dev(chan),
					 "can't allocate any descriptors\n");
				return -EIO;
			}
			dev_warn(chan2dev(chan),
				 "only %d descriptors have been allocated\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &atchan->free_descs_list);
	}

	dma_cookie_init(chan);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

	return i;
}
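
/*
 * dmaengine ->device_free_chan_resources() hook. By the time the core
 * calls this, all transfers are expected to have been terminated, so
 * every descriptor should be back on free_descs_list.
 */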
static void at_xdmac_free_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(chan->device);
	struct at_xdmac_desc	*desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
		list_del(&desc->desc_node);
		dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
	}
}
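
/*
 * Tune AXI priorities (GCFG) and arbiter weights (GWAC) on controllers
 * whose layout provides axi_config (sama7g5). An instance carrying a
 * "dma-requests" DT property is treated as dedicated to mem2mem
 * transfers; otherwise the per2mem tuning is applied.
 */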
static void at_xdmac_axi_config(struct platform_device *pdev)
{
	struct at_xdmac	*atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
	bool dev_m2m = false;
	u32 dma_requests;

	if (!atxdmac->layout->axi_config)
		return; /* Not supported */

	if (!of_property_read_u32(pdev->dev.of_node, "dma-requests",
				  &dma_requests)) {
		dev_info(&pdev->dev, "controller in mem2mem mode.\n");
		dev_m2m = true;
	}

	if (dev_m2m) {
		at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M);
		at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M);
	} else {
		at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M);
		at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M);
	}
}
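
/*
 * System sleep ->prepare() callback: abort the suspend (-EAGAIN) while a
 * non-cyclic transfer is still running, so it can be retried later rather
 * than cutting the controller off mid-transfer. Cyclic channels are dealt
 * with in atmel_xdmac_suspend() below.
 */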
static int __maybe_unused atmel_xdmac_prepare(struct device *dev)
{
	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
	struct dma_chan		*chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);

		/* Wait for transfer completion, except in cyclic case. */
		if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
{
	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
	struct dma_chan		*chan, *_chan;
	int			ret;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return ret;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);

		atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			if (!at_xdmac_chan_is_paused(atchan)) {
				at_xdmac_device_pause_internal(atchan);
				at_xdmac_runtime_suspend_descriptors(atchan);
			}
			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
		}
	}
	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);

	at_xdmac_off(atxdmac, false);
	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_noidle(atxdmac->dev);
	clk_disable_unprepare(atxdmac->clk);

	return 0;
}

static int __maybe_unused atmel_xdmac_resume(struct device *dev)
{
	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
	struct at_xdmac_chan	*atchan;
	struct dma_chan		*chan, *_chan;
	struct platform_device	*pdev = container_of(dev, struct platform_device, dev);
	int			i;
	int			ret;

	ret = clk_prepare_enable(atxdmac->clk);
	if (ret)
		return ret;

	pm_runtime_get_noresume(atxdmac->dev);

	at_xdmac_axi_config(pdev);

	/* Clear pending interrupts. */
	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		atchan = &atxdmac->chan[i];
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}

	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		atchan = to_at_xdmac_chan(chan);

		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			/*
			 * Resume only channels not explicitly paused by
			 * consumers.
			 */
			if (at_xdmac_chan_is_paused_internal(atchan)) {
				ret = at_xdmac_runtime_resume_descriptors(atchan);
				if (ret < 0)
					return ret;
				at_xdmac_device_resume_internal(atchan);
			}

			/*
			 * We may resume from a deep sleep state where power
			 * to the DMA controller is cut off. Thus, restore
			 * the suspend state of channels set through the
			 * dmaengine API.
			 */
			else if (at_xdmac_chan_is_paused(atchan))
				at_xdmac_device_pause_set(atxdmac, atchan);

			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
			wmb();
			at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
		}
	}

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	return 0;
}
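
/*
 * Runtime PM only gates the peripheral clock, presumably because register
 * contents are retained while power stays on, so nothing is saved or
 * restored here. The clock was prepared at probe/system-resume time,
 * hence the atomic clk_disable()/clk_enable() pair.
 */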
static int __maybe_unused atmel_xdmac_runtime_suspend(struct device *dev)
{
	struct at_xdmac *atxdmac = dev_get_drvdata(dev);

	clk_disable(atxdmac->clk);

	return 0;
}

static int __maybe_unused atmel_xdmac_runtime_resume(struct device *dev)
{
	struct at_xdmac *atxdmac = dev_get_drvdata(dev);

	return clk_enable(atxdmac->clk);
}
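
/*
 * Probe. The channel count must be read from GTYPE before struct at_xdmac
 * is allocated because the structure ends in a flexible array of channels.
 * A runtime PM reference (pm_runtime_get_noresume()) is held until setup
 * completes.
 */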
static int at_xdmac_probe(struct platform_device *pdev)
{
	struct at_xdmac	*atxdmac;
	int		irq, nr_channels, i, ret;
	void __iomem	*base;
	u32		reg;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/*
	 * Read number of xdmac channels, read helper function can't be used
	 * since atxdmac is not yet allocated and we need to know the number
	 * of channels to do the allocation.
	 */
	reg = readl_relaxed(base + AT_XDMAC_GTYPE);
	nr_channels = AT_XDMAC_NB_CH(reg);
	if (nr_channels > AT_XDMAC_MAX_CHAN) {
		dev_err(&pdev->dev, "invalid number of channels (%u)\n",
			nr_channels);
		return -EINVAL;
	}

	atxdmac = devm_kzalloc(&pdev->dev,
			       struct_size(atxdmac, chan, nr_channels),
			       GFP_KERNEL);
	if (!atxdmac) {
		dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
		return -ENOMEM;
	}

	atxdmac->regs = base;
	atxdmac->irq = irq;
	atxdmac->dev = &pdev->dev;

	atxdmac->layout = of_device_get_match_data(&pdev->dev);
	if (!atxdmac->layout)
		return -ENODEV;

	atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atxdmac->clk)) {
		dev_err(&pdev->dev, "can't get dma_clk\n");
		return PTR_ERR(atxdmac->clk);
	}

	/* Do not use dev res to prevent races with tasklet */
	ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "can't request irq\n");
		return ret;
	}

	ret = clk_prepare_enable(atxdmac->clk);
	if (ret) {
		dev_err(&pdev->dev, "can't prepare or enable clock\n");
		goto err_free_irq;
	}

	atxdmac->at_xdmac_desc_pool =
		dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
				sizeof(struct at_xdmac_desc), 4, 0);
	if (!atxdmac->at_xdmac_desc_pool) {
		dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
		ret = -ENOMEM;
		goto err_clk_disable;
	}

	dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
	/*
	 * Without DMA_PRIVATE the driver is not able to allocate more than
	 * one channel, second allocation fails in private_candidate.
	 */
	dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
	atxdmac->dma.dev = &pdev->dev;
	atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
	atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
	atxdmac->dma.device_tx_status = at_xdmac_tx_status;
	atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
	atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
	atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
	atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
	atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset;
	atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg;
	atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
	atxdmac->dma.device_config = at_xdmac_device_config;
	atxdmac->dma.device_pause = at_xdmac_device_pause;
	atxdmac->dma.device_resume = at_xdmac_device_resume;
	atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all;
	atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	platform_set_drvdata(pdev, atxdmac);

	pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);

	/* Init channels. */
	INIT_LIST_HEAD(&atxdmac->dma.channels);

	/* Disable all chans and interrupts. */
	at_xdmac_off(atxdmac, true);

	for (i = 0; i < nr_channels; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		atchan->chan.device = &atxdmac->dma;
		list_add_tail(&atchan->chan.device_node,
			      &atxdmac->dma.channels);

		atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
		atchan->mask = 1 << i;

		spin_lock_init(&atchan->lock);
		INIT_LIST_HEAD(&atchan->xfers_list);
		INIT_LIST_HEAD(&atchan->free_descs_list);
		tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);

		/* Clear pending interrupts. */
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}

	ret = dma_async_device_register(&atxdmac->dma);
	if (ret) {
		dev_err(&pdev->dev, "fail to register DMA engine device\n");
		goto err_pm_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 at_xdmac_xlate, atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "could not register of dma controller\n");
		goto err_dma_unregister;
	}

	dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
		 nr_channels, atxdmac->regs);

	at_xdmac_axi_config(pdev);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&atxdmac->dma);
err_pm_disable:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
err_clk_disable:
	clk_disable_unprepare(atxdmac->clk);
err_free_irq:
	free_irq(atxdmac->irq, atxdmac);
	return ret;
}

static int at_xdmac_remove(struct platform_device *pdev)
{
	struct at_xdmac	*atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
	int		i;

	at_xdmac_off(atxdmac, true);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&atxdmac->dma);
	pm_runtime_disable(atxdmac->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	clk_disable_unprepare(atxdmac->clk);

	free_irq(atxdmac->irq, atxdmac);

	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		tasklet_kill(&atchan->tasklet);
		at_xdmac_free_chan_resources(&atchan->chan);
	}

	return 0;
}

static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = {
	.prepare	= atmel_xdmac_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
	SET_RUNTIME_PM_OPS(atmel_xdmac_runtime_suspend,
			   atmel_xdmac_runtime_resume, NULL)
};

static const struct of_device_id atmel_xdmac_dt_ids[] = {
	{
		.compatible = "atmel,sama5d4-dma",
		.data = &at_xdmac_sama5d4_layout,
	}, {
		.compatible = "microchip,sama7g5-dma",
		.data = &at_xdmac_sama7g5_layout,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);

static struct platform_driver at_xdmac_driver = {
	.probe		= at_xdmac_probe,
	.remove		= at_xdmac_remove,
	.driver = {
		.name		= "at_xdmac",
		.of_match_table	= of_match_ptr(atmel_xdmac_dt_ids),
		.pm		= pm_ptr(&atmel_xdmac_dev_pm_ops),
	}
};

static int __init at_xdmac_init(void)
{
	return platform_driver_register(&at_xdmac_driver);
}
subsys_initcall(at_xdmac_init);

static void __exit at_xdmac_exit(void)
{
	platform_driver_unregister(&at_xdmac_driver);
}
module_exit(at_xdmac_exit);

MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
MODULE_LICENSE("GPL");