// SPDX-License-Identifier: GPL-2.0+
/*
 *  Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
 *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#define pr_fmt(fmt) "udma: " fmt

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <malloc.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <dm/of_access.h>
#include <dma.h>
#include <dma-uclass.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/cppi5.h>
#include <linux/soc/ti/ti-udma.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "k3-udma-hwdef.h"
#include "k3-psil-priv.h"

#define K3_UDMA_MAX_RFLOWS 1024

struct udma_chan;

enum udma_mmr {
	MMR_GCFG = 0,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_LAST,
};

static const char * const mmr_names[] = {
	"gcfg", "rchanrt", "tchanrt"
};

struct udma_tchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *t_ring; /* Transmit ring */
	struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
};

struct udma_rchan {
	void __iomem *reg_rt;

	int id;
};

#define UDMA_FLAG_PDMA_ACC32		BIT(0)
#define UDMA_FLAG_PDMA_BURST		BIT(1)
#define UDMA_FLAG_TDTYPE		BIT(2)

struct udma_match_data {
	u32 psil_base;
	bool enable_memcpy_support;
	u32 flags;
	u32 statictr_z_mask;
	u32 rchan_oes_offset;

	u8 tpl_levels;
	u32 level_start_idx[];
};

struct udma_rflow {
	int id;

	struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
	struct k3_nav_ring *r_ring; /* Receive ring */
};

enum udma_rm_range {
	RM_RANGE_TCHAN = 0,
	RM_RANGE_RCHAN,
	RM_RANGE_RFLOW,
	RM_RANGE_LAST,
};

struct udma_tisci_rm {
	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
	u32  tisci_dev_id;

	/* tisci information for PSI-L thread pairing/unpairing */
	const struct ti_sci_rm_psil_ops *tisci_psil_ops;
	u32  tisci_navss_dev_id;

	struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
};

struct udma_dev {
	struct udevice *dev;
	void __iomem *mmrs[MMR_LAST];

	struct udma_tisci_rm tisci_rm;
	struct k3_nav_ringacc *ringacc;

	u32 features;

	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_map;
	unsigned long *rflow_map_reserved;

	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_match_data *match_data;

	struct udma_chan *channels;
	u32 psil_base;

	u32 ch_count;
};

struct udma_chan_config {
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
	int remote_thread_id;
	u32 atype;
	u32 src_thread;
	u32 dst_thread;
	enum psil_endpoint_type ep_type;
	enum udma_tp_level channel_tpl; /* Channel Throughput Level */

	enum dma_direction dir;

	unsigned int pkt_mode:1; /* TR or packet */
	unsigned int needs_epib:1; /* EPIB is needed for the communication or not */
	unsigned int enable_acc32:1;
	unsigned int enable_burst:1;
	unsigned int notdpkt:1; /* Suppress sending TDC packet */
};

struct udma_chan {
	struct udma_dev *ud;
	char name[20];

	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	struct ti_udma_drv_chan_cfg_data cfg_data;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	struct udma_chan_config config;

	u32 id;

	struct cppi5_host_desc_t *desc_tx;
	bool in_use;
	void	*desc_rx;
	u32	num_rx_bufs;
	u32	desc_rx_cur;
};

#define UDMA_CH_1000(ch)		(ch * 0x1000)
#define UDMA_CH_100(ch)			(ch * 0x100)
#define UDMA_CH_40(ch)			(ch * 0x40)

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM 4
#endif

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	u32 v;

	v = __raw_readl(base + reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
	return v;
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
	__raw_writel(val, base + reg);
}

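/*
 * Read-modify-write helper: applies @val under @mask and skips the
 * register write entirely when the masked update leaves the value
 * unchanged.
 */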
static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = udma_read(base, reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		udma_write(base, reg, tmp);
}

/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
{
	if (!tchan)
		return 0;
	return udma_read(tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_tchan *tchan,
				      int reg, u32 val)
{
	if (!tchan)
		return;
	udma_write(tchan->reg_rt, reg, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
{
	if (!rchan)
		return 0;
	return udma_read(rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_rchan *rchan,
				      int reg, u32 val)
{
	if (!rchan)
		return;
	udma_write(rchan->reg_rt, reg, val);
}

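/*
 * PSI-L thread pairing/unpairing goes through the TISCI resource
 * manager. Destination threads are distinguished by a high bit in the
 * thread ID (UDMA_PSIL_DST_THREAD_ID_OFFSET), OR-ed in here so callers
 * can pass plain thread indexes.
 */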
static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
				       u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
					 u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}

static inline char *udma_get_dir_text(enum dma_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	case DMA_DEV_TO_DEV:
		return "DEV_TO_DEV";
	default:
		break;
	}

	return "invalid";
}

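/*
 * A channel counts as running if the EN bit is set in the realtime CTL
 * register of whichever half (TX, RX or both) the configured direction
 * uses.
 */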
static inline bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, rrt_ctl,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, trt_ctl,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		break;
	default:
		break;
	}

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_nav_ring *ring = NULL;
	int ret = -ENOENT;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring && k3_nav_ringacc_ring_get_occ(ring))
		ret = k3_nav_ringacc_ring_pop(ring, addr);

	return ret;
}

static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_nav_ring *ring1 = NULL;
	struct k3_nav_ring *ring2 = NULL;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring1 = uc->rflow->fd_ring;
		ring2 = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring1)
		k3_nav_ringacc_ring_reset_dma(ring1, 0);
	if (ring2)
		k3_nav_ringacc_ring_reset(ring2);
}

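/*
 * The UDMA realtime byte/packet counters decrement by the value written
 * to them, so writing back the value just read zeroes each counter.
 */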
static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
	}

	uc->bcnt = 0;
}

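/*
 * Disable the channel immediately, without a graceful teardown. Also
 * used before (re)start to clear a previously requested teardown.
 */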
static inline int udma_stop_hard(struct udma_chan *uc)
{
	pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int udma_start(struct udma_chan *uc)
{
	/* Channel is already running, no need to proceed further */
	if (udma_is_chan_running(uc))
		goto out;

	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->config.dir));

	/* Make sure that we clear the teardown bit, if it is set */
	udma_stop_hard(uc);

	/* Reset all counters */
	udma_reset_counters(uc);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_CTL_REG),
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		/* Enable remote */
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_CTL_REG),
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	default:
		return -EINVAL;
	}

	pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
out:
	return 0;
}

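/*
 * Graceful stop: request a teardown (TDOWN on the TX side, peer
 * TEARDOWN on the RX side) and, when @sync is set, busy-wait roughly
 * 1000us for the EN bit to clear before checking that the peer has
 * stopped as well.
 */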
static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
			   UDMA_CHAN_RT_CTL_EN |
			   UDMA_CHAN_RT_CTL_TDOWN);

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf(" %s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
			   UDMA_PEER_RT_EN_ENABLE |
			   UDMA_PEER_RT_EN_TEARDOWN);

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf("%s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline int udma_stop(struct udma_chan *uc)
{
	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->config.dir));

	udma_reset_counters(uc);
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_stop_dev2mem(uc, true);
		break;
	case DMA_MEM_TO_DEV:
		udma_stop_mem2dev(uc, true);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

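/*
 * Busy-wait until a completed descriptor can be popped from the
 * completion ring, printing a '.' roughly once per second of waiting.
 */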
static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
{
	int i = 1;

	while (udma_pop_from_ring(uc, paddr)) {
		udelay(1);
		if (!(i % 1000000))
			printf(".");
		i++;
	}
}

static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
{
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	if (id >= 0) {
		if (test_bit(id, ud->rflow_map)) {
			dev_err(ud->dev, "rflow%d is in use\n", id);
			return ERR_PTR(-ENOENT);
		}
	} else {
		bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
			  ud->rflow_cnt);

		id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
		if (id >= ud->rflow_cnt)
			return ERR_PTR(-ENOENT);
	}

	__set_bit(id, ud->rflow_map);
	return &ud->rflows[id];
}

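/*
 * Generates __udma_reserve_tchan()/__udma_reserve_rchan(): reserve a
 * specific channel when @id >= 0 (failing with -ENOENT if it is taken),
 * otherwise grab the first free bit in the resource bitmap. For
 * example, UDMA_RESERVE_RESOURCE(tchan) expands to a function that
 * scans ud->tchan_map and returns &ud->tchans[id].
 */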
#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, #res "%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		id = find_first_zero_bit(ud->res##_map, ud->res##_cnt);	\
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	__set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, -1);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, -1);
	if (IS_ERR(uc->rchan))
		return PTR_ERR(uc->rchan);

	pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);

	return 0;
}

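/*
 * MEM_TO_MEM channels need a tchan and an rchan with the same index, so
 * scan both bitmaps for an index that is free in each.
 */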
static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	for (chan_id = 0; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	__set_bit(chan_id, ud->tchan_map);
	__set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	if (!uc->rchan)
		dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);

	uc->rflow = __udma_reserve_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow))
		return PTR_ERR(uc->rflow);

	pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
	return 0;
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		__clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		__clear_bit(uc->tchan->id, ud->tchan_map);
		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__clear_bit(uc->rflow->id, ud->rflow_map);
		uc->rflow = NULL;
	}
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

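/*
 * Reserve a tchan and request its TX/TX-completion ring pair (the TX
 * ring index matches the tchan index), then configure both as 16-deep
 * rings of 8-byte elements in RING mode.
 */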
static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
						&uc->tchan->t_ring,
						&uc->tchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_tx_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_tx_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	if (uc->rflow) {
		k3_nav_ringacc_ring_free(uc->rflow->fd_ring);
		k3_nav_ringacc_ring_free(uc->rflow->r_ring);
		uc->rflow->fd_ring = NULL;
		uc->rflow->r_ring = NULL;

		udma_put_rflow(uc);
	}

	udma_put_rchan(uc);
}

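/*
 * Reserve an rchan plus the rflow with the same index, then request the
 * free-descriptor/receive ring pair. The fd_ring index is offset past
 * the rings used by the TX and external channels
 * (tchan_cnt + echan_cnt + rchan id).
 */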
static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	struct udma_rflow *rflow;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return 0;

	ret = udma_get_rflow(uc, uc->rchan->id);
	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;

	rflow = uc->rflow;
	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
						&rflow->fd_ring, &rflow->r_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_rx_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(rflow->r_ring);
	rflow->r_ring = NULL;
	k3_nav_ringacc_ring_free(rflow->fd_ring);
	rflow->fd_ring = NULL;
err_rx_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}

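/*
 * Configure the TX channel through the TISCI resource manager. The
 * fetch size is expressed in 32-bit words (hence the >> 2), and the
 * completion queue points at the tchan's TX-completion ring.
 */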
static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->config.pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->tchan->id;
	req.tx_chan_type = mode;
	if (uc->config.dir == DMA_MEM_TO_MEM)
		req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	else
		req.tx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
							  uc->config.psd_size,
							  0) >> 2;
	req.txcq_qnum = tc_ring;

	ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);

	return ret;
}

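/*
 * Configure the RX channel and, for slave transfers, its default flow:
 * the flow's destination queue is the receive ring and all four
 * free-descriptor queues point at the fd_ring. MEM_TO_MEM channels
 * complete on the TX side, so no flow is set up for them.
 */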
static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rflow->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rflow->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->config.pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->rchan->id;
	req.rx_chan_type = mode;
	if (uc->config.dir == DMA_MEM_TO_MEM) {
		req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		req.rxcq_qnum = tc_ring;
	} else {
		req.rx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
							  uc->config.psd_size,
							  0) >> 2;
		req.rxcq_qnum = rx_ring;
	}
	if (uc->rflow->id != uc->rchan->id && uc->config.dir != DMA_MEM_TO_MEM) {
		req.flowid_start = uc->rflow->id;
		req.flowid_cnt = 1;
	}

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
			uc->rchan->id, ret);
		return ret;
	}
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return ret;

	flow_req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;

	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;

	flow_req.rx_error_handling = 0;
	flow_req.rx_desc_type = 0;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = 2;
	flow_req.rx_src_tag_lo_sel = 4;
	flow_req.rx_dest_tag_hi_sel = 5;
	flow_req.rx_dest_tag_lo_sel = 4;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;
	flow_req.rx_ps_location = 0;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
						     &flow_req);
	if (ret)
		dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
			uc->rchan->id, uc->rflow->id, ret);

	return ret;
}

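/*
 * Full channel bring-up: reserve channels/rings per direction, derive
 * the PSI-L source and destination thread IDs (bit 15, 0x8000, marks a
 * destination thread), push the TISCI channel configuration and finally
 * pair the two PSI-L threads.
 */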
static int udma_alloc_chan_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	pr_debug("%s: chan:%d as %s\n",
		 __func__, uc->id, udma_get_dir_text(uc->config.dir));

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		uc->config.pkt_mode = false;
		ret = udma_get_chan_pair(uc);
		if (ret)
			return ret;

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= 0x8000;

		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;

		break;
	default:
		/* Can not happen */
		pr_debug("%s: chan:%d invalid direction (%u)\n",
			 __func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* We have channel indexes and rings */
	if (uc->config.dir == DMA_MEM_TO_MEM) {
		ret = udma_alloc_tchan_sci_req(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rchan_sci_req(uc);
		if (ret)
			goto err_free_res;
	} else {
		/* Slave transfer */
		if (uc->config.dir == DMA_MEM_TO_DEV) {
			ret = udma_alloc_tchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		} else {
			ret = udma_alloc_rchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		}
	}

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_free_res;
		}
	}

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
		goto err_free_res;
	}

	return 0;

err_free_res:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	uc->config.remote_thread_id = -1;
	return ret;
}

static void udma_free_chan_resources(struct udma_chan *uc)
{
	/* Hard reset UDMA channel */
	udma_stop_hard(uc);
	udma_reset_counters(uc);

	/* Release PSI-L pairing */
	udma_navss_psil_unpair(uc->ud, uc->config.src_thread, uc->config.dst_thread);

	/* Reset the rings for a new start */
	udma_reset_rings(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	uc->config.remote_thread_id = -1;
	uc->config.dir = DMA_MEM_TO_MEM;
}

static int udma_get_mmrs(struct udevice *dev)
{
	struct udma_dev *ud = dev_get_priv(dev);
	int i;

	for (i = 0; i < MMR_LAST; i++) {
		ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
				mmr_names[i]);
		if (!ud->mmrs[i])
			return -EINVAL;
	}

	return 0;
}

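/*
 * Discover resource counts from the GCFG capability registers (CAP2 at
 * 0x28, CAP3 at 0x2c: tchan/echan/rchan counts in 9-bit fields of CAP2,
 * rflow count in the low 14 bits of CAP3), allocate the tracking
 * bitmaps, then mark everything not assigned to this host by the TISCI
 * resource ranges as in-use. Returns the number of usable channels.
 */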
static int udma_setup_resources(struct udma_dev *ud)
{
	struct udevice *dev = ud->dev;
	int ch_count, i;
	u32 cap2, cap3;
	struct ti_sci_resource_desc *rm_desc;
	struct ti_sci_resource *rm_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	static const char * const range_names[] = { "ti,sci-rm-range-tchan",
						    "ti,sci-rm-range-rchan",
						    "ti,sci-rm-range-rflow" };

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	ud->rflow_cnt = cap3 & 0x3fff;
	ud->tchan_cnt = cap2 & 0x1ff;
	ud->echan_cnt = (cap2 >> 9) & 0x1ff;
	ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
	ch_count  = ud->tchan_cnt + ud->rchan_cnt;

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
	    !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/*
	 * RX flows with the same Ids as RX channels are reserved to be used
	 * as default flows if remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by id.
	 */
	bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++)
		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		bitmap_zero(ud->rflow_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		bitmap_fill(ud->rflow_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		bitmap_clear(ud->rflow_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		bitmap_set(ud->rflow_map, ud->rchan_cnt,
			   ud->rflow_cnt - ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	dev_info(dev,
		 "Channels: %d (tchan: %u, echan: %u, rchan: %u, rflow: %u)\n",
		 ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 ud->rflow_cnt);

	return ch_count;
}

static int udma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct udma_dev *ud = dev_get_priv(dev);
	int i, ret;
	struct udevice *tmp;
	struct udevice *tisci_dev = NULL;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));

	ret = udma_get_mmrs(dev);
	if (ret)
		return ret;

	ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
					   "ti,ringacc", &tmp);
	ud->ringacc = dev_get_priv(tmp);
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	ud->match_data = (void *)dev_get_driver_data(dev);
	ud->psil_base = ud->match_data->psil_base;

	ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
					   "ti,sci", &tisci_dev);
	if (ret) {
		debug("Failed to get TISCI phandle (%d)\n", ret);
		tisci_rm->tisci = NULL;
		return -EINVAL;
	}
	tisci_rm->tisci = (struct ti_sci_handle *)
			  (ti_sci_get_handle_from_sysfw(tisci_dev));

	tisci_rm->tisci_dev_id = -1;
	ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_navss_dev_id = -1;
	ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
			      &tisci_rm->tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
	tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;

	ud->dev = dev;
	ret = udma_setup_resources(ud);
	if (ret <= 0)
		return ret;
	ud->ch_count = ret;

	dev_info(dev,
		 "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n",
		 ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 tisci_rm->tisci_dev_id);
	dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt);

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ud->ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->id = i;
		uc->config.remote_thread_id = -1;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.dir = DMA_MEM_TO_MEM;
		sprintf(uc->name, "UDMA chan%d", i);
		if (!i)
			uc->in_use = true;
	}

	pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
		 udma_read(ud->mmrs[MMR_GCFG], 0),
		 udma_read(ud->mmrs[MMR_GCFG], 0x20),
		 udma_read(ud->mmrs[MMR_GCFG], 0x24),
		 udma_read(ud->mmrs[MMR_GCFG], 0x28),
		 udma_read(ud->mmrs[MMR_GCFG], 0x2c));

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;

	return ret;
}

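/*
 * Ring elements are 64-bit DMA addresses; go through a memcpy so the
 * pointer-to-u64 conversion stays safe on 32-bit builds.
 */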
static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
{
	u64 addr = 0;

	memcpy(&addr, &elem, sizeof(elem));
	return k3_nav_ringacc_ring_push(ring, &addr);
}

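/*
 * Build a TR (type 15) descriptor for a memcpy. A single TR moves at
 * most icnt0 * icnt1 bytes with icnt0 < SZ_64K, so large copies are
 * split in two: tr0 covers tr0_cnt1 blocks of tr0_cnt0 bytes and tr1
 * the remainder. Illustrative numbers: an 8-byte-aligned 200000 byte
 * copy gives tr0_cnt0 = 65528 (SZ_64K - 8), tr0_cnt1 = 3 and
 * tr1_cnt0 = 3416.
 */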
static int udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
				dma_addr_t src, size_t len)
{
	u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned long dummy;
	void *tr_desc;
	size_t desc_size;

	if (len < SZ_64K) {
		num_tr = 1;
		tr0_cnt0 = len;
		tr0_cnt1 = 1;
	} else {
		unsigned long align_to = __ffs(src | dest);

		if (align_to > 3)
			align_to = 3;
		/*
		 * Keep simple: tr0: SZ_64K-alignment blocks,
		 *              tr1: the remaining
		 */
		num_tr = 2;
		tr0_cnt0 = (SZ_64K - BIT(align_to));
		if (len / tr0_cnt0 >= SZ_64K) {
			dev_err(uc->ud->dev, "size %zu is not supported\n",
				len);
			return -EINVAL;
		}

		tr0_cnt1 = len / tr0_cnt0;
		tr1_cnt0 = len % tr0_cnt0;
	}

	desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
	tr_desc = dma_alloc_coherent(desc_size, &dummy);
	if (!tr_desc)
		return -ENOMEM;
	memset(tr_desc, 0, desc_size);

	cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);

	tr_req = tr_desc + tr_size;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);

	flush_dcache_range((unsigned long)tr_desc,
			   ALIGN((unsigned long)tr_desc + desc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->tchan->t_ring, tr_desc);

	return 0;
}

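/*
 * Blocking memcpy on channel 0 (reserved for this): allocate the
 * channel, queue one TR descriptor, poll the completion ring, then tear
 * everything down again.
 */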
static int udma_transfer(struct udevice *dev, int direction,
			 void *dst, void *src, size_t len)
{
	struct udma_dev *ud = dev_get_priv(dev);
	/* Channel0 is reserved for memcpy */
	struct udma_chan *uc = &ud->channels[0];
	dma_addr_t paddr = 0;
	int ret;

	ret = udma_alloc_chan_resources(uc);
	if (ret)
		return ret;

	udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
	udma_start(uc);
	udma_poll_completion(uc, &paddr);
	udma_stop(uc);

	udma_free_chan_resources(uc);
	return 0;
}

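/*
 * DMA uclass .request op: allocate the channel's hardware resources and
 * a coherent descriptor pool - one host descriptor for TX, or
 * UDMA_RX_DESC_NUM of them for RX.
 */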
static int udma_request(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan_config *ucc;
	struct udma_chan *uc;
	unsigned long dummy;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	uc = &ud->channels[dma->id];
	ucc = &uc->config;
	ret = udma_alloc_chan_resources(uc);
	if (ret) {
		dev_err(dma->dev, "alloc dma res failed %d\n", ret);
		return ret;
	}

	if (ucc->dir == DMA_MEM_TO_DEV) {
		uc->desc_tx = dma_alloc_coherent(ucc->hdesc_size, &dummy);
		if (!uc->desc_tx) {
			udma_free_chan_resources(uc);
			return -ENOMEM;
		}
		memset(uc->desc_tx, 0, ucc->hdesc_size);
	} else {
		uc->desc_rx = dma_alloc_coherent(
				ucc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
		if (!uc->desc_rx) {
			udma_free_chan_resources(uc);
			return -ENOMEM;
		}
		memset(uc->desc_rx, 0, ucc->hdesc_size * UDMA_RX_DESC_NUM);
	}

	uc->in_use = true;
	uc->desc_rx_cur = 0;
	uc->num_rx_bufs = 0;

	if (ucc->dir == DMA_DEV_TO_MEM) {
		uc->cfg_data.flow_id_base = uc->rflow->id;
		uc->cfg_data.flow_id_cnt = 1;
	}

	return 0;
}

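/*
 * .rfree callback: stop the channel if it is still running and release
 * its rings and PSI-L pairing.
 */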
static int udma_rfree(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		udma_stop(uc);
	udma_free_chan_resources(uc);

	uc->in_use = false;

	return 0;
}

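/* .enable callback: start the hardware channel. */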
static int udma_enable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	return udma_start(uc);
}

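/* .disable callback: stop the channel; complains if it was not running. */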
static int udma_disable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret = 0;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		ret = udma_stop(uc);
	else
		dev_err(dma->dev, "%s not running\n", __func__);

	return ret;
}

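/*
 * .send callback: wrap the caller's buffer in the preallocated TX host
 * descriptor, flush both to memory, push the descriptor to the TX ring
 * and busy-wait for it to come back on the completion ring. The packet
 * type and destination tag can be passed in via metadata as a
 * struct ti_udma_drv_packet_data.
 */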
static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_tx;
	dma_addr_t dma_src = (dma_addr_t)src;
	struct ti_udma_drv_packet_data packet_data = { 0 };
	dma_addr_t paddr;
	struct udma_chan *uc;
	u32 tc_ring_id;
	int ret;

	if (metadata)
		packet_data = *((struct ti_udma_drv_packet_data *)metadata);

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->config.dir != DMA_MEM_TO_DEV)
		return -EINVAL;

	tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);

	desc_tx = uc->desc_tx;

	cppi5_hdesc_reset_hbdesc(desc_tx);

	cppi5_hdesc_init(desc_tx,
			 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->config.psd_size);
	cppi5_hdesc_set_pktlen(desc_tx, len);
	cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
	cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
	/* Pass the packet type and destination tag supplied by the caller */
	cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
	cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);

	flush_dcache_range((unsigned long)dma_src,
			   ALIGN((unsigned long)dma_src + len,
				 ARCH_DMA_MINALIGN));
	flush_dcache_range((unsigned long)desc_tx,
			   ALIGN((unsigned long)desc_tx + uc->config.hdesc_size,
				 ARCH_DMA_MINALIGN));

	ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
	if (ret) {
		dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
			dma->id, ret);
		return ret;
	}

	udma_poll_completion(uc, &paddr);

	return 0;
}

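/*
 * .receive callback: pop one completed descriptor from the RX ring
 * (returns 0 when nothing is pending), invalidate the descriptor and
 * the payload it points at, and hand the buffer back to the caller.
 * Returns the packet length on success.
 */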
static int udma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan_config *ucc;
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t buf_dma;
	struct udma_chan *uc;
	u32 buf_dma_len, pkt_len;
	u32 port_id = 0;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];
	ucc = &uc->config;

	if (ucc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;
	if (!uc->num_rx_bufs)
		return -EINVAL;

	ret = k3_nav_ringacc_ring_pop(uc->rflow->r_ring, &desc_rx);
	if (ret && ret != -ENODATA) {
		dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
		return ret;
	} else if (ret == -ENODATA) {
		return 0;
	}

	/* invalidate the descriptor (hdesc_size is in bytes) */
	invalidate_dcache_range((ulong)desc_rx,
				(ulong)desc_rx + ucc->hdesc_size);

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);

	/* invalidate the received payload */
	invalidate_dcache_range((ulong)buf_dma,
				(ulong)(buf_dma + buf_dma_len));

	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	*dst = (void *)buf_dma;
	uc->num_rx_bufs--;

	return pkt_len;
}

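/*
 * .of_xlate callback: map a client's DMA specifier to a free channel.
 * args[0] is the remote PSI-L thread ID; its destination bit selects
 * MEM_TO_DEV vs DEV_TO_MEM, and the PSI-L endpoint database provides
 * the rest of the channel configuration.
 */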
static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
{
	struct udma_chan_config *ucc;
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc = &ud->channels[0];
	struct psil_endpoint_config *ep_config;
	u32 val;

	for (val = 0; val < ud->ch_count; val++) {
		uc = &ud->channels[val];
		if (!uc->in_use)
			break;
	}

	if (val == ud->ch_count)
		return -EBUSY;

	ucc = &uc->config;
	ucc->remote_thread_id = args->args[0];
	if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
		ucc->dir = DMA_MEM_TO_DEV;
	else
		ucc->dir = DMA_DEV_TO_MEM;

	ep_config = psil_get_ep_config(ucc->remote_thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
			ucc->remote_thread_id);
		ucc->dir = DMA_MEM_TO_MEM;
		ucc->remote_thread_id = -1;
		return -EINVAL;
	}

	ucc->pkt_mode = ep_config->pkt_mode;
	ucc->channel_tpl = ep_config->channel_tpl;
	ucc->notdpkt = ep_config->notdpkt;
	ucc->ep_type = ep_config->ep_type;

	ucc->needs_epib = ep_config->needs_epib;
	ucc->psd_size = ep_config->psd_size;
	ucc->metadata_size = (ucc->needs_epib ?
			      CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + ucc->psd_size;

	ucc->hdesc_size = cppi5_hdesc_calc_size(ucc->needs_epib,
						ucc->psd_size, 0);
	ucc->hdesc_size = ALIGN(ucc->hdesc_size, ARCH_DMA_MINALIGN);

	dma->id = uc->id;
	pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
		 dma->id, ucc->needs_epib,
		 ucc->psd_size, ucc->metadata_size,
		 ucc->remote_thread_id);

	return 0;
}

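/*
 * .prepare_rcv_buf callback: attach a caller-provided buffer to the
 * next free RX host descriptor and push it to the free-descriptor ring
 * so the hardware can fill it.
 */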
int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t dma_dst;
	struct udma_chan *uc;
	u32 desc_num;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->config.dir != DMA_DEV_TO_MEM)
		return -EINVAL;

	if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
		return -EINVAL;

	desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
	desc_rx = uc->desc_rx + (desc_num * uc->config.hdesc_size);
	dma_dst = (dma_addr_t)dst;

	cppi5_hdesc_reset_hbdesc(desc_rx);

	cppi5_hdesc_init(desc_rx,
			 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->config.psd_size);
	cppi5_hdesc_set_pktlen(desc_rx, size);
	cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);

	flush_dcache_range((unsigned long)desc_rx,
			   ALIGN((unsigned long)desc_rx + uc->config.hdesc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->rflow->fd_ring, desc_rx);

	uc->num_rx_bufs++;
	uc->desc_rx_cur++;

	return 0;
}

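/*
 * .get_cfg callback: expose per-channel data (currently the RX flow ID
 * range) to client drivers.
 */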
static int udma_get_cfg(struct dma *dma, u32 id, void **data)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	switch (id) {
	case TI_UDMA_CHAN_PRIV_INFO:
		uc = &ud->channels[dma->id];
		*data = &uc->cfg_data;
		return 0;
	}

	return -EINVAL;
}

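/*
 * DMA uclass operations. A rough usage sketch from a client driver's
 * point of view (device/channel names hypothetical), going through the
 * generic uclass API in include/dma.h:
 *
 *	struct dma dma_rx;
 *	void *pkt;
 *	int len;
 *
 *	dma_get_by_name(dev, "rx", &dma_rx);      // of_xlate + request
 *	dma_prepare_rcv_buf(&dma_rx, buf, size);  // queue an RX buffer
 *	dma_enable(&dma_rx);                      // start the channel
 *	len = dma_receive(&dma_rx, &pkt, NULL);   // poll for a packet
 */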
static const struct dma_ops udma_ops = {
	.transfer	= udma_transfer,
	.of_xlate	= udma_of_xlate,
	.request	= udma_request,
	.rfree		= udma_rfree,
	.enable		= udma_enable,
	.disable	= udma_disable,
	.send		= udma_send,
	.receive	= udma_receive,
	.prepare_rcv_buf = udma_prepare_rcv_buf,
	.get_cfg	= udma_get_cfg,
};

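/*
 * Per-SoC integration data, selected via the compatible strings below:
 * PSI-L thread base, number of channel throughput levels and the first
 * channel index of each level, and the RX channel output-event-steering
 * offset.
 */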
static struct udma_match_data am654_main_data = {
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
	.rchan_oes_offset = 0x200,
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 8, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data am654_mcu_data = {
	.psil_base = 0x6000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
	.rchan_oes_offset = 0x200,
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 2, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data j721e_main_data = {
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
	.rchan_oes_offset = 0x400,
	.tpl_levels = 3,
	.level_start_idx = {
		[0] = 16, /* Normal channels */
		[1] = 4, /* High Throughput channels */
		[2] = 0, /* Ultra High Throughput channels */
	},
};

static struct udma_match_data j721e_mcu_data = {
	.psil_base = 0x6000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
	.rchan_oes_offset = 0x400,
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 2, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static const struct udevice_id udma_ids[] = {
	{
		.compatible = "ti,am654-navss-main-udmap",
		.data = (ulong)&am654_main_data,
	}, {
		.compatible = "ti,am654-navss-mcu-udmap",
		.data = (ulong)&am654_mcu_data,
	}, {
		.compatible = "ti,j721e-navss-main-udmap",
		.data = (ulong)&j721e_main_data,
	}, {
		.compatible = "ti,j721e-navss-mcu-udmap",
		.data = (ulong)&j721e_mcu_data,
	},
	{ /* Sentinel */ },
};

U_BOOT_DRIVER(ti_edma3) = {
	.name	= "ti-udma",
	.id	= UCLASS_DMA,
	.of_match = udma_ids,
	.ops	= &udma_ops,
	.probe	= udma_probe,
	.priv_auto	= sizeof(struct udma_dev),
};