1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
6 #define pr_fmt(fmt) "udma: " fmt
11 #include <asm/bitops.h>
13 #include <asm/dma-mapping.h>
15 #include <dm/device.h>
17 #include <dm/of_access.h>
19 #include <dma-uclass.h>
20 #include <linux/delay.h>
21 #include <dt-bindings/dma/k3-udma.h>
22 #include <linux/bitmap.h>
23 #include <linux/soc/ti/k3-navss-ringacc.h>
24 #include <linux/soc/ti/cppi5.h>
25 #include <linux/soc/ti/ti-udma.h>
26 #include <linux/soc/ti/ti_sci_protocol.h>
28 #include "k3-udma-hwdef.h"
30 #if BITS_PER_LONG == 64
31 #define RINGACC_RING_USE_PROXY (0)
#else
33 #define RINGACC_RING_USE_PROXY (1)
#endif
36 #define K3_UDMA_MAX_RFLOWS 1024
47 static const char * const mmr_names[] = {
48 "gcfg", "rchanrt", "tchanrt"
55 struct k3_nav_ring *t_ring; /* Transmit ring */
56 struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
63 struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
64 struct k3_nav_ring *r_ring; /* Receive ring */
78 struct udma_tisci_rm {
79 const struct ti_sci_handle *tisci;
80 const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
83 /* tisci information for PSI-L thread pairing/unpairing */
84 const struct ti_sci_rm_psil_ops *tisci_psil_ops;
85 u32 tisci_navss_dev_id;
87 struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
92 void __iomem *mmrs[MMR_LAST];
94 struct udma_tisci_rm tisci_rm;
95 struct k3_nav_ringacc *ringacc;
103 unsigned long *tchan_map;
104 unsigned long *rchan_map;
105 unsigned long *rflow_map;
106 unsigned long *rflow_map_reserved;
108 struct udma_tchan *tchans;
109 struct udma_rchan *rchans;
110 struct udma_rflow *rflows;
112 struct udma_chan *channels;
122 struct udma_tchan *tchan;
123 struct udma_rchan *rchan;
124 struct udma_rflow *rflow;
126 struct ti_udma_drv_chan_cfg_data cfg_data;
128 u32 bcnt; /* number of bytes completed since the start of the channel */
130 bool pkt_mode; /* TR or packet */
131 bool needs_epib; /* EPIB is needed for the communication or not */
132 u32 psd_size; /* size of Protocol Specific Data */
133 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
140 enum dma_direction dir;
142 struct cppi5_host_desc_t *desc_tx;
151 #define UDMA_CH_1000(ch) ((ch) * 0x1000)
152 #define UDMA_CH_100(ch) ((ch) * 0x100)
153 #define UDMA_CH_40(ch) ((ch) * 0x40)
#ifdef PKTBUFSRX
156 #define UDMA_RX_DESC_NUM PKTBUFSRX
#else
158 #define UDMA_RX_DESC_NUM 4
#endif
161 /* Generic register access functions */
162 static inline u32 udma_read(void __iomem *base, int reg)
166 v = __raw_readl(base + reg);
167 pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
171 static inline void udma_write(void __iomem *base, int reg, u32 val)
173 pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
174 __raw_writel(val, base + reg);
177 static inline void udma_update_bits(void __iomem *base, int reg,
182 orig = udma_read(base, reg);
187 udma_write(base, reg, tmp);
191 static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
195 return udma_read(tchan->reg_rt, reg);
198 static inline void udma_tchanrt_write(struct udma_tchan *tchan,
203 udma_write(tchan->reg_rt, reg, val);
207 static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
211 return udma_read(rchan->reg_rt, reg);
214 static inline void udma_rchanrt_write(struct udma_rchan *rchan,
219 udma_write(rchan->reg_rt, reg, val);
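/*
 * PSI-L thread pairing/unpairing is delegated to System Firmware via TISCI:
 * the UDMA channel (source thread) is linked to the peer peripheral
 * (destination thread, marked by bit 15 of the thread ID).
 */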
222 static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
225 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
227 dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
229 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
230 tisci_rm->tisci_navss_dev_id,
231 src_thread, dst_thread);
234 static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
237 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
239 dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
241 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
242 tisci_rm->tisci_navss_dev_id,
243 src_thread, dst_thread);
246 static inline char *udma_get_dir_text(enum dma_direction dir)
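/* A channel counts as running if the RT enable bit is set on its rchan or tchan. */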
264 static inline bool udma_is_chan_running(struct udma_chan *uc)
271 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
272 pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
274 udma_rchanrt_read(uc->rchan,
275 UDMA_RCHAN_RT_PEER_RT_EN_REG));
278 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
279 pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
281 udma_tchanrt_read(uc->tchan,
282 UDMA_TCHAN_RT_PEER_RT_EN_REG));
285 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
286 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
292 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
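/*
 * Pop one completed descriptor address from the channel's completion ring:
 * r_ring for DEV_TO_MEM, tc_ring for MEM_TO_DEV and MEM_TO_MEM.
 */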
298 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
300 struct k3_nav_ring *ring = NULL;
305 ring = uc->rchan->r_ring;
308 ring = uc->tchan->tc_ring;
311 ring = uc->tchan->tc_ring;
317 if (ring && k3_nav_ringacc_ring_get_occ(ring))
318 ret = k3_nav_ringacc_ring_pop(ring, addr);
323 static void udma_reset_rings(struct udma_chan *uc)
325 struct k3_nav_ring *ring1 = NULL;
326 struct k3_nav_ring *ring2 = NULL;
330 ring1 = uc->rchan->fd_ring;
331 ring2 = uc->rchan->r_ring;
334 ring1 = uc->tchan->t_ring;
335 ring2 = uc->tchan->tc_ring;
338 ring1 = uc->tchan->t_ring;
339 ring2 = uc->tchan->tc_ring;
346 k3_nav_ringacc_ring_reset_dma(ring1, 0);
348 k3_nav_ringacc_ring_reset(ring2);
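/* Clear the channel RT byte/packet counters by writing back the values just read. */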
351 static void udma_reset_counters(struct udma_chan *uc)
356 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
357 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
359 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
360 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
362 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
363 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
365 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
366 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
370 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
371 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
373 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
374 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
376 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
377 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
379 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
380 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
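/* Force-stop: clear the RT enable bits on the channel and its peer without a graceful teardown. */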
386 static inline int udma_stop_hard(struct udma_chan *uc)
388 pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);
392 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
393 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
396 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
397 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
400 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
401 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
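/*
 * Enable the channel: clear any pending teardown, reset the counters, then
 * set the RT enable bits. For RX the channel is enabled before its peer,
 * for TX the peer is enabled before the channel.
 */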
410 static int udma_start(struct udma_chan *uc)
412 /* Channel is already running, no need to proceed further */
413 if (udma_is_chan_running(uc))
416 pr_debug("%s: chan:%d dir:%s (static_tr_type: %d)\n",
417 __func__, uc->id, udma_get_dir_text(uc->dir),
420 /* Make sure that we clear the teardown bit, if it is set */
423 /* Reset all counters */
424 udma_reset_counters(uc);
428 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
429 UDMA_CHAN_RT_CTL_EN);
432 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
433 UDMA_PEER_RT_EN_ENABLE);
435 pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
437 udma_rchanrt_read(uc->rchan,
438 UDMA_RCHAN_RT_CTL_REG),
439 udma_rchanrt_read(uc->rchan,
440 UDMA_RCHAN_RT_PEER_RT_EN_REG));
444 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
445 UDMA_PEER_RT_EN_ENABLE);
447 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
448 UDMA_CHAN_RT_CTL_EN);
450 pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
452 udma_tchanrt_read(uc->tchan,
453 UDMA_TCHAN_RT_CTL_REG),
454 udma_tchanrt_read(uc->tchan,
455 UDMA_TCHAN_RT_PEER_RT_EN_REG));
458 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
459 UDMA_CHAN_RT_CTL_EN);
460 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
461 UDMA_CHAN_RT_CTL_EN);
468 pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
473 static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
478 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
479 UDMA_CHAN_RT_CTL_EN |
480 UDMA_CHAN_RT_CTL_TDOWN);
482 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
484 while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
485 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
488 printf(" %s TIMEOUT !\n", __func__);
494 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
495 if (val & UDMA_PEER_RT_EN_ENABLE)
496 printf("%s: peer not stopped TIMEOUT !\n", __func__);
499 static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
504 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
505 UDMA_PEER_RT_EN_ENABLE |
506 UDMA_PEER_RT_EN_TEARDOWN);
508 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
510 while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
511 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
514 printf("%s TIMEOUT !\n", __func__);
520 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
521 if (val & UDMA_PEER_RT_EN_ENABLE)
522 printf("%s: peer not stopped TIMEOUT !\n", __func__);
525 static inline int udma_stop(struct udma_chan *uc)
527 pr_debug("%s: chan:%d dir:%s\n",
528 __func__, uc->id, udma_get_dir_text(uc->dir));
530 udma_reset_counters(uc);
533 udma_stop_dev2mem(uc, true);
536 udma_stop_mem2dev(uc, true);
539 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
540 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
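/* Busy-poll the completion ring until a completed descriptor address is returned. */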
549 static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
553 while (udma_pop_from_ring(uc, paddr)) {
561 static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
563 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
566 if (test_bit(id, ud->rflow_map)) {
567 dev_err(ud->dev, "rflow%d is in use\n", id);
568 return ERR_PTR(-ENOENT);
571 bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
574 id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
575 if (id >= ud->rflow_cnt)
576 return ERR_PTR(-ENOENT);
579 __set_bit(id, ud->rflow_map);
580 return &ud->rflows[id];
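/*
 * UDMA_RESERVE_RESOURCE() generates __udma_reserve_tchan()/__udma_reserve_rchan(),
 * which reserve a specific channel id or, when id is negative, the first free one.
 */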
583 #define UDMA_RESERVE_RESOURCE(res) \
584 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
588 if (test_bit(id, ud->res##_map)) { \
589 dev_err(ud->dev, "%s%d is in use\n", #res, id); \
590 return ERR_PTR(-ENOENT); \
593 id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
594 if (id == ud->res##_cnt) { \
595 return ERR_PTR(-ENOENT); \
599 __set_bit(id, ud->res##_map); \
600 return &ud->res##s[id]; \
603 UDMA_RESERVE_RESOURCE(tchan);
604 UDMA_RESERVE_RESOURCE(rchan);
606 static int udma_get_tchan(struct udma_chan *uc)
608 struct udma_dev *ud = uc->ud;
611 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
612 uc->id, uc->tchan->id);
616 uc->tchan = __udma_reserve_tchan(ud, -1);
617 if (IS_ERR(uc->tchan))
618 return PTR_ERR(uc->tchan);
620 pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);
625 static int udma_get_rchan(struct udma_chan *uc)
627 struct udma_dev *ud = uc->ud;
630 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
631 uc->id, uc->rchan->id);
635 uc->rchan = __udma_reserve_rchan(ud, -1);
636 if (IS_ERR(uc->rchan))
637 return PTR_ERR(uc->rchan);
639 pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);
644 static int udma_get_chan_pair(struct udma_chan *uc)
646 struct udma_dev *ud = uc->ud;
649 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
650 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
651 uc->id, uc->tchan->id);
656 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
657 uc->id, uc->tchan->id);
659 } else if (uc->rchan) {
660 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
661 uc->id, uc->rchan->id);
665 /* Can be optimized, but let's have it like this for now */
666 end = min(ud->tchan_cnt, ud->rchan_cnt);
667 for (chan_id = 0; chan_id < end; chan_id++) {
668 if (!test_bit(chan_id, ud->tchan_map) &&
669 !test_bit(chan_id, ud->rchan_map))
676 __set_bit(chan_id, ud->tchan_map);
677 __set_bit(chan_id, ud->rchan_map);
678 uc->tchan = &ud->tchans[chan_id];
679 uc->rchan = &ud->rchans[chan_id];
681 pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);
686 static int udma_get_rflow(struct udma_chan *uc, int flow_id)
688 struct udma_dev *ud = uc->ud;
691 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
692 uc->id, uc->rflow->id);
697 dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);
699 uc->rflow = __udma_reserve_rflow(ud, flow_id);
700 if (IS_ERR(uc->rflow))
701 return PTR_ERR(uc->rflow);
703 pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
707 static void udma_put_rchan(struct udma_chan *uc)
709 struct udma_dev *ud = uc->ud;
712 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
714 __clear_bit(uc->rchan->id, ud->rchan_map);
719 static void udma_put_tchan(struct udma_chan *uc)
721 struct udma_dev *ud = uc->ud;
724 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
726 __clear_bit(uc->tchan->id, ud->tchan_map);
731 static void udma_put_rflow(struct udma_chan *uc)
733 struct udma_dev *ud = uc->ud;
736 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
738 __clear_bit(uc->rflow->id, ud->rflow_map);
743 static void udma_free_tx_resources(struct udma_chan *uc)
748 k3_nav_ringacc_ring_free(uc->tchan->t_ring);
749 k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
750 uc->tchan->t_ring = NULL;
751 uc->tchan->tc_ring = NULL;
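/*
 * Reserve a tchan, request its transmit ring (ring index equals the tchan id)
 * and a transmit completion ring, and configure both for 8-byte elements in
 * message mode.
 */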
756 static int udma_alloc_tx_resources(struct udma_chan *uc)
758 struct k3_nav_ring_cfg ring_cfg;
759 struct udma_dev *ud = uc->ud;
762 ret = udma_get_tchan(uc);
766 uc->tchan->t_ring = k3_nav_ringacc_request_ring(
767 ud->ringacc, uc->tchan->id,
768 RINGACC_RING_USE_PROXY);
769 if (!uc->tchan->t_ring) {
774 uc->tchan->tc_ring = k3_nav_ringacc_request_ring(
775 ud->ringacc, -1, RINGACC_RING_USE_PROXY);
776 if (!uc->tchan->tc_ring) {
781 memset(&ring_cfg, 0, sizeof(ring_cfg));
783 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
784 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_MESSAGE;
786 ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
787 ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
795 k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
796 uc->tchan->tc_ring = NULL;
798 k3_nav_ringacc_ring_free(uc->tchan->t_ring);
799 uc->tchan->t_ring = NULL;
806 static void udma_free_rx_resources(struct udma_chan *uc)
811 k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
812 k3_nav_ringacc_ring_free(uc->rchan->r_ring);
813 uc->rchan->fd_ring = NULL;
814 uc->rchan->r_ring = NULL;
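/*
 * Reserve an rchan and its default rflow, request the free descriptor ring
 * (index = tchan_cnt + echan_cnt + rchan id) and a receive ring, and
 * configure both for 8-byte elements in message mode.
 */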
820 static int udma_alloc_rx_resources(struct udma_chan *uc)
822 struct k3_nav_ring_cfg ring_cfg;
823 struct udma_dev *ud = uc->ud;
827 ret = udma_get_rchan(uc);
831 /* For MEM_TO_MEM we don't need rflow or rings */
832 if (uc->dir == DMA_MEM_TO_MEM)
835 ret = udma_get_rflow(uc, uc->rchan->id);
841 fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
843 uc->rchan->fd_ring = k3_nav_ringacc_request_ring(
844 ud->ringacc, fd_ring_id,
845 RINGACC_RING_USE_PROXY);
846 if (!uc->rchan->fd_ring) {
851 uc->rchan->r_ring = k3_nav_ringacc_request_ring(
852 ud->ringacc, -1, RINGACC_RING_USE_PROXY);
853 if (!uc->rchan->r_ring) {
858 memset(&ring_cfg, 0, sizeof(ring_cfg));
860 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
861 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_MESSAGE;
863 ret = k3_nav_ringacc_ring_cfg(uc->rchan->fd_ring, &ring_cfg);
864 ret |= k3_nav_ringacc_ring_cfg(uc->rchan->r_ring, &ring_cfg);
872 k3_nav_ringacc_ring_free(uc->rchan->r_ring);
873 uc->rchan->r_ring = NULL;
875 k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
876 uc->rchan->fd_ring = NULL;
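/*
 * Configure the tchan through TISCI: channel type (packet vs third-party
 * block copy), descriptor fetch size and the tc_ring as completion queue.
 */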
885 static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
887 struct udma_dev *ud = uc->ud;
888 int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
889 struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
890 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
895 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
897 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
899 req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
900 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
901 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
902 req.nav_id = tisci_rm->tisci_dev_id;
903 req.index = uc->tchan->id;
904 req.tx_chan_type = mode;
905 if (uc->dir == DMA_MEM_TO_MEM)
906 req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
908 req.tx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
911 req.txcq_qnum = tc_ring;
913 ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
915 dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);
920 static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
922 struct udma_dev *ud = uc->ud;
923 int fd_ring = k3_nav_ringacc_get_ring_id(uc->rchan->fd_ring);
924 int rx_ring = k3_nav_ringacc_get_ring_id(uc->rchan->r_ring);
925 int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
926 struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
927 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
928 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
933 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
935 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
937 req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
938 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
939 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
940 req.nav_id = tisci_rm->tisci_dev_id;
941 req.index = uc->rchan->id;
942 req.rx_chan_type = mode;
943 if (uc->dir == DMA_MEM_TO_MEM) {
944 req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
945 req.rxcq_qnum = tc_ring;
947 req.rx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
950 req.rxcq_qnum = rx_ring;
952 if (uc->rflow->id != uc->rchan->id && uc->dir != DMA_MEM_TO_MEM) {
953 req.flowid_start = uc->rflow->id;
956 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
957 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
960 ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
962 dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
966 if (uc->dir == DMA_MEM_TO_MEM)
969 flow_req.valid_params =
970 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
971 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
972 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
973 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
974 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
975 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
976 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
977 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
978 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
979 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
980 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
981 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
982 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
983 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;
985 flow_req.nav_id = tisci_rm->tisci_dev_id;
986 flow_req.flow_index = uc->rflow->id;
989 flow_req.rx_einfo_present = 1;
991 flow_req.rx_einfo_present = 0;
994 flow_req.rx_psinfo_present = 1;
996 flow_req.rx_psinfo_present = 0;
998 flow_req.rx_error_handling = 0;
999 flow_req.rx_desc_type = 0;
1000 flow_req.rx_dest_qnum = rx_ring;
1001 flow_req.rx_src_tag_hi_sel = 2;
1002 flow_req.rx_src_tag_lo_sel = 4;
1003 flow_req.rx_dest_tag_hi_sel = 5;
1004 flow_req.rx_dest_tag_lo_sel = 4;
1005 flow_req.rx_fdq0_sz0_qnum = fd_ring;
1006 flow_req.rx_fdq1_qnum = fd_ring;
1007 flow_req.rx_fdq2_qnum = fd_ring;
1008 flow_req.rx_fdq3_qnum = fd_ring;
1009 flow_req.rx_ps_location = 0;
1011 ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
1014 dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
1015 uc->rchan->id, uc->rflow->id, ret);
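/*
 * Per-channel setup: reserve channels, rings and rflow for the requested
 * direction, derive the PSI-L source/destination thread IDs (destination
 * threads have bit 15 set), configure everything through TISCI and finally
 * pair the PSI-L threads.
 */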
1020 static int udma_alloc_chan_resources(struct udma_chan *uc)
1022 struct udma_dev *ud = uc->ud;
1025 pr_debug("%s: chan:%d as %s\n",
1026 __func__, uc->id, udma_get_dir_text(uc->dir));
1029 case DMA_MEM_TO_MEM:
1030 /* Non synchronized - mem to mem type of transfer */
1031 ret = udma_get_chan_pair(uc);
1035 ret = udma_alloc_tx_resources(uc);
1039 ret = udma_alloc_rx_resources(uc);
1043 uc->src_thread = ud->psil_base + uc->tchan->id;
1044 uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
1046 case DMA_MEM_TO_DEV:
1047 /* Slave transfer synchronized - mem to dev (TX) transfer */
1048 ret = udma_alloc_tx_resources(uc);
1052 uc->src_thread = ud->psil_base + uc->tchan->id;
1053 uc->dst_thread = uc->slave_thread_id;
1054 if (!(uc->dst_thread & 0x8000))
1055 uc->dst_thread |= 0x8000;
1058 case DMA_DEV_TO_MEM:
1059 /* Slave transfer synchronized - dev to mem (RX) transfer */
1060 ret = udma_alloc_rx_resources(uc);
1064 uc->src_thread = uc->slave_thread_id;
1065 uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
1069 /* Cannot happen */
1070 pr_debug("%s: chan:%d invalid direction (%u)\n",
1071 __func__, uc->id, uc->dir);
1075 /* We have channel indexes and rings */
1076 if (uc->dir == DMA_MEM_TO_MEM) {
1077 ret = udma_alloc_tchan_sci_req(uc);
1081 ret = udma_alloc_rchan_sci_req(uc);
1085 /* Slave transfer */
1086 if (uc->dir == DMA_MEM_TO_DEV) {
1087 ret = udma_alloc_tchan_sci_req(uc);
1091 ret = udma_alloc_rchan_sci_req(uc);
1097 if (udma_is_chan_running(uc)) {
1098 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1100 if (udma_is_chan_running(uc)) {
1101 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1107 ret = udma_navss_psil_pair(ud, uc->src_thread, uc->dst_thread);
1109 dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
1116 udma_free_tx_resources(uc);
1117 udma_free_rx_resources(uc);
1118 uc->slave_thread_id = -1;
1122 static void udma_free_chan_resources(struct udma_chan *uc)
1124 /* Some configuration to UDMA-P channel: disable, reset, whatever */
1126 /* Release PSI-L pairing */
1127 udma_navss_psil_unpair(uc->ud, uc->src_thread, uc->dst_thread);
1129 /* Reset the rings for a new start */
1130 udma_reset_rings(uc);
1131 udma_free_tx_resources(uc);
1132 udma_free_rx_resources(uc);
1134 uc->slave_thread_id = -1;
1135 uc->dir = DMA_MEM_TO_MEM;
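/* Map the MMIO regions listed in mmr_names ("gcfg", "rchanrt", "tchanrt"). */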
1138 static int udma_get_mmrs(struct udevice *dev)
1140 struct udma_dev *ud = dev_get_priv(dev);
1143 for (i = 0; i < MMR_LAST; i++) {
1144 ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
1153 static int udma_setup_resources(struct udma_dev *ud)
1155 struct udevice *dev = ud->dev;
1158 struct ti_sci_resource_desc *rm_desc;
1159 struct ti_sci_resource *rm_res;
1160 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1161 static const char * const range_names[] = { "ti,sci-rm-range-tchan",
1162 "ti,sci-rm-range-rchan",
1163 "ti,sci-rm-range-rflow" };
1165 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
1166 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
1168 ud->rflow_cnt = cap3 & 0x3fff;
1169 ud->tchan_cnt = cap2 & 0x1ff;
1170 ud->echan_cnt = (cap2 >> 9) & 0x1ff;
1171 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1172 ch_count = ud->tchan_cnt + ud->rchan_cnt;
1174 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1175 sizeof(unsigned long), GFP_KERNEL);
1176 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1178 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1179 sizeof(unsigned long), GFP_KERNEL);
1180 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1182 ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
1183 sizeof(unsigned long), GFP_KERNEL);
1184 ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
1185 sizeof(unsigned long),
1187 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
1190 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
1191 !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
1196 * RX flows with the same Ids as RX channels are reserved to be used
1197 * as default flows if remote HW can't generate flow_ids. Those
1198 * RX flows can be requested only explicitly by id.
1200 bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);
1202 /* Get resource ranges from tisci */
1203 for (i = 0; i < RM_RANGE_LAST; i++)
1204 tisci_rm->rm_ranges[i] =
1205 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1206 tisci_rm->tisci_dev_id,
1207 (char *)range_names[i]);
1210 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1211 if (IS_ERR(rm_res)) {
1212 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1214 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1215 for (i = 0; i < rm_res->sets; i++) {
1216 rm_desc = &rm_res->desc[i];
1217 bitmap_clear(ud->tchan_map, rm_desc->start,
1222 /* rchan and matching default flow ranges */
1223 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1224 if (IS_ERR(rm_res)) {
1225 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1226 bitmap_zero(ud->rflow_map, ud->rchan_cnt);
1228 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1229 bitmap_fill(ud->rflow_map, ud->rchan_cnt);
1230 for (i = 0; i < rm_res->sets; i++) {
1231 rm_desc = &rm_res->desc[i];
1232 bitmap_clear(ud->rchan_map, rm_desc->start,
1234 bitmap_clear(ud->rflow_map, rm_desc->start,
1239 /* GP rflow ranges */
1240 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
1241 if (IS_ERR(rm_res)) {
1242 bitmap_clear(ud->rflow_map, ud->rchan_cnt,
1243 ud->rflow_cnt - ud->rchan_cnt);
1245 bitmap_set(ud->rflow_map, ud->rchan_cnt,
1246 ud->rflow_cnt - ud->rchan_cnt);
1247 for (i = 0; i < rm_res->sets; i++) {
1248 rm_desc = &rm_res->desc[i];
1249 bitmap_clear(ud->rflow_map, rm_desc->start,
1254 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
1255 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
1259 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
1265 "Channels: %d (tchan: %u, echan: %u, rchan: %u, rflow: %u)\n",
1266 ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
1271 static int udma_probe(struct udevice *dev)
1273 struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
1274 struct udma_dev *ud = dev_get_priv(dev);
1276 struct udevice *tmp;
1277 struct udevice *tisci_dev = NULL;
1278 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1279 ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));
1282 ret = udma_get_mmrs(dev);
1286 ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
1287 "ti,ringacc", &tmp);
1288 ud->ringacc = dev_get_priv(tmp);
1289 if (IS_ERR(ud->ringacc))
1290 return PTR_ERR(ud->ringacc);
1292 ud->psil_base = dev_read_u32_default(dev, "ti,psil-base", 0);
1293 if (!ud->psil_base) {
1295 "Missing ti,psil-base property, using %d.\n", ret);
1299 ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
1300 "ti,sci", &tisci_dev);
1302 debug("Failed to get TISCI phandle (%d)\n", ret);
1303 tisci_rm->tisci = NULL;
1306 tisci_rm->tisci = (struct ti_sci_handle *)
1307 (ti_sci_get_handle_from_sysfw(tisci_dev));
1309 tisci_rm->tisci_dev_id = -1;
1310 ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
1312 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
1316 tisci_rm->tisci_navss_dev_id = -1;
1317 ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
1318 &tisci_rm->tisci_navss_dev_id);
1320 dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
1324 tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
1325 tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;
1328 ud->ch_count = udma_setup_resources(ud);
1329 if (ud->ch_count <= 0)
1330 return ud->ch_count;
1333 "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n",
1334 ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
1335 tisci_rm->tisci_dev_id);
1336 dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt);
1338 for (i = 0; i < ud->tchan_cnt; i++) {
1339 struct udma_tchan *tchan = &ud->tchans[i];
1342 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
1345 for (i = 0; i < ud->rchan_cnt; i++) {
1346 struct udma_rchan *rchan = &ud->rchans[i];
1349 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
1352 for (i = 0; i < ud->rflow_cnt; i++) {
1353 struct udma_rflow *rflow = &ud->rflows[i];
1358 for (i = 0; i < ud->ch_count; i++) {
1359 struct udma_chan *uc = &ud->channels[i];
1363 uc->slave_thread_id = -1;
1366 uc->dir = DMA_MEM_TO_MEM;
1367 sprintf(uc->name, "UDMA chan%d", i);
1372 pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
1373 udma_read(ud->mmrs[MMR_GCFG], 0),
1374 udma_read(ud->mmrs[MMR_GCFG], 0x20),
1375 udma_read(ud->mmrs[MMR_GCFG], 0x24),
1376 udma_read(ud->mmrs[MMR_GCFG], 0x28),
1377 udma_read(ud->mmrs[MMR_GCFG], 0x2c));
1379 uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;
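/*
 * MEM_TO_MEM copies use a TR descriptor with up to two type-15 TRs: tr0 moves
 * whole blocks sized from the source/destination alignment, tr1 moves the
 * remainder.
 */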
1384 static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
1385 dma_addr_t src, size_t len)
1387 u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1388 struct cppi5_tr_type15_t *tr_req;
1390 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
1391 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
1392 unsigned long dummy;
1401 unsigned long align_to = __ffs(src | dest);
1406 * Keep it simple: tr0 copies blocks of (SZ_64K - alignment) bytes,
1407 * tr1 copies the remainder
1410 tr0_cnt0 = (SZ_64K - BIT(align_to));
1411 if (len / tr0_cnt0 >= SZ_64K) {
1412 dev_err(uc->ud->dev, "size %zu is not supported\n",
1417 tr0_cnt1 = len / tr0_cnt0;
1418 tr1_cnt0 = len % tr0_cnt0;
1421 desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
1422 tr_desc = dma_alloc_coherent(desc_size, &dummy);
1425 memset(tr_desc, 0, desc_size);
1427 cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
1428 cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
1429 cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);
1431 tr_req = tr_desc + tr_size;
1433 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
1434 CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
1435 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
1437 tr_req[0].addr = src;
1438 tr_req[0].icnt0 = tr0_cnt0;
1439 tr_req[0].icnt1 = tr0_cnt1;
1440 tr_req[0].icnt2 = 1;
1441 tr_req[0].icnt3 = 1;
1442 tr_req[0].dim1 = tr0_cnt0;
1444 tr_req[0].daddr = dest;
1445 tr_req[0].dicnt0 = tr0_cnt0;
1446 tr_req[0].dicnt1 = tr0_cnt1;
1447 tr_req[0].dicnt2 = 1;
1448 tr_req[0].dicnt3 = 1;
1449 tr_req[0].ddim1 = tr0_cnt0;
1452 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
1453 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
1454 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
1456 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
1457 tr_req[1].icnt0 = tr1_cnt0;
1458 tr_req[1].icnt1 = 1;
1459 tr_req[1].icnt2 = 1;
1460 tr_req[1].icnt3 = 1;
1462 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
1463 tr_req[1].dicnt0 = tr1_cnt0;
1464 tr_req[1].dicnt1 = 1;
1465 tr_req[1].dicnt2 = 1;
1466 tr_req[1].dicnt3 = 1;
1469 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
1471 flush_dcache_range((u64)tr_desc,
1472 ALIGN((u64)tr_desc + desc_size,
1473 ARCH_DMA_MINALIGN));
1475 k3_nav_ringacc_ring_push(uc->tchan->t_ring, &tr_desc);
1480 static int udma_transfer(struct udevice *dev, int direction,
1481 void *dst, void *src, size_t len)
1483 struct udma_dev *ud = dev_get_priv(dev);
1484 /* Channel0 is reserved for memcpy */
1485 struct udma_chan *uc = &ud->channels[0];
1486 dma_addr_t paddr = 0;
1489 ret = udma_alloc_chan_resources(uc);
1493 udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
1495 udma_poll_completion(uc, &paddr);
1498 udma_free_chan_resources(uc);
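/* dma_ops.request: allocate channel resources and the host descriptors used by send/receive. */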
1502 static int udma_request(struct dma *dma)
1504 struct udma_dev *ud = dev_get_priv(dma->dev);
1505 struct udma_chan *uc;
1506 unsigned long dummy;
1509 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1510 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1514 uc = &ud->channels[dma->id];
1515 ret = udma_alloc_chan_resources(uc);
1517 dev_err(dma->dev, "alloc dma res failed %d\n", ret);
1521 uc->hdesc_size = cppi5_hdesc_calc_size(uc->needs_epib,
1523 uc->hdesc_size = ALIGN(uc->hdesc_size, ARCH_DMA_MINALIGN);
1525 if (uc->dir == DMA_MEM_TO_DEV) {
1526 uc->desc_tx = dma_alloc_coherent(uc->hdesc_size, &dummy);
1527 memset(uc->desc_tx, 0, uc->hdesc_size);
1529 uc->desc_rx = dma_alloc_coherent(
1530 uc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
1531 memset(uc->desc_rx, 0, uc->hdesc_size * UDMA_RX_DESC_NUM);
1535 uc->desc_rx_cur = 0;
1536 uc->num_rx_bufs = 0;
1538 if (uc->dir == DMA_DEV_TO_MEM) {
1539 uc->cfg_data.flow_id_base = uc->rflow->id;
1540 uc->cfg_data.flow_id_cnt = 1;
1546 static int udma_free(struct dma *dma)
1548 struct udma_dev *ud = dev_get_priv(dma->dev);
1549 struct udma_chan *uc;
1551 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1552 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1555 uc = &ud->channels[dma->id];
1557 if (udma_is_chan_running(uc))
1559 udma_free_chan_resources(uc);
1566 static int udma_enable(struct dma *dma)
1568 struct udma_dev *ud = dev_get_priv(dma->dev);
1569 struct udma_chan *uc;
1572 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1573 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1576 uc = &ud->channels[dma->id];
1578 ret = udma_start(uc);
1583 static int udma_disable(struct dma *dma)
1585 struct udma_dev *ud = dev_get_priv(dma->dev);
1586 struct udma_chan *uc;
1589 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1590 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1593 uc = &ud->channels[dma->id];
1595 if (udma_is_chan_running(uc))
1596 ret = udma_stop(uc);
1598 dev_err(dma->dev, "%s not running\n", __func__);
1603 static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
1605 struct udma_dev *ud = dev_get_priv(dma->dev);
1606 struct cppi5_host_desc_t *desc_tx;
1607 dma_addr_t dma_src = (dma_addr_t)src;
1608 struct ti_udma_drv_packet_data packet_data = { 0 };
1610 struct udma_chan *uc;
1615 packet_data = *((struct ti_udma_drv_packet_data *)metadata);
1617 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1618 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1621 uc = &ud->channels[dma->id];
1623 if (uc->dir != DMA_MEM_TO_DEV)
1626 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1628 desc_tx = uc->desc_tx;
1630 cppi5_hdesc_reset_hbdesc(desc_tx);
1632 cppi5_hdesc_init(desc_tx,
1633 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
1635 cppi5_hdesc_set_pktlen(desc_tx, len);
1636 cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
1637 cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
1638 cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
1639 /* pass the packet type and destination tag supplied by the caller */
1640 cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
1641 cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);
1643 flush_dcache_range((u64)dma_src,
1644 ALIGN((u64)dma_src + len,
1645 ARCH_DMA_MINALIGN));
1646 flush_dcache_range((u64)desc_tx,
1647 ALIGN((u64)desc_tx + uc->hdesc_size,
1648 ARCH_DMA_MINALIGN));
1650 ret = k3_nav_ringacc_ring_push(uc->tchan->t_ring, &uc->desc_tx);
1652 dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
1657 udma_poll_completion(uc, &paddr);
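/*
 * dma_ops.receive: pop one completed host descriptor from the receive ring,
 * invalidate the descriptor and buffer caches, and return the buffer and
 * packet length to the caller.
 */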
1662 static int udma_receive(struct dma *dma, void **dst, void *metadata)
1664 struct udma_dev *ud = dev_get_priv(dma->dev);
1665 struct cppi5_host_desc_t *desc_rx;
1667 struct udma_chan *uc;
1668 u32 buf_dma_len, pkt_len;
1672 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1673 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1676 uc = &ud->channels[dma->id];
1678 if (uc->dir != DMA_DEV_TO_MEM)
1680 if (!uc->num_rx_bufs)
1683 ret = k3_nav_ringacc_ring_pop(uc->rchan->r_ring, &desc_rx);
1684 if (ret && ret != -ENODATA) {
1685 dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
1687 } else if (ret == -ENODATA) {
1691 /* invalidate cache data */
1692 invalidate_dcache_range((ulong)desc_rx,
1693 (ulong)desc_rx + uc->hdesc_size);
1695 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
1696 pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
1698 /* invalidate cache data */
1699 invalidate_dcache_range((ulong)buf_dma,
1700 (ulong)(buf_dma + buf_dma_len));
1702 cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
1704 *dst = (void *)buf_dma;
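/*
 * dma_ops.of_xlate: decode the DMA specifier (peripheral phandle, PSI-L thread
 * offset, direction) plus the peripheral's ti,psil-configN subnode to set up
 * pkt_mode, needs_epib, psd_size and the slave thread id.
 */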
1710 static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
1712 struct udma_dev *ud = dev_get_priv(dma->dev);
1713 struct udma_chan *uc = &ud->channels[0];
1714 ofnode chconf_node, slave_node;
1718 for (val = 0; val < ud->ch_count; val++) {
1719 uc = &ud->channels[val];
1724 if (val == ud->ch_count)
1727 uc->dir = DMA_DEV_TO_MEM;
1728 if (args->args[2] == UDMA_DIR_TX)
1729 uc->dir = DMA_MEM_TO_DEV;
1731 slave_node = ofnode_get_by_phandle(args->args[0]);
1732 if (!ofnode_valid(slave_node)) {
1733 dev_err(ud->dev, "slave node is missing\n");
1737 snprintf(prop, sizeof(prop), "ti,psil-config%u", args->args[1]);
1738 chconf_node = ofnode_find_subnode(slave_node, prop);
1739 if (!ofnode_valid(chconf_node)) {
1740 dev_err(ud->dev, "Channel configuration node is missing\n");
1744 if (!ofnode_read_u32(chconf_node, "linux,udma-mode", &val)) {
1745 if (val == UDMA_PKT_MODE)
1746 uc->pkt_mode = true;
1749 if (!ofnode_read_u32(chconf_node, "statictr-type", &val))
1750 uc->static_tr_type = val;
1752 uc->needs_epib = ofnode_read_bool(chconf_node, "ti,needs-epib");
1753 if (!ofnode_read_u32(chconf_node, "ti,psd-size", &val))
1755 uc->metadata_size = (uc->needs_epib ? 16 : 0) + uc->psd_size;
1757 if (ofnode_read_u32(slave_node, "ti,psil-base", &val)) {
1758 dev_err(ud->dev, "ti,psil-base is missing\n");
1762 uc->slave_thread_id = val + args->args[1];
1765 pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
1766 dma->id, uc->needs_epib,
1767 uc->psd_size, uc->metadata_size,
1768 uc->slave_thread_id);
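/*
 * dma_ops.prepare_rcv_buf: attach a client buffer to the next free RX host
 * descriptor and push it to the free descriptor ring; up to UDMA_RX_DESC_NUM
 * buffers can be outstanding.
 */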
1773 int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
1775 struct udma_dev *ud = dev_get_priv(dma->dev);
1776 struct cppi5_host_desc_t *desc_rx;
1778 struct udma_chan *uc;
1781 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1782 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1785 uc = &ud->channels[dma->id];
1787 if (uc->dir != DMA_DEV_TO_MEM)
1790 if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
1793 desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
1794 desc_rx = uc->desc_rx + (desc_num * uc->hdesc_size);
1795 dma_dst = (dma_addr_t)dst;
1797 cppi5_hdesc_reset_hbdesc(desc_rx);
1799 cppi5_hdesc_init(desc_rx,
1800 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
1802 cppi5_hdesc_set_pktlen(desc_rx, size);
1803 cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);
1805 flush_dcache_range((u64)desc_rx,
1806 ALIGN((u64)desc_rx + uc->hdesc_size,
1807 ARCH_DMA_MINALIGN));
1809 k3_nav_ringacc_ring_push(uc->rchan->fd_ring, &desc_rx);
1817 static int udma_get_cfg(struct dma *dma, u32 id, void **data)
1819 struct udma_dev *ud = dev_get_priv(dma->dev);
1820 struct udma_chan *uc;
1822 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
1823 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
1828 case TI_UDMA_CHAN_PRIV_INFO:
1829 uc = &ud->channels[dma->id];
1830 *data = &uc->cfg_data;
1837 static const struct dma_ops udma_ops = {
1838 .transfer = udma_transfer,
1839 .of_xlate = udma_of_xlate,
1840 .request = udma_request,
1842 .enable = udma_enable,
1843 .disable = udma_disable,
1845 .receive = udma_receive,
1846 .prepare_rcv_buf = udma_prepare_rcv_buf,
1847 .get_cfg = udma_get_cfg,
1850 static const struct udevice_id udma_ids[] = {
1851 { .compatible = "ti,k3-navss-udmap" },
1852 { .compatible = "ti,j721e-navss-mcu-udmap" },
1856 U_BOOT_DRIVER(ti_edma3) = {
1859 .of_match = udma_ids,
1861 .probe = udma_probe,
1862 .priv_auto_alloc_size = sizeof(struct udma_dev),
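/*
 * Illustrative sketch (not part of this driver): a peripheral driver is
 * expected to exercise these ops through U-Boot's DMA uclass helpers from
 * <dma.h>. Channel names, buffers and error handling below are hypothetical
 * and abbreviated.
 *
 *	struct dma dma_tx, dma_rx;
 *	struct ti_udma_drv_chan_cfg_data *cfg;
 *	struct ti_udma_drv_packet_data pdata = { 0 };
 *	void *pkt;
 *
 *	dma_get_by_name(dev, "tx0", &dma_tx);
 *	dma_get_by_name(dev, "rx", &dma_rx);
 *	dma_get_cfg(&dma_rx, TI_UDMA_CHAN_PRIV_INFO, (void **)&cfg);
 *	dma_prepare_rcv_buf(&dma_rx, rx_buf, PKTSIZE_ALIGN);
 *	dma_enable(&dma_rx);
 *	dma_enable(&dma_tx);
 *
 *	dma_send(&dma_tx, tx_buf, tx_len, &pdata);	// blocks until completed
 *	rx_len = dma_receive(&dma_rx, &pkt, NULL);	// returns the packet length
 *
 *	dma_disable(&dma_tx);
 *	dma_disable(&dma_rx);
 *	dma_free(&dma_tx);
 *	dma_free(&dma_rx);
 */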