// SPDX-License-Identifier: GPL-2.0
/*
 * K3 NAVSS DMA glue interface
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 */
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"

struct k3_udma_glue_common {
	struct device chan_dev;
	struct udma_dev *udmax;
	const struct udma_tisci_rm *tisci_rm;
	struct k3_ringacc *ringacc;
	struct psil_endpoint_config *ep_config;

struct k3_udma_glue_tx_channel {
	struct k3_udma_glue_common common;
	struct udma_tchan *udma_tchanx;
	struct k3_ring *ringtx;
	struct k3_ring *ringtxcq;

struct k3_udma_glue_rx_flow {
	struct udma_rflow *udma_rflow;
	struct k3_ring *ringrx;
	struct k3_ring *ringrxfdq;

struct k3_udma_glue_rx_channel {
	struct k3_udma_glue_common common;
	struct udma_rchan *udma_rchanx;
	struct k3_udma_glue_rx_flow *flows;

static void k3_udma_chan_dev_release(struct device *dev)
	/* The struct containing the device is devm managed */

static struct class k3_udma_glue_devclass = {
	.name = "k3_udma_glue_chan",
	.dev_release = k3_udma_chan_dev_release,

#define K3_UDMAX_TDOWN_TIMEOUT_US 1000

static int of_k3_udma_glue_parse(struct device_node *udmax_np,
				 struct k3_udma_glue_common *common)
	common->udmax = of_xudma_dev_get(udmax_np, NULL);
	if (IS_ERR(common->udmax))
		return PTR_ERR(common->udmax);

	common->ringacc = xudma_get_ringacc(common->udmax);
	common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
		const char *name, struct k3_udma_glue_common *common,
	struct of_phandle_args dma_spec;

	index = of_property_match_string(chn_np, "dma-names", name);
	if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
	ret = of_k3_udma_glue_parse(dma_spec.np, common);

	thread_id = dma_spec.args[0];
	if (dma_spec.args_count == 2) {
		if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
			dev_err(common->dev, "Invalid channel atype: %u\n",
		if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
			dev_err(common->dev, "Invalid channel asel: %u\n",
		common->atype_asel = dma_spec.args[1];

	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {

	/* get psil endpoint config */
	common->ep_config = psil_get_ep_config(thread_id);
	if (IS_ERR(common->ep_config)) {
			"No configuration for psi-l thread 0x%04x\n",
		ret = PTR_ERR(common->ep_config);

	common->epib = common->ep_config->needs_epib;
	common->psdata_size = common->ep_config->psd_size;

	common->dst_thread = thread_id;
	common->src_thread = thread_id;

	of_node_put(dma_spec.np);
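
/*
 * Illustrative DT fragment for the lookup done above; the node and label
 * names are made up, only the "dmas"/"dma-names" layout matters here:
 *
 *	ethernet {
 *		dmas = <&main_pktdma 0xc500 15>, <&main_pktdma 0x4500 15>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * The first cell is the PSI-L thread ID (destination threads carry
 * K3_PSIL_DST_THREAD_ID_OFFSET, as checked above), the optional second cell
 * is the ATYPE (UDMA, 0..2) or ASEL (PKTDMA, 0..15) value.
 */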

static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
	struct device *dev = tx_chn->common.dev;

	dev_dbg(dev, "dump_tx_chn:\n"
		"udma_tchan_id: %d\n"
		"dst_thread: %08x\n",
		tx_chn->udma_tchan_id,
		tx_chn->common.src_thread,
		tx_chn->common.dst_thread);

static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_tchanrt_read(chn->udma_tchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));

static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
	const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = tx_chn->udma_tchan_id;
	if (tx_chn->tx_pause_on_err)
		req.tx_pause_on_err = 1;
	if (tx_chn->tx_filt_einfo)
		req.tx_filt_einfo = 1;
	if (tx_chn->tx_filt_pswords)
		req.tx_filt_pswords = 1;
	req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	if (tx_chn->tx_supr_tdpkt)
		req.tx_supr_tdpkt = 1;
	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	req.tx_atype = tx_chn->common.atype_asel;

	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);

struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
		const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
	struct k3_udma_glue_tx_channel *tx_chn;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
	/* parse the udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&tx_chn->common, true);

	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
						tx_chn->common.psdata_size,
						tx_chn->common.swdata_size);

	if (xudma_is_pktdma(tx_chn->common.udmax))
		tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
		tx_chn->udma_tchan_id = -1;

	/* request and cfg UDMAP TX channel */
	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
					      tx_chn->udma_tchan_id);
	if (IS_ERR(tx_chn->udma_tchanx)) {
		ret = PTR_ERR(tx_chn->udma_tchanx);
		dev_err(dev, "UDMAX tchanx get err %d\n", ret);

	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

	tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
	dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
		     tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
	ret = device_register(&tx_chn->common.chan_dev);
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&tx_chn->common.chan_dev);
		tx_chn->common.chan_dev.parent = NULL;

	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		tx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,

	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

	if (xudma_is_pktdma(tx_chn->common.udmax))
		tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
		tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
					    tx_chn->udma_tflow_id, -1,
		dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);

	/* Set the dma_dev for the rings to be configured */
	cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
	cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;

	/* Set the ASEL value for DMA rings of PKTDMA */
	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		cfg->tx_cfg.asel = tx_chn->common.atype_asel;
		cfg->txcq_cfg.asel = tx_chn->common.atype_asel;

	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
		dev_err(dev, "Failed to cfg ringtx %d\n", ret);

	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
		dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);

	/* request and cfg psi-l */
	tx_chn->common.src_thread =
			xudma_dev_get_psil_base(tx_chn->common.udmax) +
			tx_chn->udma_tchan_id;

	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
		dev_err(dev, "Failed to cfg tchan %d\n", ret);

	k3_udma_glue_dump_tx_chn(tx_chn);

	k3_udma_glue_release_tx_chn(tx_chn);
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
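
/*
 * Minimal usage sketch for a client driver. The "tx" name and the ring
 * sizes are illustrative, not mandated by this API:
 *
 *	struct k3_udma_glue_tx_channel_cfg cfg = { };
 *	struct k3_udma_glue_tx_channel *tx_chn;
 *
 *	cfg.swdata_size = sizeof(void *);
 *	cfg.tx_cfg.size = 1024;
 *	cfg.txcq_cfg.size = 1024;
 *
 *	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx", &cfg);
 *	if (IS_ERR(tx_chn))
 *		return PTR_ERR(tx_chn);
 */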

void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;

	if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
		xudma_tchan_put(tx_chn->common.udmax,
				tx_chn->udma_tchanx);

	if (tx_chn->ringtxcq)
		k3_ringacc_ring_free(tx_chn->ringtxcq);

		k3_ringacc_ring_free(tx_chn->ringtx);

	if (tx_chn->common.chan_dev.parent) {
		device_unregister(&tx_chn->common.chan_dev);
		tx_chn->common.chan_dev.parent = NULL;
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);

int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			     struct cppi5_host_desc_t *desc_tx,
	if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))

	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

	return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			    dma_addr_t *desc_dma)
	ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
		atomic_inc(&tx_chn->free_pkts);
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
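
/*
 * Typical TX descriptor flow, sketched for illustration. desc and desc_dma
 * come from a CPPI5 descriptor pool owned by the client; the push fails once
 * the free_pkts budget (sized from the TXCQ ring) is exhausted:
 *
 *	ret = k3_udma_glue_push_tx_chn(tx_chn, desc, desc_dma);
 *	if (ret)
 *		goto drop;
 *
 * On completion (usually from the TX interrupt handler) the client pops the
 * finished descriptors back and releases them; release_tx_desc() stands in
 * for the client's own cleanup:
 *
 *	while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma))
 *		release_tx_desc(priv, desc_dma);
 */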

int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
	ret = xudma_navss_psil_pair(tx_chn->common.udmax,
				    tx_chn->common.src_thread,
				    tx_chn->common.dst_thread);
		dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);

	tx_chn->psil_paired = true;

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

	xudma_tchanrt_write(tx_chn->udma_tchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");

	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);

void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

	val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_tchanrt_read(tx_chn->udma_tchanx,
					 UDMA_CHAN_RT_CTL_REG);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(tx_chn->common.dev, "TX tdown timeout\n");

	val = xudma_tchanrt_read(tx_chn->udma_tchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);

void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       void (*cleanup)(void *data, dma_addr_t desc_dma))
	struct device *dev = tx_chn->common.dev;
	/*
	 * The TXQ reset needs to be done in a special way as it is input for
	 * the UDMA and its state is cached by the UDMA, so:
	 * 1) save the TXQ occupancy
	 * 2) clean up the TXQ and call the .cleanup() callback for each desc
	 * 3) reset the TXQ in a special way
	 */
	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
	dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx);

	for (i = 0; i < occ_tx; i++) {
		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
			dev_err(dev, "TX reset pop %d\n", ret);
		cleanup(data, desc_dma);

	/* reset TXCQ as it is not input for udma - expected to be empty */
	k3_ringacc_ring_reset(tx_chn->ringtxcq);
	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
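
/*
 * A typical TX shutdown sequence, sketched for illustration. my_tx_cleanup()
 * is a client-provided callback matching the cleanup prototype used above:
 *
 *	k3_udma_glue_tdown_tx_chn(tx_chn, true);
 *	k3_udma_glue_reset_tx_chn(tx_chn, priv, my_tx_cleanup);
 *	k3_udma_glue_disable_tx_chn(tx_chn);
 *	k3_udma_glue_release_tx_chn(tx_chn);
 */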

u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
	return tx_chn->common.hdesc_size;
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
	return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);

int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
							  tx_chn->udma_tflow_id);
		tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
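
/*
 * The returned value is a Linux IRQ number which the client typically hooks
 * up itself; the handler name, flags and label below are illustrative only:
 *
 *	irq = k3_udma_glue_tx_get_irq(tx_chn);
 *	if (irq <= 0)
 *		return -ENXIO;
 *	ret = devm_request_irq(dev, irq, my_tx_irq_handler, IRQF_TRIGGER_HIGH,
 *			       "my-tx", priv);
 */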

k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
	if (xudma_is_pktdma(tx_chn->common.udmax) &&
	    (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
		return &tx_chn->common.chan_dev;

	return xudma_get_device(tx_chn->common.udmax);
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);

void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
	    !tx_chn->common.atype_asel)

	*addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_dma_to_cppi5_addr);

void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
	    !tx_chn->common.atype_asel)

	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_cppi5_to_dma_addr);
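
/*
 * Worked example of the PKTDMA address wrapping above, assuming
 * K3_ADDRESS_ASEL_SHIFT is 48 and the channel uses ASEL 14:
 *
 *	dma_addr_t addr = 0x000000009d000000ULL;
 *
 *	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn, &addr);
 *
 * addr is now 0x000e00009d000000, the value placed in the CPPI5 descriptor,
 * while
 *
 *	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn, &addr);
 *
 * masks the ASEL bits off again so the address can be handed back to the
 * DMA mapping API.
 */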

static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = rx_chn->udma_rchan_id;
	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
	/*
	 * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with the current
	 * sysfw and udmax implementation, so just configure it to an invalid
	 * value:
	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
	 */
	req.rxcq_qnum = 0xFFFF;
	if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
	    rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
		/* Default flow + extra ones */
		req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
				    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
		req.flowid_start = rx_chn->flow_id_base;
		req.flowid_cnt = rx_chn->flow_num;
	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	req.rx_atype = rx_chn->common.atype_asel;

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
			rx_chn->udma_rchan_id, ret);

static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	if (IS_ERR_OR_NULL(flow->udma_rflow))

		k3_ringacc_ring_free(flow->ringrxfdq);

		k3_ringacc_ring_free(flow->ringrx);

	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;
	rx_chn->flows_ready--;

static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
				    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;

	flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
					   flow->udma_rflow_id);
	if (IS_ERR(flow->udma_rflow)) {
		ret = PTR_ERR(flow->udma_rflow);
		dev_err(dev, "UDMAX rflow get err %d\n", ret);

	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		rx_ringfdq_id = flow->udma_rflow_id +
				xudma_get_rflow_ring_offset(rx_chn->common.udmax);
		rx_ring_id = flow_cfg->ring_rxq_id;
		rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
					    rx_ringfdq_id, rx_ring_id,
		dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);

	/* Set the dma_dev for the rings to be configured */
	flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
	flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;

	/* Set the ASEL value for DMA rings of PKTDMA */
	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
		flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;

	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
		goto err_ringrxfdq_free;

	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
		dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
		goto err_ringrxfdq_free;

	if (rx_chn->remote) {
		rx_ring_id = TI_SCI_RESOURCE_NULL;
		rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
		rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
		rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

	memset(&req, 0, sizeof(req));

			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	if (rx_chn->common.epib)
		req.rx_einfo_present = 1;
	if (rx_chn->common.psdata_size)
		req.rx_psinfo_present = 1;
	if (flow_cfg->rx_error_handling)
		req.rx_error_handling = 1;
	req.rx_desc_type = 0;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_src_tag_hi_sel = 0;
	req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
	req.rx_dest_tag_hi_sel = 0;
	req.rx_dest_tag_lo_sel = 0;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
		goto err_ringrxfdq_free;

	rx_chn->flows_ready++;
	dev_dbg(dev, "flow%d config done. ready:%d\n",
		flow->udma_rflow_id, rx_chn->flows_ready);

	k3_ringacc_ring_free(flow->ringrxfdq);
	k3_ringacc_ring_free(flow->ringrx);

	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;

static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "dump_rx_chn:\n"
		"udma_rchan_id: %d\n"
		chn->common.src_thread,
		chn->common.dst_thread,
		chn->common.hdesc_size,
		chn->common.psdata_size,
		chn->common.swdata_size,

static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);

	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_rchanrt_read(chn->udma_rchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));

k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
			       struct k3_udma_glue_rx_channel_cfg *cfg)
	if (cfg->flow_id_use_rxchan_id)
	/* not GP rflows */
	if (rx_chn->flow_id_base != -1 &&
	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))

	/* Allocate range of GP rflows */
	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
					 rx_chn->flow_id_base,
		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
			rx_chn->flow_id_base, rx_chn->flow_num, ret);

	rx_chn->flow_id_base = ret;

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
				 struct k3_udma_glue_rx_channel_cfg *cfg)
	struct k3_udma_glue_rx_channel *rx_chn;
	struct psil_endpoint_config *ep_cfg;

	if (cfg->flow_id_num <= 0)
		return ERR_PTR(-EINVAL);

	if (cfg->flow_id_num != 1 &&
	    (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
		return ERR_PTR(-EINVAL);

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = false;
	/* parse the udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	ep_cfg = rx_chn->common.ep_config;

	if (xudma_is_pktdma(rx_chn->common.udmax))
		rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
		rx_chn->udma_rchan_id = -1;

	/* request and cfg UDMAP RX channel */
	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
					      rx_chn->udma_rchan_id);
	if (IS_ERR(rx_chn->udma_rchanx)) {
		ret = PTR_ERR(rx_chn->udma_rchanx);
		dev_err(dev, "UDMAX rchanx get err %d\n", ret);

	rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
	dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
		     rx_chn->udma_rchan_id, rx_chn->common.src_thread);
	ret = device_register(&rx_chn->common.chan_dev);
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		rx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		int flow_start = cfg->flow_id_base;

		if (flow_start == -1)
			flow_start = ep_cfg->flow_start;

		flow_end = flow_start + cfg->flow_id_num - 1;
		if (flow_start < ep_cfg->flow_start ||
		    flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
			dev_err(dev, "Invalid flow range requested\n");

		rx_chn->flow_id_base = flow_start;
		rx_chn->flow_id_base = cfg->flow_id_base;

		/* Use RX channel id as flow id: target dev can't generate flow_id */
		if (cfg->flow_id_use_rxchan_id)
			rx_chn->flow_id_base = rx_chn->udma_rchan_id;

	rx_chn->flow_num = cfg->flow_id_num;

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	/* request and cfg psi-l */
	rx_chn->common.dst_thread =
			xudma_dev_get_psil_base(rx_chn->common.udmax) +
			rx_chn->udma_rchan_id;

	ret = k3_udma_glue_cfg_rx_chn(rx_chn);
		dev_err(dev, "Failed to cfg rchan %d\n", ret);

	/* init default RX flow only if flow_num = 1 */
	if (cfg->def_flow_cfg) {
		ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);

	k3_udma_glue_dump_rx_chn(rx_chn);

	k3_udma_glue_release_rx_chn(rx_chn);

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
				   struct k3_udma_glue_rx_channel_cfg *cfg)
	struct k3_udma_glue_rx_channel *rx_chn;

	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);
	/*
	 * The remote RX channel is under the control of a remote CPU core, so
	 * Linux can only request it and manipulate it through its dedicated
	 * RX flows.
	 */

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;
	/* parse the udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {

	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
	dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x",
		     rx_chn->common.src_thread);
	ret = device_register(&rx_chn->common.chan_dev);
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		rx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	k3_udma_glue_dump_rx_chn(rx_chn);

	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);

struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
			    struct k3_udma_glue_rx_channel_cfg *cfg)
		return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);

	return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
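
/*
 * Minimal usage sketch for a client driver requesting a single-flow RX
 * channel. The "rx" name and the ring sizes are illustrative; a flow_id_base
 * of -1 lets the glue layer pick the flow range itself:
 *
 *	struct k3_udma_glue_rx_channel_cfg cfg = { };
 *	struct k3_udma_glue_rx_flow_cfg flow_cfg = { };
 *	struct k3_udma_glue_rx_channel *rx_chn;
 *
 *	flow_cfg.rx_cfg.size = 1024;
 *	flow_cfg.rxfdq_cfg.size = 1024;
 *	flow_cfg.rx_error_handling = true;
 *
 *	cfg.swdata_size = sizeof(void *);
 *	cfg.flow_id_base = -1;
 *	cfg.flow_id_num = 1;
 *	cfg.def_flow_cfg = &flow_cfg;
 *
 *	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &cfg);
 *	if (IS_ERR(rx_chn))
 *		return PTR_ERR(rx_chn);
 */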

void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
	if (IS_ERR_OR_NULL(rx_chn->common.udmax))

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;

	for (i = 0; i < rx_chn->flow_num; i++)
		k3_udma_glue_release_rx_flow(rx_chn, i);

	if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		xudma_free_gp_rflow_range(rx_chn->common.udmax,
					  rx_chn->flow_id_base,

	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
		xudma_rchan_put(rx_chn->common.udmax,
				rx_chn->udma_rchanx);

	if (rx_chn->common.chan_dev.parent) {
		device_unregister(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);

int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
			      struct k3_udma_glue_rx_flow_cfg *flow_cfg)
	if (flow_idx >= rx_chn->flow_num)

	return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);

u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
	struct k3_udma_glue_rx_flow *flow;

	if (flow_idx >= rx_chn->flow_num)

	flow = &rx_chn->flows[flow_idx];

	return k3_ringacc_get_ring_id(flow->ringrxfdq);
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);

u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
	return rx_chn->flow_id_base;
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);

int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;

	if (!rx_chn->remote)

	rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
	rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

	memset(&req, 0, sizeof(req));

			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
		dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);

int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;

	if (!rx_chn->remote)

	memset(&req, 0, sizeof(req));
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
		dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);

int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
	if (rx_chn->flows_ready < rx_chn->flow_num)

	ret = xudma_navss_psil_pair(rx_chn->common.udmax,
				    rx_chn->common.src_thread,
				    rx_chn->common.dst_thread);
		dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);

	rx_chn->psil_paired = true;

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);

void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");

	xudma_rchanrt_write(rx_chn->udma_rchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);

void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);

	val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_rchanrt_read(rx_chn->udma_rchanx,
					 UDMA_CHAN_RT_CTL_REG);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(rx_chn->common.dev, "RX tdown timeout\n");

	val = xudma_rchanrt_read(rx_chn->udma_rchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);

void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       u32 flow_num, void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
	struct device *dev = rx_chn->common.dev;
	dma_addr_t desc_dma;

	/* reset RXCQ as it is not input for udma - expected to be empty */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);

	/* Skip RX FDQ in case one FDQ is used for the set of flows */
	/*
	 * The RX FDQ reset needs to be done in a special way as it is input
	 * for the UDMA and its state is cached by the UDMA, so:
	 * 1) save the RX FDQ occupancy
	 * 2) clean up the RX FDQ and call the .cleanup() callback for each desc
	 * 3) reset the RX FDQ in a special way
	 */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
	dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);

	for (i = 0; i < occ_rx; i++) {
		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
			if (ret != -ENODATA)
				dev_err(dev, "RX reset pop %d\n", ret);
		cleanup(data, desc_dma);

	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);

	k3_ringacc_ring_reset(flow->ringrx);
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
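
/*
 * A typical RX shutdown sequence for a single-flow channel, sketched for
 * illustration; my_rx_cleanup() is the client's own cleanup callback:
 *
 *	k3_udma_glue_tdown_rx_chn(rx_chn, true);
 *	k3_udma_glue_reset_rx_chn(rx_chn, 0, priv, my_rx_cleanup, false);
 *	k3_udma_glue_disable_rx_chn(rx_chn);
 *	k3_udma_glue_release_rx_chn(rx_chn);
 */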

int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			     u32 flow_num, struct cppi5_host_desc_t *desc_rx,
			     dma_addr_t desc_dma)
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);

int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num, dma_addr_t *desc_dma)
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);
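
/*
 * Typical RX descriptor flow, sketched for illustration: free buffers are
 * pushed to the flow's free descriptor queue, completed packets are popped
 * from its RX ring (usually from the flow's interrupt handler). Flow 0 and
 * process_rx_packet() are illustrative only:
 *
 *	ret = k3_udma_glue_push_rx_chn(rx_chn, 0, desc, desc_dma);
 *
 *	while (!k3_udma_glue_pop_rx_chn(rx_chn, 0, &desc_dma))
 *		process_rx_packet(priv, desc_dma);
 */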

int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
	struct k3_udma_glue_rx_flow *flow;

	flow = &rx_chn->flows[flow_num];

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
							flow->udma_rflow_id);
		flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);

k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
	if (xudma_is_pktdma(rx_chn->common.udmax) &&
	    (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
		return &rx_chn->common.chan_dev;

	return xudma_get_device(rx_chn->common.udmax);
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);

void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
	    !rx_chn->common.atype_asel)

	*addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_dma_to_cppi5_addr);

void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
	    !rx_chn->common.atype_asel)

	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_cppi5_to_dma_addr);

static int __init k3_udma_glue_class_init(void)
	return class_register(&k3_udma_glue_devclass);

module_init(k3_udma_glue_class_init);
MODULE_LICENSE("GPL v2");