2 * Copyright (C) 2014 Freescale Semiconductor
4 * SPDX-License-Identifier: GPL-2.0+
14 #include <linux/compat.h>
16 #include "ldpaa_eth.h"
/*
 * init_phy() - placeholder for external-PHY initialization.
 * NOTE(review): the body is truncated in this excerpt; only the TODO
 * marker is visible. Presumably returns 0 unconditionally — confirm
 * against the full file.
 */
18 static int init_phy(struct eth_device *dev)
20 /*TODO for external PHY */
/*
 * ldpaa_eth_rx() - process one received frame descriptor (FD).
 *
 * Reads the frame annotation status (when FASV is set), reports Rx
 * errors/unsupported features, passes the payload to the U-Boot net
 * core, then releases the buffer back to the QBMan buffer pool.
 *
 * NOTE(review): several lines are missing from this excerpt (locals
 * such as fd_addr/fd_offset/fd_length/status/err, braces, and the
 * "do {" that pairs with the trailing "} while").
 */
25 static void ldpaa_eth_rx(struct ldpaa_eth_priv *priv,
26 const struct dpaa_fd *fd)
31 struct ldpaa_fas *fas;
33 struct qbman_release_desc releasedesc;
34 struct qbman_swp *swp = dflt_dpio->sw_portal;
/* Make the DMA'd frame data visible to the CPU before reading it */
36 invalidate_dcache_all();
/* Unpack buffer address, payload offset and length from the FD */
38 fd_addr = ldpaa_fd_get_addr(fd);
39 fd_offset = ldpaa_fd_get_offset(fd);
40 fd_length = ldpaa_fd_get_len(fd);
42 debug("Rx frame:data addr=0x%p size=0x%x\n", (u64 *)fd_addr, fd_length);
/* FRC's FASV bit says the frame annotation status word is valid */
44 if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
45 /* Read the frame annotation status word and check for errors */
46 fas = (struct ldpaa_fas *)
47 ((uint8_t *)(fd_addr) +
48 priv->buf_layout.private_data_size);
49 status = le32_to_cpu(fas->status);
50 if (status & LDPAA_ETH_RX_ERR_MASK) {
51 printf("Rx frame error(s): 0x%08x\n",
52 status & LDPAA_ETH_RX_ERR_MASK);
54 } else if (status & LDPAA_ETH_RX_UNSUPP_MASK) {
55 printf("Unsupported feature in bitmask: 0x%08x\n",
56 status & LDPAA_ETH_RX_UNSUPP_MASK);
/* Hand the payload (buffer start + offset) to the network stack */
61 debug("Rx frame: To Upper layer\n");
62 net_process_received_packet((uint8_t *)(fd_addr) + fd_offset,
66 qbman_release_desc_clear(&releasedesc);
67 qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
69 /* Release buffer into the QBMAN; retry while the portal is busy */
70 err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
71 } while (err == -EBUSY);
/*
 * ldpaa_eth_pull_dequeue_rx() - U-Boot .recv callback.
 *
 * Issues a single-frame volatile dequeue ("pull") on the default Rx
 * frame queue and processes the resulting DQRR entry.
 *
 * NOTE(review): loop braces and return statements are not visible in
 * this excerpt; 'i = 5' looks like a bounded retry/poll count — confirm
 * against the full file.
 */
75 static int ldpaa_eth_pull_dequeue_rx(struct eth_device *dev)
77 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)dev->priv;
78 const struct ldpaa_dq *dq;
79 const struct dpaa_fd *fd;
80 int i = 5, err = 0, status;
81 static struct qbman_pull_desc pulldesc;
82 struct qbman_swp *swp = dflt_dpio->sw_portal;
/* Build a pull descriptor: one frame from the default Rx FQID */
84 qbman_pull_desc_clear(&pulldesc);
85 qbman_pull_desc_set_numframes(&pulldesc, 1);
86 qbman_pull_desc_set_fq(&pulldesc, priv->rx_dflt_fqid);
89 err = qbman_swp_pull(swp, &pulldesc);
91 printf("Dequeue frames error:0x%08x\n", err);
/* Fetch the next dequeue-response ring (DQRR) entry */
95 dq = qbman_swp_dqrr_next(swp);
97 /* Check for a valid frame: if none was delivered, just send a
98 * consume confirmation to QBMan; otherwise process the frame
99 * descriptor first and then consume the DQRR entry.
102 status = (uint8_t)ldpaa_dq_flags(dq);
103 if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
104 debug("Dequeue RX frames:");
105 debug("No frame delivered\n");
107 qbman_swp_dqrr_consume(swp, dq);
111 fd = ldpaa_dq_fd(dq);
113 /* Obtain FD and process it */
114 ldpaa_eth_rx(priv, fd);
115 qbman_swp_dqrr_consume(swp, dq);
/*
 * ldpaa_eth_tx_conf() - process one Tx-confirmation frame descriptor.
 *
 * Checks the frame annotation status for Tx errors (when FASV is set)
 * and releases the transmit buffer back to the QBMan buffer pool.
 *
 * NOTE(review): locals fd_addr/err declarations, braces and the
 * "do {" pairing with the trailing "} while" are missing from this
 * excerpt.
 */
123 static void ldpaa_eth_tx_conf(struct ldpaa_eth_priv *priv,
124 const struct dpaa_fd *fd)
127 struct ldpaa_fas *fas;
128 uint32_t status, err;
129 struct qbman_release_desc releasedesc;
130 struct qbman_swp *swp = dflt_dpio->sw_portal;
/* Ensure the CPU sees the confirmation data written by hardware */
132 invalidate_dcache_all();
133 fd_addr = ldpaa_fd_get_addr(fd);
136 debug("TX Conf frame:data addr=0x%p\n", (u64 *)fd_addr);
138 /* Check the status from the Frame Annotation */
139 if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
140 fas = (struct ldpaa_fas *)
141 ((uint8_t *)(fd_addr) +
142 priv->buf_layout.private_data_size);
143 status = le32_to_cpu(fas->status);
144 if (status & LDPAA_ETH_TXCONF_ERR_MASK) {
145 printf("TxConf frame error(s): 0x%08x\n",
146 status & LDPAA_ETH_TXCONF_ERR_MASK);
150 qbman_release_desc_clear(&releasedesc);
151 qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
153 /* Release buffer into the QBMAN; retry while the portal is busy */
154 err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
155 } while (err == -EBUSY);
/*
 * ldpaa_eth_pull_dequeue_tx_conf() - poll the Tx-confirmation queue.
 *
 * Issues a single-frame pull dequeue on the Tx-confirmation FQID and
 * processes the result via ldpaa_eth_tx_conf(). Mirrors the structure
 * of ldpaa_eth_pull_dequeue_rx().
 *
 * NOTE(review): declarations of err/status, braces and return
 * statements are missing from this excerpt.
 */
158 static int ldpaa_eth_pull_dequeue_tx_conf(struct ldpaa_eth_priv *priv)
160 const struct ldpaa_dq *dq;
161 const struct dpaa_fd *fd;
164 static struct qbman_pull_desc pulldesc;
165 struct qbman_swp *swp = dflt_dpio->sw_portal;
/* Build a pull descriptor: one frame from the Tx-confirmation FQID */
167 qbman_pull_desc_clear(&pulldesc);
168 qbman_pull_desc_set_numframes(&pulldesc, 1);
169 qbman_pull_desc_set_fq(&pulldesc, priv->tx_conf_fqid);
172 err = qbman_swp_pull(swp, &pulldesc);
174 printf("Dequeue TX conf frames error:0x%08x\n", err);
/* Fetch the next dequeue-response ring (DQRR) entry */
178 dq = qbman_swp_dqrr_next(swp);
180 /* Check for a valid frame: if none was delivered, just send a
181 * consume confirmation to QBMan; otherwise process the frame
182 * descriptor first and then consume the DQRR entry.
185 status = (uint8_t)ldpaa_dq_flags(dq);
186 if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
187 debug("Dequeue TX conf frames:");
188 debug("No frame is delivered\n");
190 qbman_swp_dqrr_consume(swp, dq);
193 fd = ldpaa_dq_fd(dq);
195 ldpaa_eth_tx_conf(priv, fd);
196 qbman_swp_dqrr_consume(swp, dq);
/*
 * ldpaa_eth_tx() - U-Boot .send callback.
 *
 * Acquires a buffer from the DPBP pool, copies the packet into it at
 * the Tx data offset, builds a frame descriptor and enqueues it via a
 * queuing destination (QD), then polls the Tx-confirmation queue.
 *
 * NOTE(review): declarations of fd/buffer_start, the "do {" header,
 * braces and return statements are missing from this excerpt.
 */
204 static int ldpaa_eth_tx(struct eth_device *net_dev, void *buf, int len)
206 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
209 int data_offset, err;
210 struct qbman_swp *swp = dflt_dpio->sw_portal;
211 struct qbman_eq_desc ed;
213 /* Setup the FD fields */
214 memset(&fd, 0, sizeof(fd));
216 data_offset = priv->tx_data_offset;
/* Acquire one buffer from the pool; retry while the portal is busy */
219 err = qbman_swp_acquire(dflt_dpio->sw_portal,
220 dflt_dpbp->dpbp_attr.bpid,
222 } while (err == -EBUSY);
225 printf("qbman_swp_acquire() failed\n");
229 debug("TX data: malloc buffer start=0x%p\n", (u64 *)buffer_start);
/* Copy the payload past the Tx data offset (annotation area) */
231 memcpy(((uint8_t *)(buffer_start) + data_offset), buf, len);
/* NOTE(review): U-Boot's flush_dcache_range() takes (start, end);
 * passing the buffer SIZE as the second argument looks wrong — it
 * should presumably be buffer_start + LDPAA_ETH_RX_BUFFER_SIZE.
 * Confirm against the cache API before changing.
 */
233 flush_dcache_range(buffer_start, LDPAA_ETH_RX_BUFFER_SIZE);
/* Populate the FD: address, offset, backing pool and length */
235 ldpaa_fd_set_addr(&fd, (u64)buffer_start);
236 ldpaa_fd_set_offset(&fd, (uint16_t)(data_offset));
237 ldpaa_fd_set_bpid(&fd, dflt_dpbp->dpbp_attr.bpid);
238 ldpaa_fd_set_len(&fd, len);
240 fd.simple.ctrl = LDPAA_FD_CTRL_ASAL | LDPAA_FD_CTRL_PTA |
/* Enqueue without order restoration, targeting the Tx QDID/flow */
243 qbman_eq_desc_clear(&ed);
244 qbman_eq_desc_set_no_orp(&ed, 0);
245 qbman_eq_desc_set_qd(&ed, priv->tx_qdid, priv->tx_flow_id, 0);
246 err = qbman_swp_enqueue(swp, &ed, (const struct qbman_fd *)(&fd));
248 printf("error enqueueing Tx frame\n");
/* Reclaim the buffer once transmission is confirmed */
252 err = ldpaa_eth_pull_dequeue_tx_conf(priv);
254 printf("error Tx Conf frame\n");
/*
 * ldpaa_eth_open() - U-Boot .init callback: bring the interface up.
 *
 * Sets up the DPNI and DPBP objects, binds them, programs the MAC
 * address, starts the PHY, enables the DPNI and caches the Rx FQID,
 * Tx QDID and Tx-confirmation FQID for the datapath.
 *
 * NOTE(review): declarations of err/ret/mac_addr, braces, early
 * "return 0" for the already-active case, and the error-label lines
 * between the visible cleanup calls are missing from this excerpt.
 */
261 static int ldpaa_eth_open(struct eth_device *net_dev, bd_t *bd)
262 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
263 struct dpni_queue_attr rx_queue_attr;
264 struct dpni_tx_flow_attr tx_flow_attr;
/* Nothing to do if the interface is already up */
267 if (net_dev->state == ETH_STATE_ACTIVE)
270 /* DPNI initialization */
271 err = ldpaa_dpni_setup(priv);
275 err = ldpaa_dpbp_setup();
279 /* DPNI binding DPBP */
280 err = ldpaa_dpni_bind(priv);
/* Read the factory MAC from the management complex (MC) */
284 err = dpni_get_primary_mac_addr(dflt_mc_io, priv->dpni_handle,
287 printf("dpni_get_primary_mac_addr() failed\n");
291 memcpy(net_dev->enetaddr, mac_addr, 0x6);
293 /* setup the MAC address; reject multicast (I/G bit set) */
294 if (net_dev->enetaddr[0] & 0x01) {
295 printf("%s: MacAddress is multcast address\n", __func__);
300 /* TODO Check this path */
301 ret = phy_startup(priv->phydev);
303 printf("%s: Could not initialize\n", priv->phydev->dev->name);
/* Link parameters are currently forced: 1G full duplex, link up */
307 priv->phydev->speed = SPEED_1000;
308 priv->phydev->link = 1;
309 priv->phydev->duplex = DUPLEX_FULL;
312 err = dpni_enable(dflt_mc_io, priv->dpni_handle);
314 printf("dpni_enable() failed\n");
318 /* TODO: support multiple Rx flows */
319 err = dpni_get_rx_flow(dflt_mc_io, priv->dpni_handle, 0, 0,
322 printf("dpni_get_rx_flow() failed\n");
326 priv->rx_dflt_fqid = rx_queue_attr.fqid;
/* Queuing destination ID used when enqueueing Tx frames */
328 err = dpni_get_qdid(dflt_mc_io, priv->dpni_handle, &priv->tx_qdid);
330 printf("dpni_get_qdid() failed\n");
334 err = dpni_get_tx_flow(dflt_mc_io, priv->dpni_handle, priv->tx_flow_id,
337 printf("dpni_get_tx_flow() failed\n");
341 priv->tx_conf_fqid = tx_flow_attr.conf_err_attr.queue_attr.fqid;
343 if (!priv->phydev->link)
344 printf("%s: No link.\n", priv->phydev->dev->name);
346 return priv->phydev->link ? 0 : -1;
/* Error cleanup path (labels not visible in this excerpt) */
351 dpni_disable(dflt_mc_io, priv->dpni_handle);
355 dpni_close(dflt_mc_io, priv->dpni_handle);
/*
 * ldpaa_eth_stop() - U-Boot .halt callback: bring the interface down.
 *
 * Disables the DPNI (stopping Tx/Rx traffic), shuts the PHY down and
 * resets/closes the DPNI object.
 *
 * NOTE(review): the declaration of err, braces, the early return for
 * the already-passive case, and any buffer-pool teardown between the
 * visible lines are missing from this excerpt.
 */
362 static void ldpaa_eth_stop(struct eth_device *net_dev)
364 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
/* Nothing to do if the interface is already down */
365 if (net_dev->state == ETH_STATE_PASSIVE)
367 /* Stop Tx and Rx traffic */
368 err = dpni_disable(dflt_mc_io, priv->dpni_handle);
370 printf("dpni_disable() failed\n");
373 phy_shutdown(priv->phydev);
/* Return the DPNI object to a clean state and release the handle */
377 dpni_reset(dflt_mc_io, priv->dpni_handle);
378 dpni_close(dflt_mc_io, priv->dpni_handle);
/*
 * ldpaa_dpbp_drain_cnt() - acquire up to 7 buffers from the default
 * DPBP and free them back to the heap.
 *
 * NOTE(review): the 'count' parameter is not used on any visible line —
 * it is presumably passed to qbman_swp_acquire() on a line missing from
 * this excerpt; likewise the free() call after the debug print and the
 * declarations of ret/i/addr are not visible. Confirm before changing.
 */
381 static void ldpaa_dpbp_drain_cnt(int count)
383 uint64_t buf_array[7];
390 ret = qbman_swp_acquire(dflt_dpio->sw_portal,
391 dflt_dpbp->dpbp_attr.bpid,
394 printf("qbman_swp_acquire() failed\n");
/* qbman_swp_acquire() returns the number of buffers obtained */
397 for (i = 0; i < ret; i++) {
398 addr = (void *)buf_array[i];
399 debug("Free: buffer addr =0x%p\n", addr);
/*
 * ldpaa_dpbp_drain() - drain the whole buffer pool in chunks of 7,
 * the maximum number of buffers per QBMan acquire command.
 */
405 static void ldpaa_dpbp_drain(void)
408 for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7)
409 ldpaa_dpbp_drain_cnt(7)
/*
 * ldpaa_bp_add_7() - allocate 7 cache-line-aligned Rx buffers and
 * release them into the buffer pool identified by @bpid.
 *
 * 7 is the maximum number of buffers a single QBMan release command
 * can carry. Returns the number of buffers seeded (per the caller in
 * ldpaa_dpbp_seed()).
 *
 * NOTE(review): declarations of i/addr, braces, the allocation-failure
 * branch body and any cache flush between the visible lines are missing
 * from this excerpt.
 */
412 static int ldpaa_bp_add_7(uint16_t bpid)
414 uint64_t buf_array[7];
417 struct qbman_release_desc rd;
419 for (i = 0; i < 7; i++) {
420 addr = memalign(L1_CACHE_BYTES, LDPAA_ETH_RX_BUFFER_SIZE);
422 printf("addr allocation failed\n");
/* Zero-fill so no stale heap data leaks into transmitted frames */
425 memset(addr, 0x00, LDPAA_ETH_RX_BUFFER_SIZE);
427 buf_array[i] = (uint64_t)addr;
428 debug("Release: buffer addr =0x%p\n", addr);
432 /* In case the portal is busy, retry until successful.
433 * This function is guaranteed to succeed in a reasonable amount
439 qbman_release_desc_clear(&rd);
440 qbman_release_desc_set_bpid(&rd, bpid);
441 } while (qbman_swp_release(dflt_dpio->sw_portal, &rd, buf_array, i));
/*
 * ldpaa_dpbp_seed() - fill the buffer pool @bpid with
 * LDPAA_ETH_NUM_BUFS buffers, seeded 7 at a time (the per-release
 * maximum) via ldpaa_bp_add_7().
 *
 * NOTE(review): declarations of i/count and the return statement are
 * missing from this excerpt.
 */
452 static int ldpaa_dpbp_seed(uint16_t bpid)
457 for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7) {
458 count = ldpaa_bp_add_7(bpid);
460 printf("Buffer Seed= %d\n", count);
/*
 * ldpaa_dpbp_setup() - open, enable and seed the default buffer pool.
 *
 * Opens the DPBP object through the MC, enables it, queries its
 * attributes (for the hardware bpid) and seeds it with Rx buffers.
 *
 * NOTE(review): the declaration of err, goto statements between the
 * visible error prints and the cleanup labels, and the final return
 * are missing from this excerpt.
 */
466 static int ldpaa_dpbp_setup(void)
470 err = dpbp_open(dflt_mc_io, dflt_dpbp->dpbp_attr.id,
471 &dflt_dpbp->dpbp_handle);
473 printf("dpbp_open() failed\n");
477 err = dpbp_enable(dflt_mc_io, dflt_dpbp->dpbp_handle);
479 printf("dpbp_enable() failed\n");
/* Fetch attributes to learn the hardware buffer-pool id (bpid) */
483 err = dpbp_get_attributes(dflt_mc_io, dflt_dpbp->dpbp_handle,
484 &dflt_dpbp->dpbp_attr);
486 printf("dpbp_get_attributes() failed\n");
490 err = ldpaa_dpbp_seed(dflt_dpbp->dpbp_attr.bpid);
492 printf("Buffer seeding failed for DPBP %d (bpid=%d)\n",
493 dflt_dpbp->dpbp_attr.id, dflt_dpbp->dpbp_attr.bpid);
/* Error cleanup path (labels not visible in this excerpt) */
501 dpbp_disable(dflt_mc_io, dflt_dpbp->dpbp_handle);
503 dpbp_close(dflt_mc_io, dflt_dpbp->dpbp_handle);
/*
 * ldpaa_dpbp_free() - tear down the default buffer pool: disable it,
 * reset it to a clean state and release the MC handle.
 * NOTE(review): any ldpaa_dpbp_drain() call preceding these lines is
 * not visible in this excerpt.
 */
508 static void ldpaa_dpbp_free(void)
511 dpbp_disable(dflt_mc_io, dflt_dpbp->dpbp_handle);
512 dpbp_reset(dflt_mc_io, dflt_dpbp->dpbp_handle);
513 dpbp_close(dflt_mc_io, dflt_dpbp->dpbp_handle);
/*
 * ldpaa_dpni_setup() - open the DPNI object and configure buffer
 * layouts for the Rx, Tx and Tx-confirmation queues.
 *
 * The Rx layout requests parser results, frame status and a private
 * (software annotation) area; Tx drops the parser result; Tx-conf
 * additionally drops the private data area. Finally the minimum Tx
 * data offset is queried and padded by the SWA size.
 *
 * NOTE(review): the declaration of err, goto statements after the
 * error prints, the err_* labels, and the final return are missing
 * from this excerpt.
 */
516 static int ldpaa_dpni_setup(struct ldpaa_eth_priv *priv)
520 /* and get a handle for the DPNI this interface is associate with */
521 err = dpni_open(dflt_mc_io, priv->dpni_id, &priv->dpni_handle);
523 printf("dpni_open() failed\n");
527 err = dpni_get_attributes(dflt_mc_io, priv->dpni_handle,
530 printf("dpni_get_attributes() failed (err=%d)\n", err);
534 /* Configure our buffers' layout */
535 priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
536 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
537 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
538 priv->buf_layout.pass_parser_result = true;
539 priv->buf_layout.pass_frame_status = true;
540 priv->buf_layout.private_data_size = LDPAA_ETH_SWA_SIZE;
/* Rx queue buffer layout: parser result + frame status + SWA area */
542 err = dpni_set_rx_buffer_layout(dflt_mc_io, priv->dpni_handle,
545 printf("dpni_set_rx_buffer_layout() failed");
/* Tx reuses the same layout minus the parser result */
550 priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PARSER_RESULT;
551 err = dpni_set_tx_buffer_layout(dflt_mc_io, priv->dpni_handle,
554 printf("dpni_set_tx_buffer_layout() failed");
558 /* ... tx-confirm: additionally drop the private data area */
559 priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
560 err = dpni_set_tx_conf_buffer_layout(dflt_mc_io, priv->dpni_handle,
563 printf("dpni_set_tx_conf_buffer_layout() failed");
567 /* Now that we've set our tx buffer layout, retrieve the minimum
568 * required tx data offset.
570 err = dpni_get_tx_data_offset(dflt_mc_io, priv->dpni_handle,
571 &priv->tx_data_offset);
573 printf("dpni_get_tx_data_offset() failed\n");
574 goto err_data_offset;
577 /* Warn in case TX data offset is not multiple of 64 bytes. */
578 WARN_ON(priv->tx_data_offset % 64);
580 /* Accommodate SWA space. */
581 priv->tx_data_offset += LDPAA_ETH_SWA_SIZE;
582 debug("priv->tx_data_offset=%d\n", priv->tx_data_offset);
/* Error cleanup path (labels not visible in this excerpt) */
589 dpni_close(dflt_mc_io, priv->dpni_handle);
/*
 * ldpaa_dpni_bind() - attach the default DPBP to the DPNI and create
 * the default Tx flow.
 *
 * Registers the buffer pool (id + buffer size) with the network
 * interface, then requests a new Tx flow id from the MC via
 * DPNI_NEW_FLOW_ID; the id assigned is written back into
 * priv->tx_flow_id.
 *
 * NOTE(review): the declaration of err, memset of pools_params, goto/
 * return statements after the error prints, and the final return are
 * missing from this excerpt.
 */
594 static int ldpaa_dpni_bind(struct ldpaa_eth_priv *priv)
596 struct dpni_pools_cfg pools_params;
597 struct dpni_tx_flow_cfg dflt_tx_flow;
/* One pool: the default DPBP, serving LDPAA_ETH_RX_BUFFER_SIZE bufs */
600 pools_params.num_dpbp = 1;
601 pools_params.pools[0].dpbp_id = (uint16_t)dflt_dpbp->dpbp_attr.id;
602 pools_params.pools[0].buffer_size = LDPAA_ETH_RX_BUFFER_SIZE;
603 err = dpni_set_pools(dflt_mc_io, priv->dpni_handle, &pools_params);
605 printf("dpni_set_pools() failed\n");
/* Ask the MC to allocate a flow id; it is returned in tx_flow_id */
609 priv->tx_flow_id = DPNI_NEW_FLOW_ID;
610 memset(&dflt_tx_flow, 0, sizeof(dflt_tx_flow));
612 err = dpni_set_tx_flow(dflt_mc_io, priv->dpni_handle,
613 &priv->tx_flow_id, &dflt_tx_flow);
615 printf("dpni_set_tx_flow() failed\n");
/*
 * ldpaa_eth_netdev_init() - populate the U-Boot eth_device: pick a
 * name (DTSEC<n> for 1G, TGEC<n> otherwise), wire up the driver ops,
 * initialize the PHY and register the device with the net core.
 *
 * NOTE(review): braces and return statements are missing from this
 * excerpt. The 'info->...' assignments below reference an undeclared
 * 'info' — in the full file these are presumably inside a commented-out
 * TODO region; confirm before touching them.
 */
622 static int ldpaa_eth_netdev_init(struct eth_device *net_dev)
625 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
627 if (priv->type == LDPAA_ETH_1G_E)
628 sprintf(net_dev->name, "DTSEC%d", priv->dpni_id);
630 sprintf(net_dev->name, "TGEC%d", priv->dpni_id);
/* Standard U-Boot eth_device driver callbacks */
633 net_dev->init = ldpaa_eth_open;
634 net_dev->halt = ldpaa_eth_stop;
635 net_dev->send = ldpaa_eth_tx;
636 net_dev->recv = ldpaa_eth_pull_dequeue_rx;
638 TODO: PHY MDIO information
639 priv->bus = info->bus;
640 priv->phyaddr = info->phy_addr;
641 priv->enet_if = info->enet_if;
644 if (init_phy(net_dev))
647 err = eth_register(net_dev);
649 printf("eth_register() = %d\n", err);
/*
 * ldpaa_eth_init() - public entry point: create one ldpaa ethernet
 * interface for the DPNI described by @obj_desc.
 *
 * Allocates and zeroes the eth_device and its private state, links
 * them together, records the DPNI id, and registers the device via
 * ldpaa_eth_netdev_init().
 *
 * NOTE(review): this function continues past the end of this excerpt;
 * NULL-check branches after the mallocs, the success return and the
 * remainder of the error cleanup (frees) are not visible here.
 */
656 int ldpaa_eth_init(struct dprc_obj_desc obj_desc)
658 struct eth_device *net_dev = NULL;
659 struct ldpaa_eth_priv *priv = NULL;
/* Net device allocation */
664 net_dev = (struct eth_device *)malloc(sizeof(struct eth_device));
666 printf("eth_device malloc() failed\n");
669 memset(net_dev, 0, sizeof(struct eth_device));
671 /* alloc the ldpaa ethernet private struct */
672 priv = (struct ldpaa_eth_priv *)malloc(sizeof(struct ldpaa_eth_priv));
674 printf("ldpaa_eth_priv malloc() failed\n");
677 memset(priv, 0, sizeof(struct ldpaa_eth_priv));
/* Cross-link device and private state; remember the DPNI object id */
679 net_dev->priv = (void *)priv;
680 priv->net_dev = (struct eth_device *)net_dev;
681 priv->dpni_id = obj_desc.id;
683 err = ldpaa_eth_netdev_init(net_dev);
685 goto err_netdev_init;
687 debug("ldpaa ethernet: Probed interface %s\n", net_dev->name);
/* Error cleanup path (continues past this excerpt) */
692 net_dev->priv = NULL;