/*
 * CPSW Ethernet Switch Driver
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <common.h>
#include <net.h>
#include <miiphy.h>
#include <malloc.h>
#include <fdtdec.h>
#include <dm.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <phy.h>
#include <asm/arch/cpu.h>
#include <fdt_support.h>

DECLARE_GLOBAL_DATA_PTR;

#define BITMASK(bits)		(BIT(bits) - 1)
#define PHY_REG_MASK		0x1f
#define PHY_ID_MASK		0x1f
#define NUM_DESCS		(PKTBUFSRX * 2)
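/* Maximum frame size: 1500-byte payload + 14-byte Ethernet header + VLAN tag + FCS */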
#define PKT_MAX			(1500 + 14 + 4 + 4)

#define GIGABITEN		BIT(7)
#define FULLDUPLEXEN		BIT(0)

#define CPSW_HOST_PORT_OFFSET	0x108
#define CPSW_SLAVE0_OFFSET	0x208
#define CPSW_SLAVE1_OFFSET	0x308
#define CPSW_SLAVE_SIZE		0x100
#define CPSW_CPDMA_OFFSET	0x800
#define CPSW_HW_STATS		0x900
#define CPSW_STATERAM_OFFSET	0xa00
#define CPSW_CPTS_OFFSET	0xc00
#define CPSW_ALE_OFFSET		0xd00
#define CPSW_SLIVER0_OFFSET	0xd80
#define CPSW_SLIVER1_OFFSET	0xdc0
#define CPSW_BD_OFFSET		0x2000
#define CPSW_MDIO_DIV		0xff

#define AM335X_GMII_SEL_OFFSET	0x630

#define CPDMA_TXCONTROL		0x004
#define CPDMA_RXCONTROL		0x014
#define CPDMA_SOFTRESET		0x01c
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP_VER1	0x100
#define CPDMA_TXHDP_VER2	0x200
#define CPDMA_RXHDP_VER1	0x120
#define CPDMA_RXHDP_VER2	0x220
#define CPDMA_TXCP_VER1		0x140
#define CPDMA_TXCP_VER2		0x240
#define CPDMA_RXCP_VER1		0x160
#define CPDMA_RXCP_VER2		0x260

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
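/*
 * Illustrative summary of the mode bits as used by cpdma_submit() and
 * cpdma_process() below: SOP/EOP mark the first/last descriptor of a packet
 * (always both set here, since each packet uses a single descriptor), OWNER
 * means the descriptor is still owned by the DMA engine, and EOQ is set by
 * hardware when it stopped because it reached the end of the chain.
 */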
/*
 * This timeout definition is a worst-case ultra defensive measure against
 * unexpected controller lockups. Ideally, we should never ever hit this
 * scenario in practice.
 */
#define MDIO_TIMEOUT		100 /* msecs */
#define CPDMA_TIMEOUT		100 /* msecs */

struct cpsw_mdio_regs {
#define CONTROL_IDLE		BIT(31)
#define CONTROL_ENABLE		BIT(30)
	u32	__reserved_1[20];
#define USERACCESS_GO		BIT(31)
#define USERACCESS_WRITE	BIT(30)
#define USERACCESS_ACK		BIT(29)
#define USERACCESS_READ		(0)
#define USERACCESS_DATA		(0xffff)
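/*
 * Layout of the MDIO USERACCESS word, as assembled by cpsw_mdio_read() and
 * cpsw_mdio_write() below: GO in bit 31, WRITE in bit 30, ACK in bit 29,
 * the PHY register number in bits 25:21, the PHY address in bits 20:16 and
 * the 16-bit data in bits 15:0.
 */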
struct cpsw_slave_regs {
#elif defined(CONFIG_TI814X)

struct cpsw_host_regs {
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;

struct cpsw_sliver_regs {

#define ALE_ENTRY_BITS		68
#define ALE_ENTRY_WORDS		DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
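/*
 * Each ALE table entry is 68 bits wide, so DIV_ROUND_UP(68, 32) = 3 words
 * are transferred through the ALE_TABLE window per entry.
 */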
#define ALE_CONTROL		0x08
#define ALE_UNKNOWNVLAN		0x18
#define ALE_TABLE_CONTROL	0x20
#define ALE_TABLE		0x34
#define ALE_PORTCTL		0x40

#define ALE_TABLE_WRITE		BIT(31)

#define ALE_TYPE_FREE		0
#define ALE_TYPE_ADDR		1
#define ALE_TYPE_VLAN		2
#define ALE_TYPE_VLAN_ADDR	3

#define ALE_UCAST_PERSISTANT	0
#define ALE_UCAST_UNTOUCHED	1
#define ALE_UCAST_OUI		2
#define ALE_UCAST_TOUCHED	3

#define ALE_MCAST_FWD			0
#define ALE_MCAST_BLOCK_LEARN_FWD	1
#define ALE_MCAST_FWD_LEARN		2
#define ALE_MCAST_FWD_2			3

enum cpsw_ale_port_state {
	ALE_PORT_STATE_DISABLE	= 0x00,
	ALE_PORT_STATE_BLOCK	= 0x01,
	ALE_PORT_STATE_LEARN	= 0x02,
	ALE_PORT_STATE_FORWARD	= 0x03,

/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
#define ALE_BLOCKED		2
	struct cpsw_slave_regs		*regs;
	struct cpsw_sliver_regs		*sliver;
	struct cpsw_slave_data		*data;

	/* hardware fields */
	/* software fields */

	struct cpdma_desc		*head, *tail;
	void				*hdp, *cp, *rxfree;
#define desc_write(desc, fld, val)	__raw_writel((u32)(val), &(desc)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define desc_read_ptr(desc, fld)	((void *)__raw_readl(&(desc)->fld))

#define chan_write(chan, fld, val)	__raw_writel((u32)(val), (chan)->fld)
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define chan_read_ptr(chan, fld)	((void *)__raw_readl((chan)->fld))

#define for_active_slave(slave, priv) \
	slave = (priv)->slaves + (priv)->data.active_slave; if (slave)
#define for_each_slave(slave, priv) \
	for (slave = (priv)->slaves; slave != (priv)->slaves + \
				(priv)->data.slaves; slave++)
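/*
 * Illustrative usage (both patterns appear later in this file):
 *
 *	for_each_slave(slave, priv)
 *		cpsw_slave_setup(slave, idx, priv);
 *
 *	for_active_slave(slave, priv)
 *		cpsw_slave_init(slave, priv);
 *
 * for_each_slave() walks every slave port, while for_active_slave() only
 * visits the port selected by the "active_slave" platform data field.
 */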
	struct eth_device		*dev;
	struct cpsw_platform_data	data;

	struct cpsw_regs		*regs;
	struct cpsw_host_regs		*host_port_regs;

	struct cpdma_desc		*descs;
	struct cpdma_desc		*desc_free;
	struct cpdma_chan		rx_chan, tx_chan;

	struct cpsw_slave		*slaves;
	struct phy_device		*phydev;
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
	idx = 2 - idx; /* flip */
	return (ale_entry[idx] >> start) & BITMASK(bits);

static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
				      u32 value)
	value &= BITMASK(bits);
	idx = 2 - idx; /* flip */
	ale_entry[idx] &= ~(BITMASK(bits) << start);
	ale_entry[idx] |= (value << start);
#define DEFINE_ALE_FIELD(name, start, bits)				\
static inline int cpsw_ale_get_##name(u32 *ale_entry)			\
	return cpsw_ale_get_field(ale_entry, start, bits);		\
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value)	\
	cpsw_ale_set_field(ale_entry, start, bits, value);		\

DEFINE_ALE_FIELD(entry_type,	60,	2)
DEFINE_ALE_FIELD(mcast_state,	62,	2)
DEFINE_ALE_FIELD(port_mask,	66,	3)
DEFINE_ALE_FIELD(ucast_type,	62,	2)
DEFINE_ALE_FIELD(port_num,	66,	2)
DEFINE_ALE_FIELD(blocked,	65,	1)
DEFINE_ALE_FIELD(secure,	64,	1)
DEFINE_ALE_FIELD(mcast,		40,	1)
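/*
 * Worked example of the field placement (assuming the word index is
 * start / 32 before the "flip" in cpsw_ale_get_field() above): the 68-bit
 * entry is stored most-significant word first, so entry_type at bit 60 lands
 * in ale_entry[1] bits 29:28, while port_mask at bit 66 lands in
 * ale_entry[0] bits 4:2.
 */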
/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
	for (i = 0; i < 6; i++)
		addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);

static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
	for (i = 0; i < 6; i++)
		cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
	__raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);

static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		__raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);

	__raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);
static int cpsw_ale_match_addr(struct cpsw_priv *priv, const u8 *addr)
	u32 ale_entry[ALE_ENTRY_WORDS];

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
		cpsw_ale_get_addr(ale_entry, entry_addr);
		if (memcmp(entry_addr, addr, 6) == 0)

static int cpsw_ale_match_free(struct cpsw_priv *priv)
	u32 ale_entry[ALE_ENTRY_WORDS];

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type == ALE_TYPE_FREE)

static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
	u32 ale_entry[ALE_ENTRY_WORDS];

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
		if (cpsw_ale_get_mcast(ale_entry))
		type = cpsw_ale_get_ucast_type(ale_entry);
		if (type != ALE_UCAST_PERSISTANT &&
		    type != ALE_UCAST_OUI)
static int cpsw_ale_add_ucast(struct cpsw_priv *priv, const u8 *addr,
			      int port, int flags)
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
	cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
	cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
	cpsw_ale_set_port_num(ale_entry, port);

	idx = cpsw_ale_match_addr(priv, addr);
		idx = cpsw_ale_match_free(priv);
		idx = cpsw_ale_find_ageable(priv);

	cpsw_ale_write(priv, idx, ale_entry);
static int cpsw_ale_add_mcast(struct cpsw_priv *priv, const u8 *addr,
			      int port_mask)
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};

	idx = cpsw_ale_match_addr(priv, addr);
		cpsw_ale_read(priv, idx, ale_entry);

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);

	mask = cpsw_ale_get_port_mask(ale_entry);
	cpsw_ale_set_port_mask(ale_entry, port_mask);

		idx = cpsw_ale_match_free(priv);
		idx = cpsw_ale_find_ageable(priv);

	cpsw_ale_write(priv, idx, ale_entry);
static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
	u32 tmp, mask = BIT(bit);

	tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
	tmp |= val ? mask : 0;
	__raw_writel(tmp, priv->ale_regs + ALE_CONTROL);

#define cpsw_ale_enable(priv, val)	cpsw_ale_control(priv, 31, val)
#define cpsw_ale_clear(priv, val)	cpsw_ale_control(priv, 30, val)
#define cpsw_ale_vlan_aware(priv, val)	cpsw_ale_control(priv, 2, val)
static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
				       int val)
	int offset = ALE_PORTCTL + 4 * port;

	tmp = __raw_readl(priv->ale_regs + offset);
	__raw_writel(tmp, priv->ale_regs + offset);
static struct cpsw_mdio_regs *mdio_regs;

/* wait until hardware is ready for another user access */
static inline u32 wait_for_user_access(void)
	int timeout = MDIO_TIMEOUT;

	       ((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))

	printf("wait_for_user_access Timeout\n");

/* wait until hardware state machine is idle */
static inline void wait_for_idle(void)
	int timeout = MDIO_TIMEOUT;

	       ((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))

	printf("wait_for_idle Timeout\n");
static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
			  int dev_addr, int phy_reg)
	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
	       (phy_id << 16));
	__raw_writel(reg, &mdio_regs->user[0].access);
	reg = wait_for_user_access();

	data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;

static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
			   int phy_reg, u16 data)
	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
	       (phy_id << 16) | (data & USERACCESS_DATA));
	__raw_writel(reg, &mdio_regs->user[0].access);
	wait_for_user_access();
static void cpsw_mdio_init(const char *name, u32 mdio_base, u32 div)
	struct mii_dev *bus = mdio_alloc();

	mdio_regs = (struct cpsw_mdio_regs *)mdio_base;

	/* set enable and clock divider */
	__raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);

	/*
	 * wait for scan logic to settle:
	 * the scan time consists of (a) a large fixed component, and (b) a
	 * small component that varies with the mii bus frequency. These
	 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
	 * silicon. Since the effect of (b) was found to be largely
	 * negligible, we keep things simple here.
	 */

	bus->read = cpsw_mdio_read;
	bus->write = cpsw_mdio_write;
	strcpy(bus->name, name);
/* Set a self-clearing bit in a register, and wait for it to clear */
static inline void setbit_and_wait_for_clear32(void *addr)
	__raw_writel(CLEAR_BIT, addr);
	while (__raw_readl(addr) & CLEAR_BIT)

#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
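/*
 * Worked example: for the MAC address 00:11:22:33:44:55, mac_hi() yields
 * 0x33221100 and mac_lo() yields 0x5544, which is what cpsw_set_slave_mac()
 * below writes into the slave's SA_HI and SA_LO registers.
 */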
static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
#ifdef CONFIG_DM_ETH
	struct eth_pdata *pdata = dev_get_platdata(priv->dev);

	writel(mac_hi(pdata->enetaddr), &slave->regs->sa_hi);
	writel(mac_lo(pdata->enetaddr), &slave->regs->sa_lo);
#else
	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
#endif
static void cpsw_slave_update_link(struct cpsw_slave *slave,
				   struct cpsw_priv *priv, int *link)
	struct phy_device *phy;

	if (*link) { /* link up */
		mac_control = priv->data.mac_control;
		if (phy->speed == 1000)
			mac_control |= GIGABITEN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= FULLDUPLEXEN;
		if (phy->speed == 100)
			mac_control |= MIIEN;

	if (mac_control == slave->mac_control)

		printf("link up on port %d, speed %d, %s duplex\n",
		       slave->slave_num, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
		printf("link down on port %d\n", slave->slave_num);

	__raw_writel(mac_control, &slave->sliver->mac_control);
	slave->mac_control = mac_control;
static int cpsw_update_link(struct cpsw_priv *priv)
	struct cpsw_slave *slave;

	for_active_slave(slave, priv)
		cpsw_slave_update_link(slave, priv, &link);

static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
	if (priv->host_port == 0)
		return slave_num + 1;
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
	__raw_writel(0x33221100, &slave->regs->tx_pri_map);

	/* setup max packet size and mac address */
	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	/* enable forwarding */
	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << slave_port);

	priv->phy_mask |= 1 << slave->data->phy_addr;
static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
	struct cpdma_desc *desc = priv->desc_free;

	priv->desc_free = desc_read_ptr(desc, hw_next);

static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
	desc_write(desc, hw_next, priv->desc_free);
	priv->desc_free = desc;
static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
			void *buffer, int len)
	struct cpdma_desc *desc, *prev;

	desc = cpdma_desc_alloc(priv);

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	/* simple case - first packet enqueued */
	chan_write(chan, hdp, desc);

	/* not the first packet - enqueue at the tail */
	desc_write(prev, hw_next, desc);

	/* next check if EOQ has been triggered already */
	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
		chan_write(chan, hdp, desc);

	chan_write(chan, rxfree, 1);
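/*
 * Usage note (from the callers below): the TX path hands packets to
 * cpdma_submit() on priv->tx_chan in _cpsw_send(), while the RX path uses it
 * on priv->rx_chan to (re)post PKTSIZE-sized receive buffers, both at init
 * time and after each received packet has been processed.
 */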
static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
			 void **buffer, int *len)
	struct cpdma_desc *desc = chan->head;

	status = desc_read(desc, hw_mode);

	*len = status & 0x7ff;

	*buffer = desc_read_ptr(desc, sw_buffer);

	if (status & CPDMA_DESC_OWNER) {
		if (chan_read(chan, hdp) == 0) {
			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
				chan_write(chan, hdp, desc);

	chan->head = desc_read_ptr(desc, hw_next);
	chan_write(chan, cp, desc);

	cpdma_desc_free(priv, desc);
static int _cpsw_init(struct cpsw_priv *priv, u8 *enetaddr)
	struct cpsw_slave *slave;

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* initialize and reset the address lookup engine */
	cpsw_ale_enable(priv, 1);
	cpsw_ale_clear(priv, 1);
	cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */

	/* setup host port priority mapping */
	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	/* disable priority elevation and enable statistics on all ports */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics collection only on the host port */
	__raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);
	__raw_writel(0x7, &priv->regs->stat_port_en);

	cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_ucast(priv, enetaddr, priv->host_port, ALE_SECURE);
	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << priv->host_port);

	for_active_slave(slave, priv)
		cpsw_slave_init(slave, priv);

	cpsw_update_link(priv);

	/* init descriptor pool */
	for (i = 0; i < NUM_DESCS; i++) {
		desc_write(&priv->descs[i], hw_next,
			   (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
	priv->desc_free = &priv->descs[0];
	/* initialize channels */
	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp	= priv->dma_regs + CPDMA_RXHDP_VER2;
		priv->rx_chan.cp	= priv->dma_regs + CPDMA_RXCP_VER2;
		priv->rx_chan.rxfree	= priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp	= priv->dma_regs + CPDMA_TXHDP_VER2;
		priv->tx_chan.cp	= priv->dma_regs + CPDMA_TXCP_VER2;
	} else {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp	= priv->dma_regs + CPDMA_RXHDP_VER1;
		priv->rx_chan.cp	= priv->dma_regs + CPDMA_RXCP_VER1;
		priv->rx_chan.rxfree	= priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp	= priv->dma_regs + CPDMA_TXHDP_VER1;
		priv->tx_chan.cp	= priv->dma_regs + CPDMA_TXCP_VER1;
	}

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 + 4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 + 4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 + 4 * i);
		}
	} else {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 + 4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 + 4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 + 4 * i);
		}
	}

	__raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
	__raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);

	/* submit rx descs */
	for (i = 0; i < PKTBUFSRX; i++) {
		ret = cpdma_submit(priv, &priv->rx_chan, net_rx_packets[i],
				   PKTSIZE);
		if (ret < 0) {
			printf("error %d submitting rx desc\n", ret);
static void _cpsw_halt(struct cpsw_priv *priv)
	writel(0, priv->dma_regs + CPDMA_TXCONTROL);
	writel(0, priv->dma_regs + CPDMA_RXCONTROL);

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
static int _cpsw_send(struct cpsw_priv *priv, void *packet, int length)
	int timeout = CPDMA_TIMEOUT;

	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	/* first reap completed packets */
	       (cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))

	printf("cpdma_process timeout\n");

	return cpdma_submit(priv, &priv->tx_chan, packet, length);

static int _cpsw_recv(struct cpsw_priv *priv, uchar **pkt)
	ret = cpdma_process(priv, &priv->rx_chan, &buffer, &len);

	invalidate_dcache_range((unsigned long)buffer,
				(unsigned long)buffer + PKTSIZE_ALIGN);
static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
			     struct cpsw_priv *priv)
	void			*regs = priv->regs;
	struct cpsw_slave_data	*data = priv->data.slave_data + slave_num;

	slave->slave_num = slave_num;
	slave->regs	= regs + data->slave_reg_ofs;
	slave->sliver	= regs + data->sliver_reg_ofs;

static int cpsw_phy_init(struct cpsw_priv *priv, struct cpsw_slave *slave)
	struct phy_device *phydev;
	u32 supported = PHY_GBIT_FEATURES;

	phydev = phy_connect(priv->bus,
			     slave->data->phy_addr,
			     priv->dev,
			     slave->data->phy_if);

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

	if (slave->data->phy_of_handle)
		phydev->dev->of_offset = slave->data->phy_of_handle;

	priv->phydev = phydev;
int _cpsw_register(struct cpsw_priv *priv)
	struct cpsw_slave	*slave;
	struct cpsw_platform_data *data = &priv->data;
	void			*regs = (void *)data->cpsw_base;

	priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);

	priv->host_port		= data->host_port_num;
	priv->host_port_regs	= regs + data->host_port_reg_ofs;
	priv->dma_regs		= regs + data->cpdma_reg_ofs;
	priv->ale_regs		= regs + data->ale_reg_ofs;
	priv->descs		= (void *)regs + data->bd_ram_ofs;

	for_each_slave(slave, priv) {
		cpsw_slave_setup(slave, idx, priv);

	cpsw_mdio_init(priv->dev->name, data->mdio_base, data->mdio_div);
	priv->bus = miiphy_get_dev_by_name(priv->dev->name);
	for_active_slave(slave, priv)
		cpsw_phy_init(priv, slave);
#ifndef CONFIG_DM_ETH
static int cpsw_init(struct eth_device *dev, bd_t *bis)
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_init(priv, dev->enetaddr);

static void cpsw_halt(struct eth_device *dev)
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_halt(priv);

static int cpsw_send(struct eth_device *dev, void *packet, int length)
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_send(priv, packet, length);

static int cpsw_recv(struct eth_device *dev)
	struct cpsw_priv *priv = dev->priv;

	len = _cpsw_recv(priv, &pkt);
		net_process_received_packet(pkt, len);
		cpdma_submit(priv, &priv->rx_chan, pkt, PKTSIZE);

int cpsw_register(struct cpsw_platform_data *data)
	struct cpsw_priv	*priv;
	struct eth_device	*dev;

	dev = calloc(sizeof(*dev), 1);

	priv = calloc(sizeof(*priv), 1);

	strcpy(dev->name, "cpsw");

	dev->init = cpsw_init;
	dev->halt = cpsw_halt;
	dev->send = cpsw_send;
	dev->recv = cpsw_recv;

	ret = _cpsw_register(priv);
		eth_unregister(dev);
#else /* CONFIG_DM_ETH */
static int cpsw_eth_start(struct udevice *dev)
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_init(priv, pdata->enetaddr);

static int cpsw_eth_send(struct udevice *dev, void *packet, int length)
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_send(priv, packet, length);

static int cpsw_eth_recv(struct udevice *dev, int flags, uchar **packetp)
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_recv(priv, packetp);

static int cpsw_eth_free_pkt(struct udevice *dev, uchar *packet,
			     int length)
	struct cpsw_priv *priv = dev_get_priv(dev);

	return cpdma_submit(priv, &priv->rx_chan, packet, PKTSIZE);

static void cpsw_eth_stop(struct udevice *dev)
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_halt(priv);

static int cpsw_eth_probe(struct udevice *dev)
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_register(priv);

static const struct eth_ops cpsw_eth_ops = {
	.start		= cpsw_eth_start,
	.send		= cpsw_eth_send,
	.recv		= cpsw_eth_recv,
	.free_pkt	= cpsw_eth_free_pkt,
	.stop		= cpsw_eth_stop,

static inline fdt_addr_t cpsw_get_addr_by_node(const void *fdt, int node)
	return fdtdec_get_addr_size_auto_noparent(fdt, node, "reg", 0, NULL,
						  false);
static int cpsw_eth_ofdata_to_platdata(struct udevice *dev)
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct cpsw_priv *priv = dev_get_priv(dev);
	struct gpio_desc *mode_gpios;
	const char *phy_mode;
	const void *fdt = gd->fdt_blob;
	int node = dev->of_offset;
	int slave_index = 0;

	pdata->iobase = dev_get_addr(dev);
	priv->data.version = CPSW_CTRL_VERSION_2;
	priv->data.bd_ram_ofs = CPSW_BD_OFFSET;
	priv->data.ale_reg_ofs = CPSW_ALE_OFFSET;
	priv->data.cpdma_reg_ofs = CPSW_CPDMA_OFFSET;
	priv->data.mdio_div = CPSW_MDIO_DIV;
	priv->data.host_port_reg_ofs = CPSW_HOST_PORT_OFFSET;

	pdata->phy_interface = -1;

	priv->data.cpsw_base = pdata->iobase;
	priv->data.channels = fdtdec_get_int(fdt, node, "cpdma_channels", -1);
	if (priv->data.channels <= 0) {
		printf("error: cpdma_channels not found in dt\n");

	priv->data.slaves = fdtdec_get_int(fdt, node, "slaves", -1);
	if (priv->data.slaves <= 0) {
		printf("error: slaves not found in dt\n");

	priv->data.slave_data = malloc(sizeof(struct cpsw_slave_data) *
				       priv->data.slaves);

	priv->data.ale_entries = fdtdec_get_int(fdt, node, "ale_entries", -1);
	if (priv->data.ale_entries <= 0) {
		printf("error: ale_entries not found in dt\n");

	priv->data.bd_ram_ofs = fdtdec_get_int(fdt, node, "bd_ram_size", -1);
	if (priv->data.bd_ram_ofs <= 0) {
		printf("error: bd_ram_size not found in dt\n");

	priv->data.mac_control = fdtdec_get_int(fdt, node, "mac_control", -1);
	if (priv->data.mac_control <= 0) {
		printf("error: mac_control not found in dt\n");

	num_mode_gpios = gpio_get_list_count(dev, "mode-gpios");
	if (num_mode_gpios > 0) {
		mode_gpios = malloc(sizeof(struct gpio_desc) *
				    num_mode_gpios);
		gpio_request_list_by_name(dev, "mode-gpios", mode_gpios,
					  num_mode_gpios, GPIOD_IS_OUT);

	active_slave = fdtdec_get_int(fdt, node, "active_slave", 0);
	priv->data.active_slave = active_slave;
	fdt_for_each_subnode(fdt, subnode, node) {
		name = fdt_get_name(fdt, subnode, &len);

		if (!strncmp(name, "mdio", 4)) {
			mdio_base = cpsw_get_addr_by_node(fdt, subnode);
			if (mdio_base == FDT_ADDR_T_NONE) {
				error("Not able to get MDIO address space\n");
			priv->data.mdio_base = mdio_base;

		if (!strncmp(name, "slave", 5)) {
			if (slave_index >= priv->data.slaves)
			phy_mode = fdt_getprop(fdt, subnode, "phy-mode", NULL);
			priv->data.slave_data[slave_index].phy_if =
					phy_get_interface_by_name(phy_mode);

			priv->data.slave_data[slave_index].phy_of_handle =
				fdtdec_lookup_phandle(fdt, subnode,
						      "phy-handle");

			if (priv->data.slave_data[slave_index].phy_of_handle >= 0) {
				priv->data.slave_data[slave_index].phy_addr =
					fdtdec_get_int(gd->fdt_blob,
						       priv->data.slave_data[slave_index].phy_of_handle,
						       "reg", -1);
			} else {
				fdtdec_get_int_array(fdt, subnode, "phy_id",
						     phy_id, 2);
				priv->data.slave_data[slave_index].phy_addr =
						phy_id[1];
			}

		if (!strncmp(name, "cpsw-phy-sel", 12)) {
			priv->data.gmii_sel = cpsw_get_addr_by_node(fdt,
								    subnode);
			if (priv->data.gmii_sel == FDT_ADDR_T_NONE) {
				error("Not able to get gmii_sel reg address\n");

	priv->data.slave_data[0].slave_reg_ofs = CPSW_SLAVE0_OFFSET;
	priv->data.slave_data[0].sliver_reg_ofs = CPSW_SLIVER0_OFFSET;

	if (priv->data.slaves == 2) {
		priv->data.slave_data[1].slave_reg_ofs = CPSW_SLAVE1_OFFSET;
		priv->data.slave_data[1].sliver_reg_ofs = CPSW_SLIVER1_OFFSET;

	ret = ti_cm_get_macid(dev, active_slave, pdata->enetaddr);
	if (ret < 0) {
		error("cpsw read efuse mac failed\n");

	pdata->phy_interface = priv->data.slave_data[active_slave].phy_if;
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
	switch (pdata->phy_interface) {
	case PHY_INTERFACE_MODE_MII:
		writel(MII_MODE_ENABLE, priv->data.gmii_sel);
		break;
	case PHY_INTERFACE_MODE_RMII:
		writel(RMII_MODE_ENABLE, priv->data.gmii_sel);
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		writel(RGMII_MODE_ENABLE, priv->data.gmii_sel);
		break;

static const struct udevice_id cpsw_eth_ids[] = {
	{ .compatible = "ti,cpsw" },
	{ .compatible = "ti,am335x-cpsw" },
U_BOOT_DRIVER(eth_cpsw) = {
	.of_match	= cpsw_eth_ids,
	.ofdata_to_platdata = cpsw_eth_ofdata_to_platdata,
	.probe	= cpsw_eth_probe,
	.ops	= &cpsw_eth_ops,
	.priv_auto_alloc_size = sizeof(struct cpsw_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags	= DM_FLAG_ALLOC_PRIV_DMA,
};
#endif /* CONFIG_DM_ETH */
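/*
 * Illustrative device-tree fragment for the properties parsed by
 * cpsw_eth_ofdata_to_platdata() above. This is only a sketch: the property
 * and subnode names come from the parser code, but the unit addresses and
 * values shown here are placeholders rather than taken from any particular
 * board file.
 *
 *	mac: ethernet@4a100000 {
 *		compatible = "ti,am335x-cpsw", "ti,cpsw";
 *		reg = <0x4a100000 0x800>;
 *		cpdma_channels = <8>;
 *		ale_entries = <1024>;
 *		bd_ram_size = <0x2000>;
 *		mac_control = <0x20>;
 *		slaves = <2>;
 *		active_slave = <0>;
 *
 *		mdio@4a101000 {
 *			reg = <0x4a101000 0x100>;
 *		};
 *
 *		slave@4a100200 {
 *			phy-mode = "rgmii-txid";
 *			phy-handle = <&ethphy0>;
 *		};
 *
 *		cpsw-phy-sel@44e10650 {
 *			reg = <0x44e10650 0x4>;
 *		};
 *	};
 */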