// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Xilinx TEMAC Ethernet device
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 *
 * This is a driver for the Xilinx ll_temac ipcore which is often used
 * in the Virtex and Spartan series of chips.
 *
 * Notes:
 * - The ll_temac hardware uses indirect access for many of the TEMAC
 *   registers, including the MDIO bus. However, indirect access to MDIO
 *   registers takes considerably more clock cycles than access to TEMAC
 *   registers. MDIO accesses are long, so threads doing them should
 *   probably sleep rather than busywait. However, since only one
 *   indirect access can be in progress at any given time, that means
 *   that *all* indirect accesses could end up sleeping (to wait for an
 *   MDIO access to complete). Fortunately none of the indirect accesses
 *   are on the 'hot' path for tx or rx, so this should be okay.
 *
 * TODO:
 * - Factor out locallink DMA code into separate driver
 * - Fix support for hardware checksumming.
 * - Testing. Lots and lots of testing.
 */
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>	/* needed for sizeof(tcphdr) */
#include <linux/udp.h>	/* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>

#include "ll_temac.h"
/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		64
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096
/* ---------------------------------------------------------------------
 * Low level register access functions
 */

static u32 _temac_ior_be(struct temac_local *lp, int offset)
{
	return ioread32be(lp->regs + offset);
}

static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
{
	iowrite32be(value, lp->regs + offset);
}

static u32 _temac_ior_le(struct temac_local *lp, int offset)
{
	return ioread32(lp->regs + offset);
}

static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
{
	iowrite32(value, lp->regs + offset);
}
static bool hard_acs_rdy(struct temac_local *lp)
{
	return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
}

static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
{
	ktime_t cur = ktime_get();

	return hard_acs_rdy(lp) || ktime_after(cur, timeout);
}

/* Poll for maximum 20 ms. This is similar to the 2 jiffies @ 100 Hz
 * that was used before, and should cover MDIO bus speed down to 3200
 * Hz.
 */
#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
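/* Illustrative sanity check (editorial note, not driver code): a
 * Clause 22 MDIO frame is 64 bit times, so at the slowest bus speed
 * named above one access takes roughly 64 / 3200 Hz = 20 ms, which is
 * exactly the poll budget chosen for HARD_ACS_RDY_POLL_NS.
 */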
/**
 * temac_indirect_busywait - Wait for current indirect register access
 * to complete.
 */
int temac_indirect_busywait(struct temac_local *lp)
{
	ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);

	spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
	if (WARN_ON(!hard_acs_rdy(lp)))
		return -ETIMEDOUT;

	return 0;
}
/**
 * temac_indirect_in32 - Indirect register read access. This function
 * must be called without lp->indirect_lock being held.
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, reg);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	return val;
}
/**
 * temac_indirect_in32_locked - Indirect register read access. This
 * function must be called with lp->indirect_lock being held. Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
{
	/* This initial wait should normally not spin, as we always
	 * try to wait for indirect access to complete before
	 * releasing the indirect_lock.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Initiate read from indirect register */
	temac_iow(lp, XTE_CTL0_OFFSET, reg);
	/* Wait for indirect register access to complete. We really
	 * should not see timeouts, and could even end up causing
	 * problem for following indirect access, so let's make a bit
	 * of WARN noise.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Value is ready now */
	return temac_ior(lp, XTE_LSW0_OFFSET);
}
/**
 * temac_indirect_out32 - Indirect register write access. This function
 * must be called without lp->indirect_lock being held.
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, reg, value);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}
/**
 * temac_indirect_out32_locked - Indirect register write access. This
 * function must be called with lp->indirect_lock being held. Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
{
	/* As in temac_indirect_in32_locked(), we should normally not
	 * spin here. And if it happens, we actually end up silently
	 * ignoring the write request. Ouch.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return;
	/* Initiate write to indirect register */
	temac_iow(lp, XTE_LSW0_OFFSET, value);
	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
	/* As in temac_indirect_in32_locked(), we should not see timeouts
	 * here. And if it happens, we continue before the write has
	 * completed. Not good.
	 */
	WARN_ON(temac_indirect_busywait(lp));
}
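/* Illustrative only, not part of the driver: a caller that needs to
 * touch several indirect registers takes lp->indirect_lock once and
 * uses the *_locked accessors, as the comments above describe. A
 * minimal read-modify-write sketch (hypothetical helper; register and
 * mask picked as an example from this file):
 */
#if 0
static void temac_example_rmw(struct temac_local *lp)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, XTE_AFM_OFFSET);
	temac_indirect_out32_locked(lp, XTE_AFM_OFFSET,
				    val | XTE_AFM_EPPRM_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}
#endif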
/**
 * temac_dma_in32_* - Memory mapped DMA read, these functions expect a
 * register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses. To be assigned to
 * lp->dma_in.
 */
static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
{
	return ioread32be(lp->sdma_regs + (reg << 2));
}

static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
{
	return ioread32(lp->sdma_regs + (reg << 2));
}

/**
 * temac_dma_out32_* - Memory mapped DMA write, these functions expect
 * a register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses. To be assigned to
 * lp->dma_out.
 */
static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
{
	iowrite32be(value, lp->sdma_regs + (reg << 2));
}

static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
{
	iowrite32(value, lp->sdma_regs + (reg << 2));
}
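/* Worked example (illustrative, not driver code): the DCR word address
 * is scaled to a byte offset by "reg << 2", so DMA register 4 in DCR
 * space is accessed at lp->sdma_regs + 0x10 when memory mapped.
 */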
/* DMA register access functions can be DCR based or memory mapped.
 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
 * memory mapped.
 */
#ifdef CONFIG_PPC_DCR

/*
 * temac_dma_dcr_in32 - DCR based DMA read
 */
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
	return dcr_read(lp->sdma_dcrs, reg);
}

/*
 * temac_dma_dcr_out32 - DCR based DMA write
 */
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
	dcr_write(lp->sdma_dcrs, reg, value);
}

/*
 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
 * I/O functions
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	unsigned int dcrs;

	/* setup the dcr address mapping if it's in the device tree */
	dcrs = dcr_resource_start(np, 0);
	if (dcrs != 0) {
		lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
		lp->dma_in = temac_dma_dcr_in;
		lp->dma_out = temac_dma_dcr_out;
		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
		return 0;
	}
	/* no DCR in the device tree, indicate a failure */
	return -1;
}

#else

/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze and x86
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	return -1;
}

#endif
/*
 * temac_dma_bd_release - Release buffer descriptor rings
 */
static void temac_dma_bd_release(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	int i;

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);

	for (i = 0; i < lp->rx_bd_num; i++) {
		if (!lp->rx_skb[i])
			break;
		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb(lp->rx_skb[i]);
	}
	if (lp->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
				  lp->rx_bd_v, lp->rx_bd_p);
	if (lp->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
				  lp->tx_bd_v, lp->tx_bd_p);
}
/*
 * temac_dma_bd_init - Setup buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	dma_addr_t skb_dma_addr;
	int i;

	lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
				  sizeof(*lp->rx_skb), GFP_KERNEL);
	if (!lp->rx_skb)
		goto out;

	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
			+ sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
			+ sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));

		skb = __netdev_alloc_skb_ip_align(ndev,
						  XTE_MAX_JUMBO_FRAME_SIZE,
						  GFP_KERNEL);
		if (!skb)
			goto out;

		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
			goto out;
		lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
		lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
	}

	/* Configure DMA channel (irq setup) */
	lp->dma_out(lp, TX_CHNL_CTRL,
		    lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
		    0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
	lp->dma_out(lp, RX_CHNL_CTRL,
		    lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
		    CHNL_CTRL_IRQ_IOE |
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);

	/* Init descriptor indexes */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;
	lp->rx_bd_tail = lp->rx_bd_num - 1;

	/* Enable RX DMA transfers */
	wmb();
	lp->dma_out(lp, RX_CURDESC_PTR,  lp->rx_bd_p);
	lp->dma_out(lp, RX_TAILDESC_PTR,
		    lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));

	/* Prepare for TX DMA transfer */
	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;

out:
	temac_dma_bd_release(ndev);
	return -ENOMEM;
}
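/* Worked example (illustrative, not driver code): in the channel
 * control words written above, bits 31:24 carry the IRQ delay and bits
 * 23:16 the IRQ coalesce count. With the driver defaults of delay 0x10
 * and count 0x22 for TX, the variable part of the word is
 * (0x10 << 24) | (0x22 << 16) = 0x10220000, which is then OR'ed with
 * the fixed CHNL_CTRL_* enable bits.
 */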
/* ---------------------------------------------------------------------
 * net_device_ops
 */

static void temac_do_set_mac_address(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	unsigned long flags;

	/* set up unicast MAC address filter with its mac address */
	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
				    (ndev->dev_addr[0]) |
				    (ndev->dev_addr[1] << 8) |
				    (ndev->dev_addr[2] << 16) |
				    (ndev->dev_addr[3] << 24));
	/* There are reserved bits in EUAW1, so don't affect them.
	 * Set MAC bits [47:32] in EUAW1.
	 */
	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
				    (ndev->dev_addr[4] & 0x000000ff) |
				    (ndev->dev_addr[5] << 8));
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}
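/* Worked example (illustrative, not driver code): for MAC address
 * 00:11:22:33:44:55, the packing above yields UAW0 = 0x33221100 (first
 * four octets, lowest octet in the least significant byte) and
 * UAW1 = 0x00005544 (last two octets in bits [15:0]).
 */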
static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
	eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);
	temac_do_set_mac_address(ndev);
	return 0;
}

static int temac_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	eth_hw_addr_set(ndev, addr->sa_data);
	temac_do_set_mac_address(ndev);
	return 0;
}
static void temac_set_multicast_list(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 multi_addr_msw, multi_addr_lsw;
	int i = 0;
	unsigned long flags;
	bool promisc_mode_disabled = false;

	if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
		return;
	}

	spin_lock_irqsave(lp->indirect_lock, flags);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
				break;
			multi_addr_msw = ((ha->addr[3] << 24) |
					  (ha->addr[2] << 16) |
					  (ha->addr[1] << 8) |
					  (ha->addr[0]));
			temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
						    multi_addr_msw);
			multi_addr_lsw = ((ha->addr[5] << 8) |
					  (ha->addr[4]) | (i << 16));
			temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
						    multi_addr_lsw);
			i++;
		}
	}

	/* Clear all or remaining/unused address table entries */
	while (i < MULTICAST_CAM_TABLE_NUM) {
		temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
		temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
		i++;
	}

	/* Enable address filter block if currently disabled */
	if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
	    & XTE_AFM_EPPRM_MASK) {
		temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
		promisc_mode_disabled = true;
	}

	spin_unlock_irqrestore(lp->indirect_lock, flags);

	if (promisc_mode_disabled)
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
static struct temac_option {
	int flg;
	u32 opt;
	u32 reg;
	u32 m_or;
	u32 m_and;
} temac_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXJMBO_MASK,
	},
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXJMBO_MASK,
	},
	/* Turn on VLAN packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXVLAN_MASK,
	},
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXVLAN_MASK,
	},
	/* Turn on FCS stripping on receive packets */
	{
		.opt = XTE_OPTION_FCS_STRIP,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXFCS_MASK,
	},
	/* Turn on FCS insertion on transmit packets */
	{
		.opt = XTE_OPTION_FCS_INSERT,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXFCS_MASK,
	},
	/* Turn on length/type field checking on receive packets */
	{
		.opt = XTE_OPTION_LENTYPE_ERR,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXLT_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_RXFLO_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_TXFLO_MASK,
	},
	/* Turn on promiscuous frame filtering (all frames are received) */
	{
		.opt = XTE_OPTION_PROMISC,
		.reg = XTE_AFM_OFFSET,
		.m_or = XTE_AFM_EPPRM_MASK,
	},
	/* Enable transmitter if not already enabled */
	{
		.opt = XTE_OPTION_TXEN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXEN_MASK,
	},
	/* Enable receiver if not already enabled */
	{
		.opt = XTE_OPTION_RXEN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXEN_MASK,
	},
	{}
};
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct temac_option *tp = &temac_options[0];
	int reg;
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	while (tp->opt) {
		/* Clear the option's bits, then set them if requested */
		reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
		if (options & tp->opt) {
			reg |= tp->m_or;
			temac_indirect_out32_locked(lp, tp->reg, reg);
		}
		tp++;
	}
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	lp->options |= options;

	return 0;
}
/* Initialize temac */
static void temac_device_reset(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 timeout;
	u32 val;
	unsigned long flags;

	/* Perform a software reset */

	/* 0x300 host enable bit ? */
	/* reset PHY through control register ?:1 */

	dev_dbg(&ndev->dev, "%s()\n", __func__);

	/* Reset the receiver and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"%s RX reset timeout!!\n", __func__);
			break;
		}
	}

	/* Reset the transmitter and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"%s TX reset timeout!!\n", __func__);
			break;
		}
	}

	/* Disable the receiver */
	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
				    val & ~XTE_RXC1_RXEN_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
	timeout = 1000;
	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"%s DMA reset timeout!!\n", __func__);
			break;
		}
	}
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

	if (temac_dma_bd_init(ndev)) {
		dev_err(&ndev->dev,
			"%s descriptor allocation failed\n", __func__);
	}

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Sync default options with HW
	 * but leave receiver and transmitter disabled.
	 */
	temac_setoptions(ndev,
			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

	temac_do_set_mac_address(ndev);

	/* Set address filter table */
	temac_set_multicast_list(ndev);
	if (temac_setoptions(ndev, lp->options))
		dev_err(&ndev->dev, "Error setting TEMAC options\n");

	/* Init Driver variable */
	netif_trans_update(ndev); /* prevent tx timeout */
}
static void temac_adjust_link(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	u32 mii_speed;
	int link_state;
	unsigned long flags;

	/* hash together the state values to decide if something has changed */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;

	if (lp->last_link != link_state) {
		spin_lock_irqsave(lp->indirect_lock, flags);
		mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

		switch (phy->speed) {
		case SPEED_1000:
			mii_speed |= XTE_EMCFG_LINKSPD_1000;
			break;
		case SPEED_100:
			mii_speed |= XTE_EMCFG_LINKSPD_100;
			break;
		case SPEED_10:
			mii_speed |= XTE_EMCFG_LINKSPD_10;
			break;
		}

		/* Write new speed setting out to TEMAC */
		temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
		spin_unlock_irqrestore(lp->indirect_lock, flags);

		lp->last_link = link_state;
		phy_print_status(phy);
	}
}
#ifdef CONFIG_64BIT

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app3 = (u32)(((u64)p) >> 32);
	bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(((u64)(bd->app3) << 32) | bd->app4);
}

#else
static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app4 = (u32)p;
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(bd->app4);
}

#endif
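/* Worked example (illustrative, not driver code): on 64-bit, a skb
 * pointer such as 0x00000123456789ab round-trips through the
 * descriptor as app3 = 0x00000123 (upper half) and app4 = 0x456789ab
 * (lower half); ptr_from_txbd() reassembles the original value.
 */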
static void temac_start_xmit_done(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	unsigned int stat = 0;
	struct sk_buff *skb;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	stat = be32_to_cpu(cur_p->app0);

	while (stat & STS_CTRL_APP0_CMPLT) {
		/* Make sure that the other fields are read after bd is
		 * released
		 */
		rmb();
		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
				 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
		if (skb)
			dev_consume_skb_irq(skb);
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);

		/* app0 must be visible last, as it is used to flag
		 * availability of the bd
		 */
		smp_mb();
		cur_p->app0 = 0;

		lp->tx_bd_ci++;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		stat = be32_to_cpu(cur_p->app0);
	}

	/* Matches barrier in temac_start_xmit */
	smp_mb();

	netif_wake_queue(ndev);
}
static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
{
	struct cdmac_bd *cur_p;
	int tail;

	tail = lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[tail];

	do {
		if (cur_p->app0)
			return NETDEV_TX_BUSY;

		/* Make sure to read next bd app0 after this one */
		rmb();

		tail++;
		if (tail >= lp->tx_bd_num)
			tail = 0;

		cur_p = &lp->tx_bd_v[tail];
		num_frag--;
	} while (num_frag >= 0);

	return 0;
}
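/* Usage note (illustrative): temac_start_xmit() calls this with
 * num_frag + 1 to reserve one descriptor for the linear skb head plus
 * one per page fragment; a non-zero app0 marks a descriptor still
 * owned by the hardware or not yet reaped by temac_start_xmit_done().
 */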
static netdev_tx_t
temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	dma_addr_t tail_p, skb_dma_addr;
	int ii;
	unsigned long num_frag;
	skb_frag_t *frag;

	num_frag = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
		if (netif_queue_stopped(ndev))
			return NETDEV_TX_BUSY;

		netif_stop_queue(ndev);

		/* Matches barrier in temac_start_xmit_done */
		smp_mb();

		/* Space might have just been freed - check again */
		if (temac_check_tx_bd_space(lp, num_frag + 1))
			return NETDEV_TX_BUSY;

		netif_wake_queue(ndev);
	}

	cur_p->app0 = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned int csum_start_off = skb_checksum_start_offset(skb);
		unsigned int csum_index_off = csum_start_off + skb->csum_offset;

		cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
		cur_p->app1 = cpu_to_be32((csum_start_off << 16)
					  | csum_index_off);
		cur_p->app2 = 0;  /* initial checksum seed */
	}

	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	cur_p->len = cpu_to_be32(skb_headlen(skb));
	if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
		dev_kfree_skb_any(skb);
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	cur_p->phys = cpu_to_be32(skb_dma_addr);

	for (ii = 0; ii < num_frag; ii++) {
		if (++lp->tx_bd_tail >= lp->tx_bd_num)
			lp->tx_bd_tail = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		skb_dma_addr = dma_map_single(ndev->dev.parent,
					      skb_frag_address(frag),
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
			if (--lp->tx_bd_tail < 0)
				lp->tx_bd_tail = lp->tx_bd_num - 1;
			cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
			while (--ii >= 0) {
				--frag;
				dma_unmap_single(ndev->dev.parent,
						 be32_to_cpu(cur_p->phys),
						 skb_frag_size(frag),
						 DMA_TO_DEVICE);
				if (--lp->tx_bd_tail < 0)
					lp->tx_bd_tail = lp->tx_bd_num - 1;
				cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
			}
			dma_unmap_single(ndev->dev.parent,
					 be32_to_cpu(cur_p->phys),
					 skb_headlen(skb), DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			ndev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		cur_p->phys = cpu_to_be32(skb_dma_addr);
		cur_p->len = cpu_to_be32(skb_frag_size(frag));
		cur_p->app0 = 0;
		frag++;
	}
	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);

	/* Mark last fragment with skb address, so it can be consumed
	 * in temac_start_xmit_done()
	 */
	ptr_to_txbd((void *)skb, cur_p);

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	lp->tx_bd_tail++;
	if (lp->tx_bd_tail >= lp->tx_bd_num)
		lp->tx_bd_tail = 0;

	skb_tx_timestamp(skb);

	/* Kick off the transfer */
	wmb();
	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

	if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}
static int ll_temac_recv_buffers_available(struct temac_local *lp)
{
	int available;

	if (!lp->rx_skb[lp->rx_bd_ci])
		return 0;
	available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
	if (available <= 0)
		available += lp->rx_bd_num;
	return available;
}
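/* Worked example (illustrative, not driver code): with rx_bd_num =
 * 1024, rx_bd_ci = 5 and rx_bd_tail = 3, the raw count 1 + 3 - 5 = -1
 * is negative, so rx_bd_num is added, giving 1023 buffers still
 * available to the hardware.
 */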
static void ll_temac_recv(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	unsigned long flags;
	int rx_bd;
	bool update_tail = false;

	spin_lock_irqsave(&lp->rx_lock, flags);

	/* Process all received buffers, passing them on to the network
	 * stack. After this, the buffer descriptors will be in an
	 * un-allocated stage, where no skb is allocated for it, and
	 * they are therefore not available for TEMAC/DMA.
	 */
	do {
		struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
		struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
		unsigned int bdstat = be32_to_cpu(bd->app0);
		int length;

		/* While this should not normally happen, we can end
		 * here when GFP_ATOMIC allocations fail, and we
		 * therefore have un-allocated buffers.
		 */
		if (!skb)
			break;

		/* Loop over all completed buffer descriptors */
		if (!(bdstat & STS_CTRL_APP0_CMPLT))
			break;

		dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		/* The buffer is not valid for DMA anymore */
		bd->phys = 0;
		bd->len = 0;

		length = be32_to_cpu(bd->app4) & 0x3FFF;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* if we're doing rx csum offload, set it up */
		if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
		    (skb->protocol == htons(ETH_P_IP)) &&
		    (skb->len > 64)) {
			/* Convert from device endianness (be32) to cpu
			 * endianness, and if necessary swap the bytes
			 * (back) for proper IP checksum byte order
			 * (be16).
			 */
			skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (!skb_defer_rx_timestamp(skb))
			netif_rx(skb);
		/* The skb buffer is now owned by network stack above */
		lp->rx_skb[lp->rx_bd_ci] = NULL;

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += length;

		rx_bd = lp->rx_bd_ci;
		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
	} while (rx_bd != lp->rx_bd_tail);

	/* DMA operations will halt when the last buffer descriptor is
	 * processed (ie. the one pointed to by RX_TAILDESC_PTR).
	 * When that happens, no more interrupt events will be
	 * generated. No IRQ_COAL or IRQ_DLY, and not even an
	 * IRQ_ERR. To avoid stalling, we schedule a delayed work
	 * when there is a potential risk of that happening. The work
	 * will call this function, and thus re-schedule itself until
	 * enough buffers are available again.
	 */
	if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
		schedule_delayed_work(&lp->restart_work, HZ / 1000);

	/* Allocate new buffers for those buffer descriptors that were
	 * passed to network stack. Note that GFP_ATOMIC allocations
	 * can fail (e.g. when a larger burst of GFP_ATOMIC
	 * allocations occurs), so while we try to allocate all
	 * buffers in the same interrupt where they were processed, we
	 * continue with what we could get in case of allocation
	 * failure. Allocation of remaining buffers will be retried
	 * in following calls.
	 */
	while (1) {
		struct sk_buff *skb;
		struct cdmac_bd *bd;
		dma_addr_t skb_dma_addr;

		rx_bd = lp->rx_bd_tail + 1;
		if (rx_bd >= lp->rx_bd_num)
			rx_bd = 0;
		bd = &lp->rx_bd_v[rx_bd];

		if (bd->phys)
			break;	/* All skb's allocated */

		skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
		if (!skb) {
			dev_warn(&ndev->dev, "skb alloc failed\n");
			break;
		}

		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
						   skb_dma_addr))) {
			dev_kfree_skb_any(skb);
			break;
		}

		bd->phys = cpu_to_be32(skb_dma_addr);
		bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
		lp->rx_skb[rx_bd] = skb;

		lp->rx_bd_tail = rx_bd;
		update_tail = true;
	}

	/* Move tail pointer when buffers have been allocated */
	if (update_tail) {
		lp->dma_out(lp, RX_TAILDESC_PTR,
			    lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
	}

	spin_unlock_irqrestore(&lp->rx_lock, flags);
}
/* Function scheduled to ensure a restart in case of DMA halt
 * condition caused by running out of buffer descriptors.
 */
static void ll_temac_restart_work_func(struct work_struct *work)
{
	struct temac_local *lp = container_of(work, struct temac_local,
					      restart_work.work);
	struct net_device *ndev = lp->ndev;

	ll_temac_recv(ndev);
}
static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	/* Read and clear the status registers */
	status = lp->dma_in(lp, TX_IRQ_REG);
	lp->dma_out(lp, TX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		temac_start_xmit_done(lp->ndev);
	if (status & (IRQ_ERR | IRQ_DMAERR))
		dev_err_ratelimited(&ndev->dev,
				    "TX error 0x%x TX_CHNL_STS=0x%08x\n",
				    status, lp->dma_in(lp, TX_CHNL_STS));

	return IRQ_HANDLED;
}

static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	/* Read and clear the status registers */
	status = lp->dma_in(lp, RX_IRQ_REG);
	lp->dma_out(lp, RX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		ll_temac_recv(lp->ndev);
	if (status & (IRQ_ERR | IRQ_DMAERR))
		dev_err_ratelimited(&ndev->dev,
				    "RX error 0x%x RX_CHNL_STS=0x%08x\n",
				    status, lp->dma_in(lp, RX_CHNL_STS));

	return IRQ_HANDLED;
}
static int temac_open(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = NULL;
	int rc;

	dev_dbg(&ndev->dev, "temac_open()\n");

	if (lp->phy_node) {
		phydev = of_phy_connect(lp->ndev, lp->phy_node,
					temac_adjust_link, 0, 0);
		if (!phydev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}
		phy_start(phydev);
	} else if (strlen(lp->phy_name) > 0) {
		phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
				     lp->phy_interface);
		if (IS_ERR(phydev)) {
			dev_err(lp->dev, "phy_connect() failed\n");
			return PTR_ERR(phydev);
		}
		phy_start(phydev);
	}

	temac_device_reset(ndev);

	rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_tx_irq;
	rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_rx_irq;

	return 0;

 err_rx_irq:
	free_irq(lp->tx_irq, ndev);
 err_tx_irq:
	if (phydev)
		phy_disconnect(phydev);
	dev_err(lp->dev, "request_irq() failed\n");
	return rc;
}
static int temac_stop(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	dev_dbg(&ndev->dev, "temac_close()\n");

	cancel_delayed_work_sync(&lp->restart_work);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (phydev)
		phy_disconnect(phydev);

	temac_dma_bd_release(ndev);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
temac_poll_controller(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);

	ll_temac_rx_irq(lp->rx_irq, ndev);
	ll_temac_tx_irq(lp->tx_irq, ndev);

	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif
static const struct net_device_ops temac_netdev_ops = {
	.ndo_open = temac_open,
	.ndo_stop = temac_stop,
	.ndo_start_xmit = temac_start_xmit,
	.ndo_set_rx_mode = temac_set_multicast_list,
	.ndo_set_mac_address = temac_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = temac_poll_controller,
#endif
};
/* ---------------------------------------------------------------------
 * SYSFS device attributes
 */
static ssize_t temac_show_llink_regs(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct temac_local *lp = netdev_priv(ndev);
	int i, len = 0;

	for (i = 0; i < 0x11; i++)
		len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
			       (i % 8) == 7 ? "\n" : " ");
	len += sprintf(buf + len, "\n");

	return len;
}

static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
	&dev_attr_llink_regs.attr,
	NULL,
};

static const struct attribute_group temac_attr_group = {
	.attrs = temac_device_attrs,
};
/* ---------------------------------------------------------------------
 * ethtool support
 */

static void
ll_temac_ethtools_get_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ering,
				struct kernel_ethtool_ringparam *kernel_ering,
				struct netlink_ext_ack *extack)
{
	struct temac_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}
static int
ll_temac_ethtools_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ering,
				struct kernel_ethtool_ringparam *kernel_ering,
				struct netlink_ext_ack *extack)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}
static int
ll_temac_ethtools_get_coalesce(struct net_device *ndev,
			       struct ethtool_coalesce *ec,
			       struct kernel_ethtool_coalesce *kernel_coal,
			       struct netlink_ext_ack *extack)
{
	struct temac_local *lp = netdev_priv(ndev);

	ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
	ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
	ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
	ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
	return 0;
}
static int
ll_temac_ethtools_set_coalesce(struct net_device *ndev,
			       struct ethtool_coalesce *ec,
			       struct kernel_ethtool_coalesce *kernel_coal,
			       struct netlink_ext_ack *extack)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EBUSY;
	}

	if (ec->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
	if (ec->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
	/* With typical LocalLink clock speed of 200 MHz and
	 * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
	 */
	if (ec->rx_coalesce_usecs)
		lp->coalesce_delay_rx =
			min(255U, (ec->rx_coalesce_usecs * 100) / 512);
	if (ec->tx_coalesce_usecs)
		lp->coalesce_delay_tx =
			min(255U, (ec->tx_coalesce_usecs * 100) / 512);

	return 0;
}
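/* Worked example (illustrative, not driver code): with the 5.12 us
 * per count figure above, a request of rx_coalesce_usecs = 100 maps to
 * (100 * 100) / 512 = 19 delay counts (~97 us), and the maximum delay
 * of 255 counts corresponds to (255 * 512) / 100 = 1305 us as reported
 * back by ll_temac_ethtools_get_coalesce().
 */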
static const struct ethtool_ops temac_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_ringparam = ll_temac_ethtools_get_ringparam,
	.set_ringparam = ll_temac_ethtools_set_ringparam,
	.get_coalesce = ll_temac_ethtools_get_coalesce,
	.set_coalesce = ll_temac_ethtools_set_coalesce,
};
static int temac_probe(struct platform_device *pdev)
{
	struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
	struct temac_local *lp;
	struct net_device *ndev;
	u8 addr[ETH_ALEN];
	__be32 *p;
	bool little_endian;
	int rc = 0;

	/* Init network device structure */
	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &temac_netdev_ops;
	ndev->ethtool_ops = &temac_ethtool_ops;
#if 0
	ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
	ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
	ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
	ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
	ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
	ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
	ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
	ndev->features |= NETIF_F_LRO; /* large receive offload */
#endif
	/* setup temac private info structure */
	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XTE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
	spin_lock_init(&lp->rx_lock);
	INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);

	/* Setup mutex for synchronization of indirect register access */
	if (pdata) {
		if (!pdata->indirect_lock) {
			dev_err(&pdev->dev,
				"indirect_lock missing in platform_data\n");
			return -EINVAL;
		}
		lp->indirect_lock = pdata->indirect_lock;
	} else {
		lp->indirect_lock = devm_kmalloc(&pdev->dev,
						 sizeof(*lp->indirect_lock),
						 GFP_KERNEL);
		if (!lp->indirect_lock)
			return -ENOMEM;
		spin_lock_init(lp->indirect_lock);
	}
	/* map device registers */
	lp->regs = devm_platform_ioremap_resource_byname(pdev, 0);
	if (IS_ERR(lp->regs)) {
		dev_err(&pdev->dev, "could not map TEMAC registers\n");
		return PTR_ERR(lp->regs);
	}

	/* Select register access functions with the specified
	 * endianness mode. Default for OF devices is big-endian.
	 */
	little_endian = false;
	if (temac_np)
		little_endian = of_property_read_bool(temac_np, "little-endian");
	else if (pdata)
		little_endian = pdata->reg_little_endian;

	if (little_endian) {
		lp->temac_ior = _temac_ior_le;
		lp->temac_iow = _temac_iow_le;
	} else {
		lp->temac_ior = _temac_ior_be;
		lp->temac_iow = _temac_iow_be;
	}
	/* Setup checksum offload, but default to off if not specified */
	lp->temac_features = 0;
	if (temac_np) {
		p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
		if (p && be32_to_cpu(*p))
			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
		p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
		if (p && be32_to_cpu(*p))
			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
	} else if (pdata) {
		if (pdata->txcsum)
			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
		if (pdata->rxcsum)
			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
	}
	if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
		/* Can checksum TCP/UDP over IPv4. */
		ndev->features |= NETIF_F_IP_CSUM;

	/* Defaults for IRQ delay/coalescing setup. These are
	 * configuration values, so they do not belong in the device tree.
	 */
	lp->coalesce_delay_tx = 0x10;
	lp->coalesce_count_tx = 0x22;
	lp->coalesce_delay_rx = 0xff;
	lp->coalesce_count_rx = 0x07;
	/* Setup LocalLink DMA */
	if (temac_np) {
		/* Find the DMA node, map the DMA registers, and
		 * decode the DMA IRQs.
		 */
		dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
		if (!dma_np) {
			dev_err(&pdev->dev, "could not find DMA node\n");
			return -ENODEV;
		}

		/* Setup the DMA register accesses, could be DCR or
		 * memory mapped.
		 */
		if (temac_dcr_setup(lp, pdev, dma_np)) {
			/* no DCR in the device tree, try non-DCR */
			lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
						      NULL);
			if (IS_ERR(lp->sdma_regs)) {
				dev_err(&pdev->dev,
					"unable to map DMA registers\n");
				of_node_put(dma_np);
				return PTR_ERR(lp->sdma_regs);
			}
			if (of_property_read_bool(dma_np, "little-endian")) {
				lp->dma_in = temac_dma_in32_le;
				lp->dma_out = temac_dma_out32_le;
			} else {
				lp->dma_in = temac_dma_in32_be;
				lp->dma_out = temac_dma_out32_be;
			}
			dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
		}

		/* Get DMA RX and TX interrupts */
		lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
		lp->tx_irq = irq_of_parse_and_map(dma_np, 1);

		/* Finished with the DMA node; drop the reference */
		of_node_put(dma_np);
	} else if (pdata) {
		/* 2nd memory resource specifies DMA registers */
		lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(lp->sdma_regs)) {
			dev_err(&pdev->dev,
				"could not map DMA registers\n");
			return PTR_ERR(lp->sdma_regs);
		}
		if (pdata->dma_little_endian) {
			lp->dma_in = temac_dma_in32_le;
			lp->dma_out = temac_dma_out32_le;
		} else {
			lp->dma_in = temac_dma_in32_be;
			lp->dma_out = temac_dma_out32_be;
		}

		/* Get DMA RX and TX interrupts */
		lp->rx_irq = platform_get_irq(pdev, 0);
		lp->tx_irq = platform_get_irq(pdev, 1);

		/* IRQ delay/coalescing setup */
		if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
			lp->coalesce_delay_tx = pdata->tx_irq_timeout;
			lp->coalesce_count_tx = pdata->tx_irq_count;
		}
		if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
			lp->coalesce_delay_rx = pdata->rx_irq_timeout;
			lp->coalesce_count_rx = pdata->rx_irq_count;
		}
	}

	/* Error handle returned DMA RX and TX interrupts */
	if (lp->rx_irq < 0)
		return dev_err_probe(&pdev->dev, lp->rx_irq,
				     "could not get DMA RX irq\n");
	if (lp->tx_irq < 0)
		return dev_err_probe(&pdev->dev, lp->tx_irq,
				     "could not get DMA TX irq\n");
	if (temac_np) {
		/* Retrieve the MAC address */
		rc = of_get_mac_address(temac_np, addr);
		if (rc) {
			dev_err(&pdev->dev, "could not find MAC address\n");
			return -ENODEV;
		}
		temac_init_mac_address(ndev, addr);
	} else if (pdata) {
		temac_init_mac_address(ndev, pdata->mac_addr);
	}

	rc = temac_mdio_setup(lp, pdev);
	if (rc)
		dev_warn(&pdev->dev, "error registering MDIO bus\n");

	if (temac_np) {
		lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
		if (lp->phy_node)
			dev_dbg(lp->dev, "using PHY node %pOF\n", temac_np);
	} else if (pdata) {
		snprintf(lp->phy_name, sizeof(lp->phy_name),
			 PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
		lp->phy_interface = pdata->phy_interface;
	}

	/* Add the device attributes */
	rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
	if (rc) {
		dev_err(lp->dev, "Error creating sysfs files\n");
		goto err_sysfs_create;
	}

	rc = register_netdev(lp->ndev);
	if (rc) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
		goto err_register_ndev;
	}

	return 0;

err_register_ndev:
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
err_sysfs_create:
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	temac_mdio_teardown(lp);
	return rc;
}
static int temac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct temac_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	temac_mdio_teardown(lp);
	return 0;
}
static const struct of_device_id temac_of_match[] = {
	{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
	{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.03.a", },
	{},
};
MODULE_DEVICE_TABLE(of, temac_of_match);

static struct platform_driver temac_driver = {
	.probe = temac_probe,
	.remove = temac_remove,
	.driver = {
		.name = "xilinx_temac",
		.of_match_table = temac_of_match,
	},
};

module_platform_driver(temac_driver);

MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");