// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014-2019 Broadcom
 */

#define pr_fmt(fmt)	"bcmgenet: " fmt

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"

/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

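/* Worked example of the layout above (constants quoted from bcmgenet.h
 * for illustration): with TOTAL_DESC = 256 and words_per_bd = 3 on a
 * 40-bit capable GENET, the descriptor array occupies 256 * 3 * 4 =
 * 3072 (0xC00) bytes, so the TDMA/RDMA register blocks begin 0xC00
 * past tdma_offset/rdma_offset respectively.
 */
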
static inline void bcmgenet_writel(u32 value, void __iomem *offset)
{
	/* MIPS chips strapped for BE will automagically configure the
	 * peripheral registers for CPU-native byte order.
	 */
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		__raw_writel(value, offset);
	else
		writel_relaxed(value, offset);
}

static inline u32 bcmgenet_readl(void __iomem *offset)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return __raw_readl(offset);
	else
		return readl_relaxed(offset);
}

static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_addr(priv, d, addr);
	dmadesc_set_length_status(priv, d, val);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}

#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)

static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These macros are defined to deal with register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
	DMA_INDEX2RING_0,
	DMA_INDEX2RING_1,
	DMA_INDEX2RING_2,
	DMA_INDEX2RING_3,
	DMA_INDEX2RING_4,
	DMA_INDEX2RING_5,
	DMA_INDEX2RING_6,
	DMA_INDEX2RING_7,
	DMA_RING0_TIMEOUT,
	DMA_RING1_TIMEOUT,
	DMA_RING2_TIMEOUT,
	DMA_RING3_TIMEOUT,
	DMA_RING4_TIMEOUT,
	DMA_RING5_TIMEOUT,
	DMA_RING6_TIMEOUT,
	DMA_RING7_TIMEOUT,
	DMA_RING8_TIMEOUT,
	DMA_RING9_TIMEOUT,
	DMA_RING10_TIMEOUT,
	DMA_RING11_TIMEOUT,
	DMA_RING12_TIMEOUT,
	DMA_RING13_TIMEOUT,
	DMA_RING14_TIMEOUT,
	DMA_RING15_TIMEOUT,
	DMA_RING16_TIMEOUT,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

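/* Note on the address math above: each ring owns a DMA_RING_SIZE window
 * of ring registers, rings 0-16, and the shared DMA registers follow at
 * DMA_RINGS_SIZE. Assuming DMA_RING_SIZE = 0x40 as defined upstream,
 * ring 16's window starts at 16 * 0x40 = 0x400 and the shared block at
 * 17 * 0x40 = 0x440.
 */
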
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};

/* GENET v4 supports 40-bits pointer addressing
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_READ_PTR_HI]		= 0x04,
	[TDMA_CONS_INDEX]		= 0x08,
	[TDMA_PROD_INDEX]		= 0x0C,
	[DMA_RING_BUF_SIZE]		= 0x10,
	[DMA_START_ADDR]		= 0x14,
	[DMA_START_ADDR_HI]		= 0x18,
	[DMA_END_ADDR]			= 0x1C,
	[DMA_END_ADDR_HI]		= 0x20,
	[DMA_MBUF_DONE_THRESH]		= 0x24,
	[TDMA_FLOW_PERIOD]		= 0x28,
	[TDMA_WRITE_PTR]		= 0x2C,
	[TDMA_WRITE_PTR_HI]		= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_CONS_INDEX]		= 0x04,
	[TDMA_PROD_INDEX]		= 0x08,
	[DMA_RING_BUF_SIZE]		= 0x0C,
	[DMA_START_ADDR]		= 0x10,
	[DMA_END_ADDR]			= 0x14,
	[DMA_MBUF_DONE_THRESH]		= 0x18,
	[TDMA_FLOW_PERIOD]		= 0x1C,
	[TDMA_WRITE_PTR]		= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;

static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static int bcmgenet_begin(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn on the clock */
	return clk_prepare_enable(priv->clk);
}

static void bcmgenet_complete(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn off the clock */
	clk_disable_unprepare(priv->clk);
}

static int bcmgenet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	phy_ethtool_ksettings_get(dev->phydev, cmd);

	return 0;
}

static int bcmgenet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(dev->phydev, cmd);
}

static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 reg;
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	clk_disable_unprepare(priv->clk);

	return ret;
}

static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}

static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rx_ring *ring;
	unsigned int i;

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ring = &priv->rx_rings[i];
		ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
	}
	ring = &priv->rx_rings[DESC_INDEX];
	ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;

	return 0;
}

static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
				     u32 usecs, u32 pkts)
{
	struct bcmgenet_priv *priv = ring->priv;
	unsigned int i = ring->index;
	u32 reg;

	bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
}

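/* Unit-conversion sketch for the timeout above: one tick is 1024 cycles
 * of the 125MHz reference, i.e. 8.192us. A requested rx-usecs of 50
 * programs DIV_ROUND_UP(50 * 1000, 8192) = 7 ticks, which reads back as
 * 7 * 8192 / 1000 = 57us in bcmgenet_get_coalesce() above.
 */
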
static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
					  struct ethtool_coalesce *ec)
{
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = ring->rx_coalesce_usecs;
	pkts = ring->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
}

static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;

	/* Base system clock is 125MHz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192us, our maximum value
	 * has to fit in the DMA_TIMEOUT_MASK (16 bits)
	 */
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but will
	 * always generate an interrupt either after MBDONE packets have been
	 * transmitted, or when the ring is empty.
	 */

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++)
		bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
	bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);

	return 0;
}

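/* For illustration, these settings map onto the standard ethtool
 * coalescing knobs (interface name hypothetical):
 *   ethtool -C eth0 rx-usecs 50 rx-frames 16 tx-frames 16
 *   ethtool -C eth0 adaptive-rx on   (enables the DIM path above)
 */
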
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

#define STAT_GENET_Q(num) \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
			tx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
			tx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
			rx_rings[num].bytes),	 \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
			rx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
			rx_rings[num].errors), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
			rx_rings[num].dropped)

/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc

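/* Walking bcmgenet_gstrings_stats therefore works like this: the RX MIB
 * block is read back-to-back from UMAC_MIB_START, the TX block starts
 * one 0xC gap later, and the RUNT block two gaps later -- exactly the
 * cumulative offset += BCMGENET_STAT_OFFSET fallthroughs computed in
 * bcmgenet_update_mib_counters() below.
 */
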
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT_V1),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
			UMAC_RBUF_ERR_CNT_V1),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed",
			    mib.tx_realloc_tsb_failed),
	/* Per TX queues */
	STAT_GENET_Q(0),
	STAT_GENET_Q(1),
	STAT_GENET_Q(2),
	STAT_GENET_Q(3),
	STAT_GENET_Q(16),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)

static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
{
	u16 new_offset;
	u32 val;

	switch (offset) {
	case UMAC_RBUF_OVFL_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_OVFL_CNT_V2;
		else
			new_offset = RBUF_OVFL_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	case UMAC_RBUF_ERR_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_ERR_CNT_V2;
		else
			new_offset = RBUF_ERR_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	default:
		val = bcmgenet_umac_readl(priv, offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_umac_writel(priv, 0, offset);
		break;
	}

	return val;
}

static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_RUNT:
			offset += BCMGENET_STAT_OFFSET;
			fallthrough;
		case BCMGENET_STAT_MIB_TX:
			offset += BCMGENET_STAT_OFFSET;
			fallthrough;
		case BCMGENET_STAT_MIB_RX:
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			offset = 0;	/* Reset Offset */
			break;
		case BCMGENET_STAT_MISC:
			if (GENET_IS_V1(priv)) {
				val = bcmgenet_umac_readl(priv, s->reg_offset);
				/* clear if overflowed */
				if (val == ~0)
					bcmgenet_umac_writel(priv, 0,
							     s->reg_offset);
			} else {
				val = bcmgenet_update_stat_misc(priv,
								s->reg_offset);
			}
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}

static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	dev->netdev_ops->ndo_get_stats(dev);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		if (sizeof(unsigned long) != sizeof(u32) &&
		    s->stat_sizeof == sizeof(unsigned long))
			data[i] = *(unsigned long *)p;
		else
			data[i] = *(u32 *)p;
	}
}

static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27MHz clock automatically */
	reg = bcmgenet_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	bcmgenet_writel(reg, priv->base + off);

	/* Do the same thing for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}

static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(dev->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(dev->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(dev->phydev, e);
}

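/* For illustration, this handler sits behind the standard EEE ethtool
 * interface, e.g. "ethtool --set-eee eth0 eee on tx-timer 100"
 * (interface name hypothetical); GENET v1 rejects it with -EOPNOTSUPP.
 */
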
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.begin			= bcmgenet_begin,
	.complete		= bcmgenet_complete,
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_coalesce		= bcmgenet_get_coalesce,
	.set_coalesce		= bcmgenet_set_coalesce,
	.get_link_ksettings	= bcmgenet_get_link_ksettings,
	.set_link_ksettings	= bcmgenet_set_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
};

/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->dev->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			if (GENET_IS_V5(priv))
				reg |= EXT_PWR_DOWN_PHY_EN |
				       EXT_PWR_DOWN_PHY_RD |
				       EXT_PWR_DOWN_PHY_SD |
				       EXT_PWR_DOWN_PHY_RX |
				       EXT_PWR_DOWN_PHY_TX |
				       EXT_IDDQ_GLBL_PWR;
			else
				reg |= EXT_PWR_DOWN_PHY;

			reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	return ret;
}

static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
		if (GENET_IS_V5(priv)) {
			reg &= ~(EXT_PWR_DOWN_PHY_EN |
				 EXT_PWR_DOWN_PHY_RD |
				 EXT_PWR_DOWN_PHY_SD |
				 EXT_PWR_DOWN_PHY_RX |
				 EXT_PWR_DOWN_PHY_TX |
				 EXT_IDDQ_GLBL_PWR);
			reg |= EXT_PHY_RESET;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
			mdelay(1);

			reg &= ~EXT_PHY_RESET;
		} else {
			reg &= ~EXT_PWR_DOWN_PHY;
			reg |= EXT_PWR_DN_EN_LD;
		}
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		bcmgenet_phy_power_set(priv->dev, true);
		break;

	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		if (!GENET_IS_V5(priv)) {
			reg |= EXT_PWR_DN_EN_LD;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}
}

static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Rewinding local write pointer */
	if (ring->write_ptr == ring->cb_ptr)
		ring->write_ptr = ring->end_ptr;
	else
		ring->write_ptr--;

	return tx_cb_ptr;
}

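/* bcmgenet_put_txcb() is the exact inverse of bcmgenet_get_txcb(): the
 * xmit error path below calls it once to back out the control block
 * whose DMA mapping failed, then once per previously mapped block while
 * unwinding (see the out_unmap_frags label in bcmgenet_xmit()).
 */
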
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}

/* Simple helper to free a transmit control block's resources
 * Returns an skb when the last transmit control block associated with the
 * skb is freed. The skb should be freed by the caller if necessary.
 */
static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
					   struct enet_cb *cb)
{
	struct sk_buff *skb;

	skb = cb->skb;

	if (skb) {
		cb->skb = NULL;
		if (cb == GENET_CB(skb)->first_cb)
			dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
					 dma_unmap_len(cb, dma_len),
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
				       dma_unmap_len(cb, dma_len),
				       DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);

		if (cb == GENET_CB(skb)->last_cb)
			return skb;

	} else if (dma_unmap_addr(cb, dma_addr)) {
		dma_unmap_page(dev,
			       dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len),
			       DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}

	return NULL;
}

/* Simple helper to free a receive control block's resources */
static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
					   struct enet_cb *cb)
{
	struct sk_buff *skb;

	skb = cb->skb;
	cb->skb = NULL;

	if (dma_unmap_addr(cb, dma_addr)) {
		dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}

	return skb;
}

/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int txbds_processed = 0;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;
	unsigned int txbds_ready;
	unsigned int c_index;
	struct sk_buff *skb;

	/* Clear status before servicing to reduce spurious interrupts */
	if (ring->index == DESC_INDEX)
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
					 INTRL2_CPU_CLEAR);
	else
		bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
					 INTRL2_CPU_CLEAR);

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
		& DMA_C_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
					  &priv->tx_cbs[ring->clean_ptr]);
		if (skb) {
			pkts_compl++;
			bytes_compl += GENET_CB(skb)->bytes_sent;
			dev_consume_skb_any(skb);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = c_index;

	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;

	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
				  pkts_compl, bytes_compl);

	return txbds_processed;
}

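/* Worked example of the wraparound math above: with ring->c_index ==
 * 0xfff0 and a hardware consumer index of 0x0010, (0x0010 - 0xfff0) &
 * DMA_C_INDEX_MASK (0xffff upstream) yields 0x20, i.e. 32 descriptors
 * completed across the 16-bit counter wrap.
 */
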
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;

	spin_lock_bh(&ring->lock);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_bh(&ring->lock);

	return released;
}

static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	spin_lock(&ring->lock);
	work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
		netif_tx_wake_queue(txq);
	}
	spin_unlock(&ring->lock);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring);

		return 0;
	}

	return budget;
}

static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}

/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
					struct sk_buff *skb)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	__be16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		if (!new_skb) {
			dev_kfree_skb_any(skb);
			priv->mib.tx_realloc_tsb_failed++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		dev_consume_skb_any(skb);
		skb = new_skb;
		priv->mib.tx_realloc_tsb++;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
		switch (ip_ver) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			/* don't use UDP flag */
			ip_proto = 0;
			break;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
			       (offset + skb->csum_offset) |
			       STATUS_TX_CSUM_LV;

		/* Set the special UDP flag for UDP */
		if (ip_proto == IPPROTO_UDP)
			tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}

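/* Worked example of the offset math above, assuming an untagged
 * TCP/IPv4 frame: skb_checksum_start_offset() points at the TCP header,
 * byte 34 of the original frame (14B Ethernet + 20B IP), byte 98 once
 * the 64B TSB is pushed, so offset = 98 - 64 = 34; the computed
 * checksum is written at 34 + skb->csum_offset (16 for TCP) = byte 50.
 */
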
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcmgenet_tx_ring *ring = NULL;
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	int nr_frags, index;
	dma_addr_t mapping;
	unsigned int size;
	skb_frag_t *frag;
	u32 len_stat;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet transmitted through ring 16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	nr_frags = skb_shinfo(skb)->nr_frags;

	spin_lock(&ring->lock);
	if (ring->free_bds <= (nr_frags + 1)) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);
			netdev_err(dev,
				   "%s: tx ring %d full when queue %d awake\n",
				   __func__, index, ring->queue);
		}
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Retain how many bytes will be sent on the wire, without TSB inserted
	 * by transmit checksum offload
	 */
	GENET_CB(skb)->bytes_sent = skb->len;

	/* add the Transmit Status Block */
	skb = bcmgenet_add_tsb(dev, skb);
	if (!skb) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	for (i = 0; i <= nr_frags; i++) {
		tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

		BUG_ON(!tx_cb_ptr);

		if (!i) {
			/* Transmit single SKB or head of fragment list */
			GENET_CB(skb)->first_cb = tx_cb_ptr;
			size = skb_headlen(skb);
			mapping = dma_map_single(kdev, skb->data, size,
						 DMA_TO_DEVICE);
		} else {
			/* xmit fragment */
			frag = &skb_shinfo(skb)->frags[i - 1];
			size = skb_frag_size(frag);
			mapping = skb_frag_dma_map(kdev, frag, 0, size,
						   DMA_TO_DEVICE);
		}

		ret = dma_mapping_error(kdev, mapping);
		if (ret) {
			priv->mib.tx_dma_failed++;
			netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
			ret = NETDEV_TX_OK;
			goto out_unmap_frags;
		}
		dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
		dma_unmap_len_set(tx_cb_ptr, dma_len, size);

		tx_cb_ptr->skb = skb;

		len_stat = (size << DMA_BUFLENGTH_SHIFT) |
			   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);

		if (!i) {
			len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				len_stat |= DMA_TX_DO_CSUM;
		}
		if (i == nr_frags)
			len_stat |= DMA_EOP;

		dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
	}

	GENET_CB(skb)->last_cb = tx_cb_ptr;
	skb_tx_timestamp(skb);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= nr_frags + 1;
	ring->prod_index += nr_frags + 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		/* Packets are ready, update producer index */
		bcmgenet_tdma_ring_writel(priv, ring->index,
					  ring->prod_index, TDMA_PROD_INDEX);
out:
	spin_unlock(&ring->lock);

	return ret;

out_unmap_frags:
	/* Back up for failed control block mapping */
	bcmgenet_put_txcb(priv, ring);

	/* Unmap successfully mapped control blocks */
	while (i-- > 0) {
		tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
		bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
	}

	dev_kfree_skb(skb);
	goto out;
}

static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
					  struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	struct sk_buff *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new Rx skb */
	skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb allocation failed\n", __func__);
		return NULL;
	}

	/* DMA-map the new Rx skb */
	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb DMA mapping failed\n", __func__);
		return NULL;
	}

	/* Grab the current Rx skb from the ring and DMA-unmap it */
	rx_skb = bcmgenet_free_rx_cb(kdev, cb);

	/* Put the new Rx skb on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
	dmadesc_set_addr(priv, cb->bd_addr, mapping);

	/* Return the current Rx skb to caller */
	return rx_skb;
}

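/* Design note: the refill swaps buffers up front, so when allocation or
 * mapping fails the descriptor keeps its old, still-mapped buffer and
 * only the incoming packet is dropped; the ring never develops a hole.
 */
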
/* bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
				     unsigned int budget)
{
	struct bcmgenet_priv *priv = ring->priv;
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int bytes_processed = 0;
	unsigned int p_index, mask;
	unsigned int discards;

	/* Clear status before servicing to reduce spurious interrupts */
	if (ring->index == DESC_INDEX) {
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
					 INTRL2_CPU_CLEAR);
	} else {
		mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
		bcmgenet_intrl2_1_writel(priv,
					 mask,
					 INTRL2_CPU_CLEAR);
	}

	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);

	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
		   DMA_P_INDEX_DISCARD_CNT_MASK;
	if (discards > ring->old_discards) {
		discards = discards - ring->old_discards;
		ring->errors += discards;
		ring->old_discards += discards;

		/* Clear HW register when we reach 75% of maximum 0xFFFF */
		if (ring->old_discards >= 0xC000) {
			ring->old_discards = 0;
			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
						  RDMA_PROD_INDEX);
		}
	}

	p_index &= DMA_P_INDEX_MASK;
	rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		struct status_64 *status;
		__be16 rx_csum;

		cb = &priv->rx_cbs[ring->read_ptr];
		skb = bcmgenet_rx_refill(priv, cb);

		if (unlikely(!skb)) {
			ring->dropped++;
			goto next;
		}

		status = (struct status_64 *)skb->data;
		dma_length_status = status->length_status;
		if (dev->features & NETIF_F_RXCSUM) {
			rx_csum = (__force __be16)(status->rx_csum & 0xffff);
			skb->csum = (__force __wsum)ntohs(rx_csum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, ring->c_index,
			  ring->read_ptr, dma_length_status);

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			ring->errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
					 DMA_RX_OV |
					 DMA_RX_NO |
					 DMA_RX_LG |
					 DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		} /* error packet */

		skb_put(skb, len);

		/* remove RSB and hardware 2bytes added for IP alignment */
		skb_pull(skb, 66);
		len -= 66;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		bytes_processed += len;

		/* Finish setting up the received SKB and send it to the kernel */
		skb->protocol = eth_type_trans(skb, priv->dev);
		ring->packets++;
		ring->bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&ring->napi, skb);
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

next:
		rxpktprocessed++;
		if (likely(ring->read_ptr < ring->end_ptr))
			ring->read_ptr++;
		else
			ring->read_ptr = ring->cb_ptr;

		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
	}

	ring->dim.bytes = bytes_processed;
	ring->dim.packets = rxpktprocessed;

	return rxpktprocessed;
}

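/* Note on the 66-byte pull above: the hardware-reported length covers
 * the 64-byte receive status block plus the 2 alignment bytes prepended
 * so the IP header lands on a 4-byte boundary; both are stripped before
 * the frame reaches the stack.
 */
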
/* Rx NAPI polling method */
static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_rx_ring *ring = container_of(napi,
			struct bcmgenet_rx_ring, napi);
	struct dim_sample dim_sample = {};
	unsigned int work_done;

	work_done = bcmgenet_desc_rx(ring, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ring->int_enable(ring);
	}

	if (ring->dim.use_dim) {
		dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
				  ring->dim.bytes, &dim_sample);
		net_dim(&ring->dim.dim, dim_sample);
	}

	return work_done;
}

static void bcmgenet_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct bcmgenet_net_dim *ndim =
			container_of(dim, struct bcmgenet_net_dim, dim);
	struct bcmgenet_rx_ring *ring =
			container_of(ndim, struct bcmgenet_rx_ring, dim);
	struct dim_cq_moder cur_profile =
			net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
	dim->state = DIM_START_MEASURE;
}

/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
				     struct bcmgenet_rx_ring *ring)
{
	struct enet_cb *cb;
	struct sk_buff *skb;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* loop here for each buffer needing assign */
	for (i = 0; i < ring->size; i++) {
		cb = ring->cbs + i;
		skb = bcmgenet_rx_refill(priv, cb);
		if (skb)
			dev_consume_skb_any(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct sk_buff *skb;
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
		if (skb)
			dev_consume_skb_any(skb);
	}
}

static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (reg & CMD_SW_RESET)
		return;
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-size packet
	 * to be processed
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static void reset_umac(struct bcmgenet_priv *priv)
{
	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* issue soft reset and disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
	udelay(2);
}

static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts. */
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
}

static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
{
	u32 int0_enable = 0;

	/* Monitor cable plug/unplugged event for internal PHY, external PHY
	 * and MoCA PHY
	 */
	if (priv->internal_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
		if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
			int0_enable |= UMAC_IRQ_PHY_DET_R;
	} else if (priv->ext_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
			int0_enable |= UMAC_IRQ_LINK_EVENT;
	}
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
}

static void init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	u32 reg;
	u32 int0_enable = 0;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	reset_umac(priv);

	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init tx registers, enable TSB */
	reg = bcmgenet_tbuf_ctrl_get(priv);
	reg |= TBUF_64B_EN;
	bcmgenet_tbuf_ctrl_set(priv, reg);

	/* init rx registers, enable ip header optimization and RSB */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B | RBUF_64B_EN;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	/* enable rx checksumming */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
	reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->crc_fwd_en)
		reg |= RBUF_SKIP_FCS;
	else
		reg &= ~RBUF_SKIP_FCS;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	bcmgenet_intr_disable(priv);

	/* Configure backpressure vectors for MoCA */
	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);

	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);

	dev_dbg(kdev, "done init umac\n");
}

static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
			      void (*cb)(struct work_struct *work))
{
	struct bcmgenet_net_dim *dim = &ring->dim;

	INIT_WORK(&dim->dim.work, cb);
	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	dim->event_ctr = 0;
	dim->packets = 0;
	dim->bytes = 0;
}

static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
{
	struct bcmgenet_net_dim *dim = &ring->dim;
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	usecs = ring->rx_coalesce_usecs;
	pkts = ring->rx_max_coalesced_frames;

	/* If DIM was enabled, re-apply default parameters */
	if (dim->use_dim) {
		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
}

/* Initialize a Tx ring along with corresponding hardware registers */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;

	spin_lock_init(&ring->lock);
	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + start_ptr;
	ring->size = size;
	ring->clean_ptr = start_ptr;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	/* Initialize Tx NAPI */
	netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
			  NAPI_POLL_WEIGHT);
}

/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				 unsigned int index, unsigned int size,
				 unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->int_enable = bcmgenet_rx_ring16_int_enable;
		ring->int_disable = bcmgenet_rx_ring16_int_disable;
	} else {
		ring->int_enable = bcmgenet_rx_ring_int_enable;
		ring->int_disable = bcmgenet_rx_ring_int_disable;
	}
	ring->cbs = priv->rx_cbs + start_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->read_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;

	ret = bcmgenet_alloc_rx_buffers(priv, ring);
	if (ret)
		return ret;

	bcmgenet_init_dim(ring, bcmgenet_dim_work);
	bcmgenet_init_rx_coalesce(ring);

	/* Initialize Rx NAPI */
	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
		       NAPI_POLL_WEIGHT);

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);

	/* Set start and end address, read and write pointers */
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_READ_PTR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	return ret;
}

static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_enable(&ring->napi);
		ring->int_enable(ring);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_enable(&ring->napi);
	ring->int_enable(ring);
}

static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_disable(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
}

static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		netif_napi_del(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
}

/* Initialize Tx queues
 *
 * Queues 0-3 are priority-based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * Queue 16 is the default Tx queue with
 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
 *
 * The transmit control block pool is then partitioned as follows:
 * - Tx queue 0 uses tx_cbs[0..31]
 * - Tx queue 1 uses tx_cbs[32..63]
 * - Tx queue 2 uses tx_cbs[64..95]
 * - Tx queue 3 uses tx_cbs[96..127]
 * - Tx queue 16 uses tx_cbs[128..255]
 */

2221 static void bcmgenet_init_tx_queues(struct net_device *dev)
2223 struct bcmgenet_priv *priv = netdev_priv(dev);
2225 u32 dma_ctrl, ring_cfg;
2226 u32 dma_priority[3] = {0, 0, 0};
2228 dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
2229 dma_enable = dma_ctrl & DMA_EN;
2230 dma_ctrl &= ~DMA_EN;
2231 bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2236 /* Enable strict priority arbiter mode */
2237 bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
2239 /* Initialize Tx priority queues */
2240 for (i = 0; i < priv->hw_params->tx_queues; i++) {
2241 bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
2242 i * priv->hw_params->tx_bds_per_q,
2243 (i + 1) * priv->hw_params->tx_bds_per_q);
2244 ring_cfg |= (1 << i);
2245 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2246 dma_priority[DMA_PRIO_REG_INDEX(i)] |=
2247 ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
2250 /* Initialize Tx default queue 16 */
2251 bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
2252 priv->hw_params->tx_queues *
2253 priv->hw_params->tx_bds_per_q,
2254 TOTAL_DESC);
2255 ring_cfg |= (1 << DESC_INDEX);
2256 dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2257 dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
2258 ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
2259 DMA_PRIO_REG_SHIFT(DESC_INDEX));
2261 /* Set Tx queue priorities */
2262 bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
2263 bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
2264 bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
2266 /* Enable Tx queues */
2267 bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
2269 /* Enable Tx DMA */
2270 if (dma_enable)
2271 dma_ctrl |= DMA_EN;
2272 bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
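/* Several queue priorities share each 32-bit DMA_PRIORITY_* register, which
 * is why the function above accumulates them in a dma_priority[3] array
 * before writing. A minimal sketch of that packing step, using only the
 * DMA_PRIO_REG_INDEX()/DMA_PRIO_REG_SHIFT() macros already used above; the
 * helper name is hypothetical and not part of this driver:
 */
static inline void genet_pack_queue_priority(u32 *dma_priority,
					     unsigned int q, u32 prio)
{
	dma_priority[DMA_PRIO_REG_INDEX(q)] |= prio << DMA_PRIO_REG_SHIFT(q);
}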
2275 static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
2277 unsigned int i;
2278 struct bcmgenet_rx_ring *ring;
2280 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2281 ring = &priv->rx_rings[i];
2282 napi_enable(&ring->napi);
2283 ring->int_enable(ring);
2286 ring = &priv->rx_rings[DESC_INDEX];
2287 napi_enable(&ring->napi);
2288 ring->int_enable(ring);
2291 static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
2293 unsigned int i;
2294 struct bcmgenet_rx_ring *ring;
2296 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2297 ring = &priv->rx_rings[i];
2298 napi_disable(&ring->napi);
2299 cancel_work_sync(&ring->dim.dim.work);
2302 ring = &priv->rx_rings[DESC_INDEX];
2303 napi_disable(&ring->napi);
2304 cancel_work_sync(&ring->dim.dim.work);
2307 static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
2309 unsigned int i;
2310 struct bcmgenet_rx_ring *ring;
2312 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2313 ring = &priv->rx_rings[i];
2314 netif_napi_del(&ring->napi);
2317 ring = &priv->rx_rings[DESC_INDEX];
2318 netif_napi_del(&ring->napi);
2321 /* Initialize Rx queues
2323 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
2324 * used to direct traffic to these queues.
2326 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
2327 */
2328 static int bcmgenet_init_rx_queues(struct net_device *dev)
2330 struct bcmgenet_priv *priv = netdev_priv(dev);
2331 u32 i;
2332 u32 dma_enable;
2333 u32 dma_ctrl;
2334 u32 ring_cfg;
2335 int ret;
2337 dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
2338 dma_enable = dma_ctrl & DMA_EN;
2339 dma_ctrl &= ~DMA_EN;
2340 bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2342 dma_ctrl = 0;
2343 ring_cfg = 0;
2345 /* Initialize Rx priority queues */
2346 for (i = 0; i < priv->hw_params->rx_queues; i++) {
2347 ret = bcmgenet_init_rx_ring(priv, i,
2348 priv->hw_params->rx_bds_per_q,
2349 i * priv->hw_params->rx_bds_per_q,
2350 (i + 1) *
2351 priv->hw_params->rx_bds_per_q);
2352 if (ret)
2353 return ret;
2355 ring_cfg |= (1 << i);
2356 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2359 /* Initialize Rx default queue 16 */
2360 ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
2361 priv->hw_params->rx_queues *
2362 priv->hw_params->rx_bds_per_q,
2363 TOTAL_DESC);
2364 if (ret)
2365 return ret;
2367 ring_cfg |= (1 << DESC_INDEX);
2368 dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2371 bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
2373 /* Configure ring as descriptor ring and re-enable DMA if enabled */
2374 if (dma_enable)
2375 dma_ctrl |= DMA_EN;
2376 bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2378 return 0;
2381 static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2389 /* Disable TDMA to stop more frames from being added to the TX DMA */
2390 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2391 reg &= ~DMA_EN;
2392 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2394 /* Check TDMA status register to confirm TDMA is disabled */
2395 while (timeout++ < DMA_TIMEOUT_VAL) {
2396 reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2397 if (reg & DMA_DISABLED)
2398 break;
2400 udelay(1);
2403 if (timeout == DMA_TIMEOUT_VAL) {
2404 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
2405 ret = -ETIMEDOUT;
2406 }
2408 /* Wait 10ms for packet drain in both TX and RX DMA */
2409 usleep_range(10000, 20000);
2411 /* Disable RDMA */
2412 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2413 reg &= ~DMA_EN;
2414 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2416 timeout = 0;
2417 /* Check RDMA status register to confirm RDMA is disabled */
2418 while (timeout++ < DMA_TIMEOUT_VAL) {
2419 reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
2420 if (reg & DMA_DISABLED)
2421 break;
2423 udelay(1);
2426 if (timeout == DMA_TIMEOUT_VAL) {
2427 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
2428 ret = -ETIMEDOUT;
2429 }
2431 dma_ctrl = 0;
2432 for (i = 0; i < priv->hw_params->rx_queues; i++)
2433 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2434 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2435 reg &= ~dma_ctrl;
2436 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2438 dma_ctrl = 0;
2439 for (i = 0; i < priv->hw_params->tx_queues; i++)
2440 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2441 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2442 reg &= ~dma_ctrl;
2443 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2445 return ret;
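/* The two open-coded polling loops above could also be expressed with the
 * generic readx_poll_timeout() helper from <linux/iopoll.h>. A sketch only,
 * not a behavioural change to this driver; since the loop above delays 1us
 * per iteration, DMA_TIMEOUT_VAL iterations correspond to roughly
 * DMA_TIMEOUT_VAL microseconds:
 *
 *	static u32 bcmgenet_tdma_status_read(struct bcmgenet_priv *priv)
 *	{
 *		return bcmgenet_tdma_readl(priv, DMA_STATUS);
 *	}
 *
 *	ret = readx_poll_timeout(bcmgenet_tdma_status_read, priv, reg,
 *				 reg & DMA_DISABLED, 1, DMA_TIMEOUT_VAL);
 */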
2448 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
2450 struct netdev_queue *txq;
2451 int i;
2453 bcmgenet_fini_rx_napi(priv);
2454 bcmgenet_fini_tx_napi(priv);
2456 for (i = 0; i < priv->num_tx_bds; i++)
2457 dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
2458 priv->tx_cbs + i));
2460 for (i = 0; i < priv->hw_params->tx_queues; i++) {
2461 txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
2462 netdev_tx_reset_queue(txq);
2465 txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
2466 netdev_tx_reset_queue(txq);
2468 bcmgenet_free_rx_buffers(priv);
2469 kfree(priv->rx_cbs);
2470 kfree(priv->tx_cbs);
2473 /* init_edma: Initialize DMA control register */
2474 static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
2476 unsigned int i;
2477 struct enet_cb *cb;
2478 int ret;
2480 netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
2482 /* Initialize common Rx ring structures */
2483 priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
2484 priv->num_rx_bds = TOTAL_DESC;
2485 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
2486 GFP_KERNEL);
2487 if (!priv->rx_cbs)
2488 return -ENOMEM;
2490 for (i = 0; i < priv->num_rx_bds; i++) {
2491 cb = priv->rx_cbs + i;
2492 cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
2495 /* Initialize common TX ring structures */
2496 priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
2497 priv->num_tx_bds = TOTAL_DESC;
2498 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
2499 GFP_KERNEL);
2500 if (!priv->tx_cbs) {
2501 kfree(priv->rx_cbs);
2502 return -ENOMEM;
2503 }
2505 for (i = 0; i < priv->num_tx_bds; i++) {
2506 cb = priv->tx_cbs + i;
2507 cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
2511 bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
2512 DMA_SCB_BURST_SIZE);
2514 /* Initialize Rx queues */
2515 ret = bcmgenet_init_rx_queues(priv->dev);
2516 if (ret) {
2517 netdev_err(priv->dev, "failed to initialize Rx queues\n");
2518 bcmgenet_free_rx_buffers(priv);
2519 kfree(priv->rx_cbs);
2520 kfree(priv->tx_cbs);
2521 return ret;
2522 }
2525 bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
2526 DMA_SCB_BURST_SIZE);
2528 /* Initialize Tx queues */
2529 bcmgenet_init_tx_queues(priv->dev);
2531 return 0;
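/* The control block setup above establishes a simple invariant: control
 * block i always shadows the on-chip descriptor at byte offset
 * i * DMA_DESC_SIZE from the ring base. A sketch with a hypothetical helper
 * that is not part of this driver (DMA_DESC_SIZE expands in terms of the
 * priv argument):
 */
static inline void __iomem *genet_cb_bd_addr(struct bcmgenet_priv *priv,
					     void __iomem *bds, unsigned int i)
{
	return bds + i * DMA_DESC_SIZE;
}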
2534 /* Interrupt bottom half */
2535 static void bcmgenet_irq_task(struct work_struct *work)
2537 unsigned int status;
2538 struct bcmgenet_priv *priv = container_of(
2539 work, struct bcmgenet_priv, bcmgenet_irq_work);
2541 netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
2543 spin_lock_irq(&priv->lock);
2544 status = priv->irq0_stat;
2545 priv->irq0_stat = 0;
2546 spin_unlock_irq(&priv->lock);
2548 if (status & UMAC_IRQ_PHY_DET_R &&
2549 priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
2550 phy_init_hw(priv->dev->phydev);
2551 genphy_config_aneg(priv->dev->phydev);
2554 /* Link UP/DOWN event */
2555 if (status & UMAC_IRQ_LINK_EVENT)
2556 phy_mac_interrupt(priv->dev->phydev);
2560 /* bcmgenet_isr1: handle Rx and Tx priority queues */
2561 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
2563 struct bcmgenet_priv *priv = dev_id;
2564 struct bcmgenet_rx_ring *rx_ring;
2565 struct bcmgenet_tx_ring *tx_ring;
2566 unsigned int index, status;
2568 /* Read irq status */
2569 status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
2570 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2572 /* clear interrupts */
2573 bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
2575 netif_dbg(priv, intr, priv->dev,
2576 "%s: IRQ=0x%x\n", __func__, status);
2578 /* Check Rx priority queue interrupts */
2579 for (index = 0; index < priv->hw_params->rx_queues; index++) {
2580 if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
2583 rx_ring = &priv->rx_rings[index];
2584 rx_ring->dim.event_ctr++;
2586 if (likely(napi_schedule_prep(&rx_ring->napi))) {
2587 rx_ring->int_disable(rx_ring);
2588 __napi_schedule_irqoff(&rx_ring->napi);
2592 /* Check Tx priority queue interrupts */
2593 for (index = 0; index < priv->hw_params->tx_queues; index++) {
2594 if (!(status & BIT(index)))
2597 tx_ring = &priv->tx_rings[index];
2599 if (likely(napi_schedule_prep(&tx_ring->napi))) {
2600 tx_ring->int_disable(tx_ring);
2601 __napi_schedule_irqoff(&tx_ring->napi);
2602 }
2603 }
2605 return IRQ_HANDLED;
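/* A sketch of the INTRL2_1 status layout decoded by the two loops above,
 * derived only from the BIT() arithmetic used there: Tx priority queue i
 * signals on bit i, Rx priority queue i on bit UMAC_IRQ1_RX_INTR_SHIFT + i.
 * The helper name is hypothetical and not part of this driver:
 */
static inline bool genet_irq1_bit_is_rx(unsigned int bit,
					unsigned int rx_queues)
{
	return bit >= UMAC_IRQ1_RX_INTR_SHIFT &&
	       bit < UMAC_IRQ1_RX_INTR_SHIFT + rx_queues;
}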
2608 /* bcmgenet_isr0: handle Rx and Tx default queues + other events (link, PHY detect, MDIO) */
2609 static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
2611 struct bcmgenet_priv *priv = dev_id;
2612 struct bcmgenet_rx_ring *rx_ring;
2613 struct bcmgenet_tx_ring *tx_ring;
2614 unsigned int status;
2615 unsigned long flags;
2617 /* Read irq status */
2618 status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
2619 ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
2621 /* clear interrupts */
2622 bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
2624 netif_dbg(priv, intr, priv->dev,
2625 "IRQ=0x%x\n", status);
2627 if (status & UMAC_IRQ_RXDMA_DONE) {
2628 rx_ring = &priv->rx_rings[DESC_INDEX];
2629 rx_ring->dim.event_ctr++;
2631 if (likely(napi_schedule_prep(&rx_ring->napi))) {
2632 rx_ring->int_disable(rx_ring);
2633 __napi_schedule_irqoff(&rx_ring->napi);
2637 if (status & UMAC_IRQ_TXDMA_DONE) {
2638 tx_ring = &priv->tx_rings[DESC_INDEX];
2640 if (likely(napi_schedule_prep(&tx_ring->napi))) {
2641 tx_ring->int_disable(tx_ring);
2642 __napi_schedule_irqoff(&tx_ring->napi);
2646 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
2647 status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
2651 /* all other interrupts of interest are handled in the bottom half */
2652 status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
2653 if (status) {
2654 /* Save irq status for bottom-half processing. */
2655 spin_lock_irqsave(&priv->lock, flags);
2656 priv->irq0_stat |= status;
2657 spin_unlock_irqrestore(&priv->lock, flags);
2659 schedule_work(&priv->bcmgenet_irq_work);
2660 }
2662 return IRQ_HANDLED;
2665 static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
2667 struct bcmgenet_priv *priv = dev_id;
2669 pm_wakeup_event(&priv->pdev->dev, 0);
2671 return IRQ_HANDLED;
2674 #ifdef CONFIG_NET_POLL_CONTROLLER
2675 static void bcmgenet_poll_controller(struct net_device *dev)
2677 struct bcmgenet_priv *priv = netdev_priv(dev);
2679 /* Invoke the main RX/TX interrupt handler */
2680 disable_irq(priv->irq0);
2681 bcmgenet_isr0(priv->irq0, priv);
2682 enable_irq(priv->irq0);
2684 /* And the interrupt handler for RX/TX priority queues */
2685 disable_irq(priv->irq1);
2686 bcmgenet_isr1(priv->irq1, priv);
2687 enable_irq(priv->irq1);
2688 }
2689 #endif
2691 static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
2693 u32 reg;
2695 reg = bcmgenet_rbuf_ctrl_get(priv);
2696 reg |= BIT(1);
2697 bcmgenet_rbuf_ctrl_set(priv, reg);
2698 udelay(10);
2700 reg &= ~BIT(1);
2701 bcmgenet_rbuf_ctrl_set(priv, reg);
2702 udelay(10);
2705 static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
2706 unsigned char *addr)
2708 bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
2709 bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
2712 static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
2713 unsigned char *addr)
2717 addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0);
2718 put_unaligned_be32(addr_tmp, &addr[0]);
2719 addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1);
2720 put_unaligned_be16(addr_tmp, &addr[4]);
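/* Worked example of the packing used by the two helpers above: for the
 * illustrative MAC address 00:11:22:33:44:55, UMAC_MAC0 holds 0x00112233
 * (first four bytes, big-endian) and the low half of UMAC_MAC1 holds 0x4455
 * (last two bytes).
 */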
2723 /* Returns a reusable dma control register value */
2724 static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
2726 u32 reg;
2727 u32 dma_ctrl;
2729 /* disable DMA */
2730 dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
2731 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2732 reg &= ~dma_ctrl;
2733 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2735 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2736 reg &= ~dma_ctrl;
2737 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2739 bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
2740 udelay(10);
2741 bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
2743 return dma_ctrl;
2746 static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
2748 u32 reg;
2750 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2751 reg |= dma_ctrl;
2752 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2754 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2755 reg |= dma_ctrl;
2756 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
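/* Typical pairing of bcmgenet_dma_disable() and bcmgenet_enable_dma(), as
 * used by bcmgenet_open() and bcmgenet_resume() below: quiesce DMA around
 * ring (re)initialization, then restore the saved enable state.
 *
 *	dma_ctrl = bcmgenet_dma_disable(priv);
 *	ret = bcmgenet_init_dma(priv);
 *	...
 *	bcmgenet_enable_dma(priv, dma_ctrl);
 */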
2759 /* bcmgenet_hfb_clear
2761 * Clear Hardware Filter Block and disable all filtering.
2762 */
2763 static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
2765 u32 i;
2767 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
2768 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
2769 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
2771 for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
2772 bcmgenet_rdma_writel(priv, 0x0, i);
2774 for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
2775 bcmgenet_hfb_reg_writel(priv, 0x0,
2776 HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
2778 for (i = 0; i < priv->hw_params->hfb_filter_cnt *
2779 priv->hw_params->hfb_filter_size; i++)
2780 bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
2783 static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
2785 if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
2786 return;
2788 bcmgenet_hfb_clear(priv);
2791 static void bcmgenet_netif_start(struct net_device *dev)
2793 struct bcmgenet_priv *priv = netdev_priv(dev);
2795 /* Start the network engine */
2796 bcmgenet_enable_rx_napi(priv);
2798 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
2800 bcmgenet_enable_tx_napi(priv);
2802 /* Monitor link interrupts now */
2803 bcmgenet_link_intr_enable(priv);
2805 phy_start(dev->phydev);
2808 static int bcmgenet_open(struct net_device *dev)
2810 struct bcmgenet_priv *priv = netdev_priv(dev);
2811 unsigned long dma_ctrl;
2812 u32 reg;
2813 int ret;
2815 netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
2817 /* Turn on the clock */
2818 clk_prepare_enable(priv->clk);
2820 /* If this is an internal GPHY, power it back on now, before UniMAC is
2821 * brought out of reset, as absolutely no UniMAC activity is allowed
2822 */
2823 if (priv->internal_phy)
2824 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
2826 /* take MAC out of reset */
2827 bcmgenet_umac_reset(priv);
2831 /* Apply features again in case we changed them while interface was
2832 * down
2833 */
2834 bcmgenet_set_features(dev, dev->features);
2836 bcmgenet_set_hw_addr(priv, dev->dev_addr);
2838 if (priv->internal_phy) {
2839 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
2840 reg |= EXT_ENERGY_DET_MASK;
2841 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
2844 /* Disable RX/TX DMA and flush TX queues */
2845 dma_ctrl = bcmgenet_dma_disable(priv);
2847 /* Reinitialize TDMA and RDMA and SW housekeeping */
2848 ret = bcmgenet_init_dma(priv);
2850 netdev_err(dev, "failed to initialize DMA\n");
2851 goto err_clk_disable;
2854 /* Always enable ring 16 - descriptor ring */
2855 bcmgenet_enable_dma(priv, dma_ctrl);
2858 bcmgenet_hfb_init(priv);
2860 ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
2861 dev->name, priv);
2862 if (ret < 0) {
2863 netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
2864 goto err_fini_dma;
2865 }
2867 ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
2868 dev->name, priv);
2869 if (ret < 0) {
2870 netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
2871 goto err_irq0;
2872 }
2874 ret = bcmgenet_mii_probe(dev);
2875 if (ret) {
2876 netdev_err(dev, "failed to connect to PHY\n");
2877 goto err_irq1;
2878 }
2880 bcmgenet_netif_start(dev);
2882 netif_tx_start_all_queues(dev);
2884 return 0;
2886 err_irq1:
2887 free_irq(priv->irq1, priv);
2888 err_irq0:
2889 free_irq(priv->irq0, priv);
2890 err_fini_dma:
2891 bcmgenet_dma_teardown(priv);
2892 bcmgenet_fini_dma(priv);
2893 err_clk_disable:
2894 if (priv->internal_phy)
2895 bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
2896 clk_disable_unprepare(priv->clk);
2897 return ret;
2900 static void bcmgenet_netif_stop(struct net_device *dev)
2902 struct bcmgenet_priv *priv = netdev_priv(dev);
2904 bcmgenet_disable_tx_napi(priv);
2905 netif_tx_disable(dev);
2907 /* Disable MAC receive */
2908 umac_enable_set(priv, CMD_RX_EN, false);
2910 bcmgenet_dma_teardown(priv);
2912 /* Disable MAC transmit. TX DMA must already be disabled at this point */
2913 umac_enable_set(priv, CMD_TX_EN, false);
2915 phy_stop(dev->phydev);
2916 bcmgenet_disable_rx_napi(priv);
2917 bcmgenet_intr_disable(priv);
2919 /* Wait for pending work items to complete. Since interrupts are
2920 * disabled, no new work will be scheduled.
2921 */
2922 cancel_work_sync(&priv->bcmgenet_irq_work);
2924 priv->old_link = -1;
2925 priv->old_speed = -1;
2926 priv->old_duplex = -1;
2927 priv->old_pause = -1;
2930 bcmgenet_tx_reclaim_all(dev);
2931 bcmgenet_fini_dma(priv);
2934 static int bcmgenet_close(struct net_device *dev)
2936 struct bcmgenet_priv *priv = netdev_priv(dev);
2937 int ret = 0;
2939 netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
2941 bcmgenet_netif_stop(dev);
2943 /* Really kill the PHY state machine and disconnect from it */
2944 phy_disconnect(dev->phydev);
2946 free_irq(priv->irq0, priv);
2947 free_irq(priv->irq1, priv);
2949 if (priv->internal_phy)
2950 ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
2952 clk_disable_unprepare(priv->clk);
2954 return ret;
2957 static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
2959 struct bcmgenet_priv *priv = ring->priv;
2960 u32 p_index, c_index, intsts, intmsk;
2961 struct netdev_queue *txq;
2962 unsigned int free_bds;
2963 bool txq_stopped;
2965 if (!netif_msg_tx_err(priv))
2966 return;
2968 txq = netdev_get_tx_queue(priv->dev, ring->queue);
2970 spin_lock(&ring->lock);
2971 if (ring->index == DESC_INDEX) {
2972 intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
2973 intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
2974 } else {
2975 intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2976 intmsk = 1 << ring->index;
2977 }
2978 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
2979 p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
2980 txq_stopped = netif_tx_queue_stopped(txq);
2981 free_bds = ring->free_bds;
2982 spin_unlock(&ring->lock);
2984 netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
2985 "TX queue status: %s, interrupts: %s\n"
2986 "(sw)free_bds: %d (sw)size: %d\n"
2987 "(sw)p_index: %d (hw)p_index: %d\n"
2988 "(sw)c_index: %d (hw)c_index: %d\n"
2989 "(sw)clean_p: %d (sw)write_p: %d\n"
2990 "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
2991 ring->index, ring->queue,
2992 txq_stopped ? "stopped" : "active",
2993 intsts & intmsk ? "enabled" : "disabled",
2994 free_bds, ring->size,
2995 ring->prod_index, p_index & DMA_P_INDEX_MASK,
2996 ring->c_index, c_index & DMA_C_INDEX_MASK,
2997 ring->clean_ptr, ring->write_ptr,
2998 ring->cb_ptr, ring->end_ptr);
3001 static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
3003 struct bcmgenet_priv *priv = netdev_priv(dev);
3004 u32 int0_enable = 0;
3005 u32 int1_enable = 0;
3006 unsigned int q;
3008 netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
3010 for (q = 0; q < priv->hw_params->tx_queues; q++)
3011 bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
3012 bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
3014 bcmgenet_tx_reclaim_all(dev);
3016 for (q = 0; q < priv->hw_params->tx_queues; q++)
3017 int1_enable |= (1 << q);
3019 int0_enable = UMAC_IRQ_TXDMA_DONE;
3021 /* Re-enable TX interrupts if disabled */
3022 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
3023 bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
3025 netif_trans_update(dev);
3027 dev->stats.tx_errors++;
3029 netif_tx_wake_all_queues(dev);
3032 #define MAX_MDF_FILTER 17
3034 static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
3035 unsigned char *addr,
3036 int *i)
3038 bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
3039 UMAC_MDF_ADDR + (*i * 4));
3040 bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
3041 addr[4] << 8 | addr[5],
3042 UMAC_MDF_ADDR + ((*i + 1) * 4));
3043 *i += 2;
3046 static void bcmgenet_set_rx_mode(struct net_device *dev)
3048 struct bcmgenet_priv *priv = netdev_priv(dev);
3049 struct netdev_hw_addr *ha;
3050 int i, nfilter;
3051 u32 reg;
3053 netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
3055 /* Number of filters needed */
3056 nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
3059 * Turn on promiscuous mode in three scenarios:
3060 * 1. IFF_PROMISC flag is set
3061 * 2. IFF_ALLMULTI flag is set
3062 * 3. The number of filters needed exceeds the number of filters
3063 * supported by the hardware.
3064 */
3065 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
3066 if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
3067 (nfilter > MAX_MDF_FILTER)) {
3068 reg |= CMD_PROMISC;
3069 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3070 bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
3071 return;
3072 } else {
3073 reg &= ~CMD_PROMISC;
3074 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3075 }
3077 /* update MDF filter */
3078 i = 0;
3079 /* Broadcast */
3080 bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
3081 /* my own address */
3082 bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
3085 netdev_for_each_uc_addr(ha, dev)
3086 bcmgenet_set_mdf_addr(priv, ha->addr, &i);
3089 netdev_for_each_mc_addr(ha, dev)
3090 bcmgenet_set_mdf_addr(priv, ha->addr, &i);
3092 /* Enable filters */
3093 reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
3094 bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
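/* Worked example of the enable mask written above: with one unicast and one
 * multicast address programmed, nfilter = 2 + 1 + 1 = 4, so
 * reg = GENMASK(16, 13) = 0x1e000, i.e. the four filter slots just
 * programmed (the enable bits are allocated from the most-significant end
 * of the 17-bit field).
 */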
3097 /* Set the hardware MAC address. */
3098 static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
3100 struct sockaddr *addr = p;
3102 /* Setting the MAC address at the hardware level is not possible
3103 * without disabling the UniMAC RX/TX enable bits.
3105 if (netif_running(dev))
3106 return -EBUSY;
3108 ether_addr_copy(dev->dev_addr, addr->sa_data);
3110 return 0;
3113 static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
3115 struct bcmgenet_priv *priv = netdev_priv(dev);
3116 unsigned long tx_bytes = 0, tx_packets = 0;
3117 unsigned long rx_bytes = 0, rx_packets = 0;
3118 unsigned long rx_errors = 0, rx_dropped = 0;
3119 struct bcmgenet_tx_ring *tx_ring;
3120 struct bcmgenet_rx_ring *rx_ring;
3121 unsigned int q;
3123 for (q = 0; q < priv->hw_params->tx_queues; q++) {
3124 tx_ring = &priv->tx_rings[q];
3125 tx_bytes += tx_ring->bytes;
3126 tx_packets += tx_ring->packets;
3128 tx_ring = &priv->tx_rings[DESC_INDEX];
3129 tx_bytes += tx_ring->bytes;
3130 tx_packets += tx_ring->packets;
3132 for (q = 0; q < priv->hw_params->rx_queues; q++) {
3133 rx_ring = &priv->rx_rings[q];
3135 rx_bytes += rx_ring->bytes;
3136 rx_packets += rx_ring->packets;
3137 rx_errors += rx_ring->errors;
3138 rx_dropped += rx_ring->dropped;
3140 rx_ring = &priv->rx_rings[DESC_INDEX];
3141 rx_bytes += rx_ring->bytes;
3142 rx_packets += rx_ring->packets;
3143 rx_errors += rx_ring->errors;
3144 rx_dropped += rx_ring->dropped;
3146 dev->stats.tx_bytes = tx_bytes;
3147 dev->stats.tx_packets = tx_packets;
3148 dev->stats.rx_bytes = rx_bytes;
3149 dev->stats.rx_packets = rx_packets;
3150 dev->stats.rx_errors = rx_errors;
3151 dev->stats.rx_missed_errors = rx_errors;
3152 dev->stats.rx_dropped = rx_dropped;
3154 return &dev->stats;
3156 static const struct net_device_ops bcmgenet_netdev_ops = {
3157 .ndo_open = bcmgenet_open,
3158 .ndo_stop = bcmgenet_close,
3159 .ndo_start_xmit = bcmgenet_xmit,
3160 .ndo_tx_timeout = bcmgenet_timeout,
3161 .ndo_set_rx_mode = bcmgenet_set_rx_mode,
3162 .ndo_set_mac_address = bcmgenet_set_mac_addr,
3163 .ndo_do_ioctl = phy_do_ioctl_running,
3164 .ndo_set_features = bcmgenet_set_features,
3165 #ifdef CONFIG_NET_POLL_CONTROLLER
3166 .ndo_poll_controller = bcmgenet_poll_controller,
3167 #endif
3168 .ndo_get_stats = bcmgenet_get_stats,
3169 };
3171 /* Array of GENET hardware parameters/characteristics */
3172 static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
3178 .bp_in_en_shift = 16,
3179 .bp_in_mask = 0xffff,
3180 .hfb_filter_cnt = 16,
3182 .hfb_offset = 0x1000,
3183 .rdma_offset = 0x2000,
3184 .tdma_offset = 0x3000,
3192 .bp_in_en_shift = 16,
3193 .bp_in_mask = 0xffff,
3194 .hfb_filter_cnt = 16,
3196 .tbuf_offset = 0x0600,
3197 .hfb_offset = 0x1000,
3198 .hfb_reg_offset = 0x2000,
3199 .rdma_offset = 0x3000,
3200 .tdma_offset = 0x4000,
3202 .flags = GENET_HAS_EXT,
3209 .bp_in_en_shift = 17,
3210 .bp_in_mask = 0x1ffff,
3211 .hfb_filter_cnt = 48,
3212 .hfb_filter_size = 128,
3214 .tbuf_offset = 0x0600,
3215 .hfb_offset = 0x8000,
3216 .hfb_reg_offset = 0xfc00,
3217 .rdma_offset = 0x10000,
3218 .tdma_offset = 0x11000,
3220 .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
3221 GENET_HAS_MOCA_LINK_DET,
3228 .bp_in_en_shift = 17,
3229 .bp_in_mask = 0x1ffff,
3230 .hfb_filter_cnt = 48,
3231 .hfb_filter_size = 128,
3233 .tbuf_offset = 0x0600,
3234 .hfb_offset = 0x8000,
3235 .hfb_reg_offset = 0xfc00,
3236 .rdma_offset = 0x2000,
3237 .tdma_offset = 0x4000,
3239 .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3240 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3247 .bp_in_en_shift = 17,
3248 .bp_in_mask = 0x1ffff,
3249 .hfb_filter_cnt = 48,
3250 .hfb_filter_size = 128,
3252 .tbuf_offset = 0x0600,
3253 .hfb_offset = 0x8000,
3254 .hfb_reg_offset = 0xfc00,
3255 .rdma_offset = 0x2000,
3256 .tdma_offset = 0x4000,
3258 .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3259 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3263 /* Infer hardware parameters from the detected GENET version */
3264 static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
3266 struct bcmgenet_hw_params *params;
3267 u32 reg;
3268 u8 major;
3269 u16 gphy_rev;
3271 if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
3272 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3273 genet_dma_ring_regs = genet_dma_ring_regs_v4;
3274 } else if (GENET_IS_V3(priv)) {
3275 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3276 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3277 } else if (GENET_IS_V2(priv)) {
3278 bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
3279 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3280 } else if (GENET_IS_V1(priv)) {
3281 bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
3282 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3285 /* enum genet_version starts at 1 */
3286 priv->hw_params = &bcmgenet_hw_params[priv->version];
3287 params = priv->hw_params;
3289 /* Read GENET HW version */
3290 reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
3291 major = (reg >> 24 & 0x0f);
3292 if (major == 6)
3293 major = 5;
3294 else if (major == 5)
3295 major = 4;
3296 else if (major == 0)
3297 major = 1;
3298 if (major != priv->version) {
3299 dev_err(&priv->pdev->dev,
3300 "GENET version mismatch, got: %d, configured for: %d\n",
3301 major, priv->version);
3304 /* Print the GENET core version */
3305 dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
3306 major, (reg >> 16) & 0x0f, reg & 0xffff);
3308 /* Store the integrated PHY revision for the MDIO probing function
3309 * to pass this information to the PHY driver. The PHY driver expects
3310 * to find the PHY major revision in bits 15:8 while the GENET register
3311 * stores that information in bits 7:0, account for that.
3313 * On newer chips, starting with PHY revision G0, a new scheme is
3314 * deployed similar to the Starfighter 2 switch with GPHY major
3315 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
3316 * is reserved, as is the special value 0x01ff, so we use a small
3317 * heuristic to detect the new GPHY revision scheme and re-arrange things
3318 * so the GPHY driver is happy.
3319 */
3320 gphy_rev = reg & 0xffff;
3322 if (GENET_IS_V5(priv)) {
3323 /* The EPHY revision should come from the MDIO registers of
3324 * the PHY not from GENET.
3326 if (gphy_rev != 0) {
3327 pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
3330 /* This is reserved so should require special treatment */
3331 } else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
3332 pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
3334 /* This is the good old scheme, just GPHY major, no minor nor patch */
3335 } else if ((gphy_rev & 0xf0) != 0) {
3336 priv->gphy_rev = gphy_rev << 8;
3337 /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
3338 } else if ((gphy_rev & 0xff00) != 0) {
3339 priv->gphy_rev = gphy_rev;
3340 }
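/* Worked examples of the heuristic above (values illustrative only):
 * gphy_rev = 0x0062 matches the old scheme (major in bits 7:4) and is
 * stored as 0x6200, while gphy_rev = 0x1001 matches the new scheme
 * (major 0x10 = rev G0, patch level 0x01) and is stored unchanged.
 */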
3342 #ifdef CONFIG_PHYS_ADDR_T_64BIT
3343 if (!(params->flags & GENET_HAS_40BITS))
3344 pr_warn("GENET does not support 40-bits PA\n");
3347 pr_debug("Configuration for version: %d\n"
3348 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
3349 "BP << en: %2d, BP msk: 0x%05x\n"
3350 "HFB count: %2d, QTAQ msk: 0x%05x\n"
3351 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
3352 "RDMA: 0x%05x, TDMA: 0x%05x\n"
3355 params->tx_queues, params->tx_bds_per_q,
3356 params->rx_queues, params->rx_bds_per_q,
3357 params->bp_in_en_shift, params->bp_in_mask,
3358 params->hfb_filter_cnt, params->qtag_mask,
3359 params->tbuf_offset, params->hfb_offset,
3360 params->hfb_reg_offset,
3361 params->rdma_offset, params->tdma_offset,
3362 params->words_per_bd);
3365 struct bcmgenet_plat_data {
3366 enum bcmgenet_version version;
3367 u32 dma_max_burst_length;
3370 static const struct bcmgenet_plat_data v1_plat_data = {
3371 .version = GENET_V1,
3372 .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3375 static const struct bcmgenet_plat_data v2_plat_data = {
3376 .version = GENET_V2,
3377 .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3380 static const struct bcmgenet_plat_data v3_plat_data = {
3381 .version = GENET_V3,
3382 .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3385 static const struct bcmgenet_plat_data v4_plat_data = {
3386 .version = GENET_V4,
3387 .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3390 static const struct bcmgenet_plat_data v5_plat_data = {
3391 .version = GENET_V5,
3392 .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3395 static const struct bcmgenet_plat_data bcm2711_plat_data = {
3396 .version = GENET_V5,
3397 .dma_max_burst_length = 0x08,
3400 static const struct of_device_id bcmgenet_match[] = {
3401 { .compatible = "brcm,genet-v1", .data = &v1_plat_data },
3402 { .compatible = "brcm,genet-v2", .data = &v2_plat_data },
3403 { .compatible = "brcm,genet-v3", .data = &v3_plat_data },
3404 { .compatible = "brcm,genet-v4", .data = &v4_plat_data },
3405 { .compatible = "brcm,genet-v5", .data = &v5_plat_data },
3406 { .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
3409 MODULE_DEVICE_TABLE(of, bcmgenet_match);
3411 static int bcmgenet_probe(struct platform_device *pdev)
3413 struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
3414 const struct bcmgenet_plat_data *pdata;
3415 struct bcmgenet_priv *priv;
3416 struct net_device *dev;
3417 unsigned int i;
3418 int err = -EINVAL;
3420 /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
3421 dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
3422 GENET_MAX_MQ_CNT + 1);
3424 dev_err(&pdev->dev, "can't allocate net device\n");
3428 priv = netdev_priv(dev);
3429 priv->irq0 = platform_get_irq(pdev, 0);
3430 if (priv->irq0 < 0) {
3431 err = priv->irq0;
3432 goto err;
3433 }
3434 priv->irq1 = platform_get_irq(pdev, 1);
3435 if (priv->irq1 < 0) {
3436 err = priv->irq1;
3437 goto err;
3438 }
3439 priv->wol_irq = platform_get_irq_optional(pdev, 2);
3441 priv->base = devm_platform_ioremap_resource(pdev, 0);
3442 if (IS_ERR(priv->base)) {
3443 err = PTR_ERR(priv->base);
3444 goto err;
3445 }
3447 spin_lock_init(&priv->lock);
3449 SET_NETDEV_DEV(dev, &pdev->dev);
3450 dev_set_drvdata(&pdev->dev, dev);
3451 dev->watchdog_timeo = 2 * HZ;
3452 dev->ethtool_ops = &bcmgenet_ethtool_ops;
3453 dev->netdev_ops = &bcmgenet_netdev_ops;
3455 priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
3457 /* Set default features */
3458 dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
3459 NETIF_F_RXCSUM;
3460 dev->hw_features |= dev->features;
3461 dev->vlan_features |= dev->features;
3463 /* Request the WOL interrupt and advertise suspend if available */
3464 priv->wol_irq_disabled = true;
3465 err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
3466 dev->name, priv->dev);
3467 if (!err)
3468 device_set_wakeup_capable(&pdev->dev, 1);
3470 /* Set the needed headroom to account for any possible
3471 * features being enabled/disabled at runtime
3472 */
3473 dev->needed_headroom += 64;
3475 netdev_boot_setup_check(dev);
3477 priv->dev = dev;
3478 priv->pdev = pdev;
3480 pdata = device_get_match_data(&pdev->dev);
3481 if (pdata) {
3482 priv->version = pdata->version;
3483 priv->dma_max_burst_length = pdata->dma_max_burst_length;
3484 } else {
3485 priv->version = pd->genet_version;
3486 priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
3487 }
3489 priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
3490 if (IS_ERR(priv->clk)) {
3491 dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
3492 err = PTR_ERR(priv->clk);
3493 goto err;
3494 }
3496 err = clk_prepare_enable(priv->clk);
3497 if (err)
3498 goto err;
3500 bcmgenet_set_hw_params(priv);
3503 if (priv->hw_params->flags & GENET_HAS_40BITS)
3504 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
3505 else
3506 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3507 if (err)
3508 goto err_clk_disable;
3510 /* Mii wait queue */
3511 init_waitqueue_head(&priv->wq);
3512 /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
3513 priv->rx_buf_len = RX_BUF_LENGTH;
3514 INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
3516 priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol");
3517 if (IS_ERR(priv->clk_wol)) {
3518 dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
3519 err = PTR_ERR(priv->clk_wol);
3520 goto err_clk_disable;
3521 }
3523 priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
3524 if (IS_ERR(priv->clk_eee)) {
3525 dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
3526 err = PTR_ERR(priv->clk_eee);
3527 goto err_clk_disable;
3528 }
3530 /* If this is an internal GPHY, power it on now, before UniMAC is
3531 * brought out of reset, as absolutely no UniMAC activity is allowed
3532 */
3533 if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
3534 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
3536 if (pd && !IS_ERR_OR_NULL(pd->mac_address))
3537 ether_addr_copy(dev->dev_addr, pd->mac_address);
3538 else
3539 if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN))
3540 if (has_acpi_companion(&pdev->dev))
3541 bcmgenet_get_hw_addr(priv, dev->dev_addr);
3543 if (!is_valid_ether_addr(dev->dev_addr)) {
3544 dev_warn(&pdev->dev, "using random Ethernet MAC\n");
3545 eth_hw_addr_random(dev);
3550 err = bcmgenet_mii_init(dev);
3552 goto err_clk_disable;
3554 /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues
3555 * just the ring 16 descriptor based TX
3557 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
3558 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
3560 /* Set default coalescing parameters */
3561 for (i = 0; i < priv->hw_params->rx_queues; i++)
3562 priv->rx_rings[i].rx_max_coalesced_frames = 1;
3563 priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;
3565 /* libphy will determine the link state */
3566 netif_carrier_off(dev);
3568 /* Turn off the main clock, WOL clock is handled separately */
3569 clk_disable_unprepare(priv->clk);
3571 err = register_netdev(dev);
3572 if (err)
3573 goto err;
3575 return 0;
3577 err_clk_disable:
3578 clk_disable_unprepare(priv->clk);
3579 err:
3580 free_netdev(dev);
3581 return err;
3584 static int bcmgenet_remove(struct platform_device *pdev)
3586 struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
3588 dev_set_drvdata(&pdev->dev, NULL);
3589 unregister_netdev(priv->dev);
3590 bcmgenet_mii_exit(priv->dev);
3591 free_netdev(priv->dev);
3593 return 0;
3596 static void bcmgenet_shutdown(struct platform_device *pdev)
3598 bcmgenet_remove(pdev);
3601 #ifdef CONFIG_PM_SLEEP
3602 static int bcmgenet_resume(struct device *d)
3604 struct net_device *dev = dev_get_drvdata(d);
3605 struct bcmgenet_priv *priv = netdev_priv(dev);
3606 unsigned long dma_ctrl;
3607 int ret;
3608 u32 reg;
3610 if (!netif_running(dev))
3611 return 0;
3613 /* Turn on the clock */
3614 ret = clk_prepare_enable(priv->clk);
3615 if (ret)
3616 return ret;
3618 /* If this is an internal GPHY, power it back on now, before UniMAC is
3619 * brought out of reset, as absolutely no UniMAC activity is allowed
3620 */
3621 if (priv->internal_phy)
3622 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
3624 bcmgenet_umac_reset(priv);
3628 /* From WOL-enabled suspend, switch to regular clock */
3629 if (device_may_wakeup(d) && priv->wolopts)
3630 clk_disable_unprepare(priv->clk_wol);
3632 phy_init_hw(dev->phydev);
3634 /* Speed settings must be restored */
3635 genphy_config_aneg(dev->phydev);
3636 bcmgenet_mii_config(priv->dev, false);
3638 /* Restore enabled features */
3639 bcmgenet_set_features(dev, dev->features);
3641 bcmgenet_set_hw_addr(priv, dev->dev_addr);
3643 if (priv->internal_phy) {
3644 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
3645 reg |= EXT_ENERGY_DET_MASK;
3646 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
3647 }
3649 if (priv->wolopts)
3650 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
3652 /* Disable RX/TX DMA and flush TX queues */
3653 dma_ctrl = bcmgenet_dma_disable(priv);
3655 /* Reinitialize TDMA and RDMA and SW housekeeping */
3656 ret = bcmgenet_init_dma(priv);
3658 netdev_err(dev, "failed to initialize DMA\n");
3659 goto out_clk_disable;
3662 /* Always enable ring 16 - descriptor ring */
3663 bcmgenet_enable_dma(priv, dma_ctrl);
3665 if (!device_may_wakeup(d))
3666 phy_resume(dev->phydev);
3668 if (priv->eee.eee_enabled)
3669 bcmgenet_eee_enable_set(dev, true);
3671 bcmgenet_netif_start(dev);
3673 netif_device_attach(dev);
3675 return 0;
3677 out_clk_disable:
3678 if (priv->internal_phy)
3679 bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3680 clk_disable_unprepare(priv->clk);
3681 return ret;
3684 static int bcmgenet_suspend(struct device *d)
3686 struct net_device *dev = dev_get_drvdata(d);
3687 struct bcmgenet_priv *priv = netdev_priv(dev);
3688 int ret = 0;
3690 if (!netif_running(dev))
3691 return 0;
3693 netif_device_detach(dev);
3695 bcmgenet_netif_stop(dev);
3697 if (!device_may_wakeup(d))
3698 phy_suspend(dev->phydev);
3700 /* Prepare the device for Wake-on-LAN and switch to the slow clock */
3701 if (device_may_wakeup(d) && priv->wolopts) {
3702 ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
3703 clk_prepare_enable(priv->clk_wol);
3704 } else if (priv->internal_phy) {
3705 ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3708 /* Turn off the clocks */
3709 clk_disable_unprepare(priv->clk);
3711 if (ret)
3712 bcmgenet_resume(d);
3714 return ret;
3716 #endif /* CONFIG_PM_SLEEP */
3718 static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
3720 static const struct acpi_device_id genet_acpi_match[] = {
3721 { "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data },
3724 MODULE_DEVICE_TABLE(acpi, genet_acpi_match);
3726 static struct platform_driver bcmgenet_driver = {
3727 .probe = bcmgenet_probe,
3728 .remove = bcmgenet_remove,
3729 .shutdown = bcmgenet_shutdown,
3730 .driver = {
3731 .name = "bcmgenet",
3732 .of_match_table = bcmgenet_match,
3733 .pm = &bcmgenet_pm_ops,
3734 .acpi_match_table = genet_acpi_match,
3735 },
3736 };
3737 module_platform_driver(bcmgenet_driver);
3739 MODULE_AUTHOR("Broadcom Corporation");
3740 MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
3741 MODULE_ALIAS("platform:bcmgenet");
3742 MODULE_LICENSE("GPL");