2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
4 * Copyright (C) 2012 Marvell
6 * Rami Rosen <rosenr@marvell.com>
7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
14 #include <linux/kernel.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/platform_device.h>
18 #include <linux/skbuff.h>
19 #include <linux/inetdevice.h>
20 #include <linux/mbus.h>
21 #include <linux/module.h>
22 #include <linux/interrupt.h>
27 #include <linux/of_irq.h>
28 #include <linux/of_mdio.h>
29 #include <linux/of_net.h>
30 #include <linux/of_address.h>
31 #include <linux/phy.h>
32 #include <linux/clk.h>
35 #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
36 #define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
37 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
38 #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
39 #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
40 #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
41 #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
42 #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
43 #define MVNETA_RXQ_BUF_SIZE_SHIFT 19
44 #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
45 #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
46 #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
47 #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
48 #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
49 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
50 #define MVNETA_PORT_RX_RESET 0x1cc0
51 #define MVNETA_PORT_RX_DMA_RESET BIT(0)
52 #define MVNETA_PHY_ADDR 0x2000
53 #define MVNETA_PHY_ADDR_MASK 0x1f
54 #define MVNETA_MBUS_RETRY 0x2010
55 #define MVNETA_UNIT_INTR_CAUSE 0x2080
56 #define MVNETA_UNIT_CONTROL 0x20B0
57 #define MVNETA_PHY_POLLING_ENABLE BIT(1)
58 #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
59 #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
60 #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
61 #define MVNETA_BASE_ADDR_ENABLE 0x2290
62 #define MVNETA_PORT_CONFIG 0x2400
63 #define MVNETA_UNI_PROMISC_MODE BIT(0)
64 #define MVNETA_DEF_RXQ(q) ((q) << 1)
65 #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
66 #define MVNETA_TX_UNSET_ERR_SUM BIT(12)
67 #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
68 #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
69 #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
70 #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
71 #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
72 MVNETA_DEF_RXQ_ARP(q) | \
73 MVNETA_DEF_RXQ_TCP(q) | \
74 MVNETA_DEF_RXQ_UDP(q) | \
75 MVNETA_DEF_RXQ_BPDU(q) | \
76 MVNETA_TX_UNSET_ERR_SUM | \
77 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
78 #define MVNETA_PORT_CONFIG_EXTEND 0x2404
79 #define MVNETA_MAC_ADDR_LOW 0x2414
80 #define MVNETA_MAC_ADDR_HIGH 0x2418
81 #define MVNETA_SDMA_CONFIG 0x241c
82 #define MVNETA_SDMA_BRST_SIZE_16 4
83 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
84 #define MVNETA_RX_NO_DATA_SWAP BIT(4)
85 #define MVNETA_TX_NO_DATA_SWAP BIT(5)
86 #define MVNETA_DESC_SWAP BIT(6)
87 #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
88 #define MVNETA_PORT_STATUS 0x2444
89 #define MVNETA_TX_IN_PRGRS BIT(1)
90 #define MVNETA_TX_FIFO_EMPTY BIT(8)
91 #define MVNETA_RX_MIN_FRAME_SIZE 0x247c
92 #define MVNETA_SGMII_SERDES_CFG 0x24A0
93 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7
94 #define MVNETA_TYPE_PRIO 0x24bc
95 #define MVNETA_FORCE_UNI BIT(21)
96 #define MVNETA_TXQ_CMD_1 0x24e4
97 #define MVNETA_TXQ_CMD 0x2448
98 #define MVNETA_TXQ_DISABLE_SHIFT 8
99 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff
100 #define MVNETA_ACC_MODE 0x2500
101 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
102 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
103 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
104 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
106 /* Exception Interrupt Port/Queue Cause register */
108 #define MVNETA_INTR_NEW_CAUSE 0x25a0
109 #define MVNETA_INTR_NEW_MASK 0x25a4
111 /* bits 0..7 = TXQ SENT, one bit per queue.
112 * bits 8..15 = RXQ OCCUP, one bit per queue.
113 * bits 16..23 = RXQ FREE, one bit per queue.
114 * bit 29 = OLD_REG_SUM, see old reg ?
115 * bit 30 = TX_ERR_SUM, one bit for 4 ports
116 * bit 31 = MISC_SUM, one bit for 4 ports
118 #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
119 #define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
120 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
121 #define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
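/* Example: with all eight queues in use, MVNETA_TX_INTR_MASK(8) == 0x00ff
 * and MVNETA_RX_INTR_MASK(8) == 0xff00, i.e. exactly the TXQ SENT and
 * RXQ OCCUP bit ranges described above.
 */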
123 #define MVNETA_INTR_OLD_CAUSE 0x25a8
124 #define MVNETA_INTR_OLD_MASK 0x25ac
126 /* Data Path Port/Queue Cause Register */
127 #define MVNETA_INTR_MISC_CAUSE 0x25b0
128 #define MVNETA_INTR_MISC_MASK 0x25b4
130 #define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
131 #define MVNETA_CAUSE_LINK_CHANGE BIT(1)
132 #define MVNETA_CAUSE_PTP BIT(4)
134 #define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
135 #define MVNETA_CAUSE_RX_OVERRUN BIT(8)
136 #define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
137 #define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
138 #define MVNETA_CAUSE_TX_UNDERUN BIT(11)
139 #define MVNETA_CAUSE_PRBS_ERR BIT(12)
140 #define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
141 #define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)
143 #define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
144 #define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
145 #define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
147 #define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
148 #define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
149 #define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
151 #define MVNETA_INTR_ENABLE 0x25b8
152 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
153 #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF
155 #define MVNETA_RXQ_CMD 0x2680
156 #define MVNETA_RXQ_DISABLE_SHIFT 8
157 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff
158 #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
159 #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
160 #define MVNETA_GMAC_CTRL_0 0x2c00
161 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
162 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
163 #define MVNETA_GMAC0_PORT_ENABLE BIT(0)
164 #define MVNETA_GMAC_CTRL_2 0x2c08
165 #define MVNETA_GMAC2_PCS_ENABLE BIT(3)
166 #define MVNETA_GMAC2_PORT_RGMII BIT(4)
167 #define MVNETA_GMAC2_PORT_RESET BIT(6)
168 #define MVNETA_GMAC_STATUS 0x2c10
169 #define MVNETA_GMAC_LINK_UP BIT(0)
170 #define MVNETA_GMAC_SPEED_1000 BIT(1)
171 #define MVNETA_GMAC_SPEED_100 BIT(2)
172 #define MVNETA_GMAC_FULL_DUPLEX BIT(3)
173 #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
174 #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
175 #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
176 #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
177 #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
178 #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
179 #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
180 #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
181 #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
182 #define MVNETA_GMAC_AN_SPEED_EN BIT(7)
183 #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
184 #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
185 #define MVNETA_MIB_COUNTERS_BASE 0x3080
186 #define MVNETA_MIB_LATE_COLLISION 0x7c
187 #define MVNETA_DA_FILT_SPEC_MCAST 0x3400
188 #define MVNETA_DA_FILT_OTH_MCAST 0x3500
189 #define MVNETA_DA_FILT_UCAST_BASE 0x3600
190 #define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
191 #define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
192 #define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
193 #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
194 #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
195 #define MVNETA_TXQ_DEC_SENT_SHIFT 16
196 #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
197 #define MVNETA_TXQ_SENT_DESC_SHIFT 16
198 #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
199 #define MVNETA_PORT_TX_RESET 0x3cf0
200 #define MVNETA_PORT_TX_DMA_RESET BIT(0)
201 #define MVNETA_TX_MTU 0x3e0c
202 #define MVNETA_TX_TOKEN_SIZE 0x3e14
203 #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
204 #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
205 #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
207 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
209 /* Descriptor ring Macros */
210 #define MVNETA_QUEUE_NEXT_DESC(q, index) \
211 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
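/* Example: for a ring of 128 descriptors (last_desc == 127),
 * MVNETA_QUEUE_NEXT_DESC(q, 126) yields 127, while
 * MVNETA_QUEUE_NEXT_DESC(q, 127) wraps around to 0.
 */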
213 /* Various constants */
216 #define MVNETA_TXDONE_COAL_PKTS 16
217 #define MVNETA_RX_COAL_PKTS 32
218 #define MVNETA_RX_COAL_USEC 100
220 /* Napi polling weight */
221 #define MVNETA_RX_POLL_WEIGHT 64
223 /* The two-byte Marvell header either contains a special value used
224 * by Marvell switches when a specific hardware mode is enabled (not
225 * supported by this driver) or is filled automatically with zeroes on
226 * the RX side. Since those two bytes sit at the front of the Ethernet
227 * header, they automatically align the IP header on a 4-byte
228 * boundary: the hardware skips those two bytes on its own. */
231 #define MVNETA_MH_SIZE 2
233 #define MVNETA_VLAN_TAG_LEN 4
235 #define MVNETA_CPU_D_CACHE_LINE_SIZE 32
236 #define MVNETA_TX_CSUM_MAX_SIZE 9800
237 #define MVNETA_ACC_MODE_EXT 1
239 /* Timeout constants */
240 #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
241 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
242 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
244 #define MVNETA_TX_MTU_MAX 0x3ffff
246 /* Max number of Rx descriptors */
247 #define MVNETA_MAX_RXD 128
249 /* Max number of Tx descriptors */
250 #define MVNETA_MAX_TXD 532
252 /* descriptor aligned size */
253 #define MVNETA_DESC_ALIGNED_SIZE 32
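/* Both mvneta_tx_desc and mvneta_rx_desc below are eight 32-bit words,
 * i.e. exactly MVNETA_DESC_ALIGNED_SIZE (32) bytes each.
 */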
255 #define MVNETA_RX_PKT_SIZE(mtu) \
256 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
257 ETH_HLEN + ETH_FCS_LEN, \
258 MVNETA_CPU_D_CACHE_LINE_SIZE)
260 #define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
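/* Worked example: with ETH_HLEN == 14 and ETH_FCS_LEN == 4, an MTU of
 * 1500 gives MVNETA_RX_PKT_SIZE == ALIGN(1500 + 2 + 4 + 14 + 4, 32)
 * == 1536; MVNETA_RX_BUF_SIZE then adds NET_SKB_PAD bytes of headroom
 * on top of that for the actual DMA buffer.
 */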
262 struct mvneta_pcpu_stats {
263 struct u64_stats_sync syncp;
272 unsigned int frag_size;
274 struct mvneta_rx_queue *rxqs;
275 struct mvneta_tx_queue *txqs;
276 struct net_device *dev;
279 struct napi_struct napi;
289 struct mvneta_pcpu_stats *stats;
291 struct mii_bus *mii_bus;
292 struct phy_device *phy_dev;
293 phy_interface_t phy_interface;
294 struct device_node *phy_node;
300 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
301 * layout of the transmit and receive DMA descriptors; this layout
302 * is therefore dictated by the hardware design. */
305 #define MVNETA_TX_L3_OFF_SHIFT 0
306 #define MVNETA_TX_IP_HLEN_SHIFT 8
307 #define MVNETA_TX_L4_UDP BIT(16)
308 #define MVNETA_TX_L3_IP6 BIT(17)
309 #define MVNETA_TXD_IP_CSUM BIT(18)
310 #define MVNETA_TXD_Z_PAD BIT(19)
311 #define MVNETA_TXD_L_DESC BIT(20)
312 #define MVNETA_TXD_F_DESC BIT(21)
313 #define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
314 MVNETA_TXD_L_DESC | \
316 #define MVNETA_TX_L4_CSUM_FULL BIT(30)
317 #define MVNETA_TX_L4_CSUM_NOT BIT(31)
319 #define MVNETA_RXD_ERR_CRC 0x0
320 #define MVNETA_RXD_ERR_SUMMARY BIT(16)
321 #define MVNETA_RXD_ERR_OVERRUN BIT(17)
322 #define MVNETA_RXD_ERR_LEN BIT(18)
323 #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
324 #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
325 #define MVNETA_RXD_L3_IP4 BIT(25)
326 #define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
327 #define MVNETA_RXD_L4_CSUM_OK BIT(30)
329 #if defined(__LITTLE_ENDIAN)
330 struct mvneta_tx_desc {
331 u32 command; /* Options used by HW for packet transmitting.*/
332 u16 reserved1; /* csum_l4 (for future use) */
333 u16 data_size; /* Data size of transmitted packet in bytes */
334 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
335 u32 reserved2; /* hw_cmd - (for future use, PMT) */
336 u32 reserved3[4]; /* Reserved - (for future use) */
339 struct mvneta_rx_desc {
340 u32 status; /* Info about received packet */
341 u16 reserved1; /* pnc_info - (for future use, PnC) */
342 u16 data_size; /* Size of received packet in bytes */
344 u32 buf_phys_addr; /* Physical address of the buffer */
345 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
347 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
348 u16 reserved3; /* prefetch_cmd, for future use */
349 u16 reserved4; /* csum_l4 - (for future use, PnC) */
351 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
352 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
355 struct mvneta_tx_desc {
356 u16 data_size; /* Data size of transmitted packet in bytes */
357 u16 reserved1; /* csum_l4 (for future use) */
358 u32 command; /* Options used by HW for packet transmitting.*/
359 u32 reserved2; /* hw_cmd - (for future use, PMT) */
360 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
361 u32 reserved3[4]; /* Reserved - (for future use) */
364 struct mvneta_rx_desc {
365 u16 data_size; /* Size of received packet in bytes */
366 u16 reserved1; /* pnc_info - (for future use, PnC) */
367 u32 status; /* Info about received packet */
369 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
370 u32 buf_phys_addr; /* Physical address of the buffer */
372 u16 reserved4; /* csum_l4 - (for future use, PnC) */
373 u16 reserved3; /* prefetch_cmd, for future use */
374 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
376 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
377 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
381 struct mvneta_tx_queue {
382 /* Number of this TX queue, in the range 0-7 */
385 /* Number of TX DMA descriptors in the descriptor ring */
388 /* Number of currently used TX DMA descriptors in the descriptor ring */
393 /* Array of transmitted skb */
394 struct sk_buff **tx_skb;
396 /* Index of last TX DMA descriptor that was inserted */
399 /* Index of the TX DMA descriptor to be cleaned up */
404 /* Virtual address of the TX DMA descriptors array */
405 struct mvneta_tx_desc *descs;
407 /* DMA address of the TX DMA descriptors array */
408 dma_addr_t descs_phys;
410 /* Index of the last TX DMA descriptor */
413 /* Index of the next TX DMA descriptor to process */
414 int next_desc_to_proc;
417 struct mvneta_rx_queue {
418 /* rx queue number, in the range 0-7 */
421 /* num of rx descriptors in the rx descriptor ring */
424 /* counter of times when mvneta_refill() failed */
430 /* Virtual address of the RX DMA descriptors array */
431 struct mvneta_rx_desc *descs;
433 /* DMA address of the RX DMA descriptors array */
434 dma_addr_t descs_phys;
436 /* Index of the last RX DMA descriptor */
439 /* Index of the next RX DMA descriptor to process */
440 int next_desc_to_proc;
443 static int rxq_number = 8;
444 static int txq_number = 8;
448 static int rx_copybreak __read_mostly = 256;
450 #define MVNETA_DRIVER_NAME "mvneta"
451 #define MVNETA_DRIVER_VERSION "1.0"
453 /* Utility/helper methods */
455 /* Write helper method */
456 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
458 writel(data, pp->base + offset);
461 /* Read helper method */
462 static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
464 return readl(pp->base + offset);
467 /* Increment txq get counter */
468 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
470 txq->txq_get_index++;
471 if (txq->txq_get_index == txq->size)
472 txq->txq_get_index = 0;
475 /* Increment txq put counter */
476 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
478 txq->txq_put_index++;
479 if (txq->txq_put_index == txq->size)
480 txq->txq_put_index = 0;
484 /* Clear all MIB counters */
485 static void mvneta_mib_counters_clear(struct mvneta_port *pp)
490 /* Perform dummy reads from MIB counters */
491 for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
492 dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
495 /* Get System Network Statistics */
496 struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
497 struct rtnl_link_stats64 *stats)
499 struct mvneta_port *pp = netdev_priv(dev);
503 for_each_possible_cpu(cpu) {
504 struct mvneta_pcpu_stats *cpu_stats;
510 cpu_stats = per_cpu_ptr(pp->stats, cpu);
512 start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
513 rx_packets = cpu_stats->rx_packets;
514 rx_bytes = cpu_stats->rx_bytes;
515 tx_packets = cpu_stats->tx_packets;
516 tx_bytes = cpu_stats->tx_bytes;
517 } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
519 stats->rx_packets += rx_packets;
520 stats->rx_bytes += rx_bytes;
521 stats->tx_packets += tx_packets;
522 stats->tx_bytes += tx_bytes;
525 stats->rx_errors = dev->stats.rx_errors;
526 stats->rx_dropped = dev->stats.rx_dropped;
528 stats->tx_dropped = dev->stats.tx_dropped;
533 /* Rx descriptors helper methods */
535 /* Checks whether the RX descriptor having this status is both the first
536 * and the last descriptor for the RX packet. Each RX packet is currently
537 * received through a single RX descriptor, so not having each RX
538 * descriptor with its first and last bits set is an error
540 static int mvneta_rxq_desc_is_first_last(u32 status)
542 return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
543 MVNETA_RXD_FIRST_LAST_DESC;
546 /* Add number of descriptors ready to receive new packets */
547 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
548 struct mvneta_rx_queue *rxq,
551 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can be added at once */
554 while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
555 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
556 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
557 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
558 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
561 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
562 (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
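/* Example: refilling 300 descriptors results in two register writes,
 * one adding 255 and one adding the remaining 45.
 */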
565 /* Get number of RX descriptors occupied by received packets */
566 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
567 struct mvneta_rx_queue *rxq)
571 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
572 return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
575 /* Update num of rx desc called upon return from rx path or
576 * from mvneta_rxq_drop_pkts().
578 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
579 struct mvneta_rx_queue *rxq,
580 int rx_done, int rx_filled)
584 if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
586 (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
587 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
591 /* Only 255 descriptors can be added at once */
592 while ((rx_done > 0) || (rx_filled > 0)) {
593 if (rx_done <= 0xff) {
600 if (rx_filled <= 0xff) {
601 val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
604 val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
607 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
611 /* Get pointer to next RX descriptor to be processed by SW */
612 static struct mvneta_rx_desc *
613 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
615 int rx_desc = rxq->next_desc_to_proc;
617 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
618 prefetch(rxq->descs + rxq->next_desc_to_proc);
619 return rxq->descs + rx_desc;
622 /* Change maximum receive size of the port. */
623 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
627 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
628 val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
629 val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
630 MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
631 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
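/* The max-RX-size field presumably holds the limit in 2-byte units,
 * hence the divide by two; the 2-byte Marvell header is excluded from
 * the limit before the conversion.
 */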
635 /* Set rx queue offset */
636 static void mvneta_rxq_offset_set(struct mvneta_port *pp,
637 struct mvneta_rx_queue *rxq,
642 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
643 val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
646 val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
647 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
651 /* Tx descriptors helper methods */
653 /* Update HW with number of TX descriptors to be sent */
654 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
655 struct mvneta_tx_queue *txq,
660 /* Only 255 descriptors can be added at once; assume the caller
661 * processes TX descriptors in quanta of less than 256. */
664 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
667 /* Get pointer to next TX descriptor to be processed (send) by HW */
668 static struct mvneta_tx_desc *
669 mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
671 int tx_desc = txq->next_desc_to_proc;
673 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
674 return txq->descs + tx_desc;
677 /* Release the last allocated TX descriptor. Useful to handle DMA
678 * mapping failures in the TX path.
680 static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
682 if (txq->next_desc_to_proc == 0)
683 txq->next_desc_to_proc = txq->last_desc - 1;
685 txq->next_desc_to_proc--;
688 /* Set rxq buf size */
689 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
690 struct mvneta_rx_queue *rxq,
695 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
697 val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
698 val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
700 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
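/* The buffer size field is programmed in 8-byte units (hence the
 * ">> 3"); buf_size is assumed to be a multiple of 8 here.
 */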
703 /* Disable buffer management (BM) */
704 static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
705 struct mvneta_rx_queue *rxq)
709 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
710 val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
711 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
716 /* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
717 static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
721 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
724 val |= MVNETA_GMAC2_PORT_RGMII;
726 val &= ~MVNETA_GMAC2_PORT_RGMII;
728 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
731 /* Config SGMII port */
732 static void mvneta_port_sgmii_config(struct mvneta_port *pp)
736 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
737 val |= MVNETA_GMAC2_PCS_ENABLE;
738 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
740 mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
743 /* Start the Ethernet port RX and TX activity */
744 static void mvneta_port_up(struct mvneta_port *pp)
749 /* Enable all initialized TXs. */
750 mvneta_mib_counters_clear(pp);
752 for (queue = 0; queue < txq_number; queue++) {
753 struct mvneta_tx_queue *txq = &pp->txqs[queue];
754 if (txq->descs != NULL)
755 q_map |= (1 << queue);
757 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
759 /* Enable all initialized RXQs. */
761 for (queue = 0; queue < rxq_number; queue++) {
762 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
763 if (rxq->descs != NULL)
764 q_map |= (1 << queue);
767 mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
770 /* Stop the Ethernet port activity */
771 static void mvneta_port_down(struct mvneta_port *pp)
776 /* Stop Rx port activity. Check port Rx activity. */
777 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
779 /* Issue stop command for active channels only */
781 mvreg_write(pp, MVNETA_RXQ_CMD,
782 val << MVNETA_RXQ_DISABLE_SHIFT);
784 /* Wait for all Rx activity to terminate. */
787 if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
789 "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n",
795 val = mvreg_read(pp, MVNETA_RXQ_CMD);
796 } while (val & 0xff);
798 /* Stop Tx port activity. Check port Tx activity. Issue stop
799 * command for active channels only
801 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
804 mvreg_write(pp, MVNETA_TXQ_CMD,
805 (val << MVNETA_TXQ_DISABLE_SHIFT));
807 /* Wait for all Tx activity to terminate. */
810 if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
812 "TIMEOUT for TX stopped status=0x%08x\n",
818 /* Check TX Command reg that all Txqs are stopped */
819 val = mvreg_read(pp, MVNETA_TXQ_CMD);
821 } while (val & 0xff);
823 /* Double check to verify that TX FIFO is empty */
826 if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
828 "TX FIFO empty timeout status=0x08%x\n",
834 val = mvreg_read(pp, MVNETA_PORT_STATUS);
835 } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
836 (val & MVNETA_TX_IN_PRGRS));
841 /* Enable the port by setting the port enable bit of the MAC control register */
842 static void mvneta_port_enable(struct mvneta_port *pp)
847 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
848 val |= MVNETA_GMAC0_PORT_ENABLE;
849 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
852 /* Disable the port and wait for about 200 usec before returning */
853 static void mvneta_port_disable(struct mvneta_port *pp)
857 /* Reset the Enable bit in the Serial Control Register */
858 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
859 val &= ~MVNETA_GMAC0_PORT_ENABLE;
860 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
865 /* Multicast tables methods */
867 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
868 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
876 val = 0x1 | (queue << 1);
877 val |= (val << 24) | (val << 16) | (val << 8);
880 for (offset = 0; offset <= 0xc; offset += 4)
881 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
884 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
885 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
893 val = 0x1 | (queue << 1);
894 val |= (val << 24) | (val << 16) | (val << 8);
897 for (offset = 0; offset <= 0xfc; offset += 4)
898 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
902 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
903 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
909 memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
912 memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
913 val = 0x1 | (queue << 1);
914 val |= (val << 24) | (val << 16) | (val << 8);
917 for (offset = 0; offset <= 0xfc; offset += 4)
918 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
921 /* This method sets defaults to the NETA port:
922 * Clears interrupt Cause and Mask registers.
923 * Clears all MAC tables.
924 * Sets defaults to all registers.
925 * Resets RX and TX descriptor rings.
927 * This method can be called after mvneta_port_down() to return the port
928 * settings to defaults.
930 static void mvneta_defaults_set(struct mvneta_port *pp)
936 /* Clear all Cause registers */
937 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
938 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
939 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
941 /* Mask all interrupts */
942 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
943 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
944 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
945 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
947 /* Enable MBUS Retry bit16 */
948 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
950 /* Set CPU queue access map - all CPUs have access to all RX
951 * queues and to all TX queues
953 for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
954 mvreg_write(pp, MVNETA_CPU_MAP(cpu),
955 (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
956 MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
958 /* Reset RX and TX DMAs */
959 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
960 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
962 /* Disable Legacy WRR, Disable EJP, Release from reset */
963 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
964 for (queue = 0; queue < txq_number; queue++) {
965 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
966 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
969 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
970 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
972 /* Set Port Acceleration Mode */
973 val = MVNETA_ACC_MODE_EXT;
974 mvreg_write(pp, MVNETA_ACC_MODE, val);
976 /* Update the portCfg register value in accordance with all RxQueue types */
977 val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
978 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
981 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
982 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
984 /* Build PORT_SDMA_CONFIG_REG */
987 /* Default burst size */
988 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
989 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
990 val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
992 #if defined(__BIG_ENDIAN)
993 val |= MVNETA_DESC_SWAP;
996 /* Assign port SDMA configuration */
997 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
999 /* Disable PHY polling in hardware, since we're using the
1000 * kernel phylib to do this.
1002 val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1003 val &= ~MVNETA_PHY_POLLING_ENABLE;
1004 mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1006 mvneta_set_ucast_table(pp, -1);
1007 mvneta_set_special_mcast_table(pp, -1);
1008 mvneta_set_other_mcast_table(pp, -1);
1010 /* Set port interrupt enable register - default enable all */
1011 mvreg_write(pp, MVNETA_INTR_ENABLE,
1012 (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1013 | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1016 /* Set max sizes for tx queues */
1017 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1023 mtu = max_tx_size * 8;
1024 if (mtu > MVNETA_TX_MTU_MAX)
1025 mtu = MVNETA_TX_MTU_MAX;
1028 val = mvreg_read(pp, MVNETA_TX_MTU);
1029 val &= ~MVNETA_TX_MTU_MAX;
1031 mvreg_write(pp, MVNETA_TX_MTU, val);
1033 /* TX token size and all TXQs token size must be larger than the MTU */
1034 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1036 size = val & MVNETA_TX_TOKEN_SIZE_MAX;
1039 val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
1041 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1043 for (queue = 0; queue < txq_number; queue++) {
1044 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1046 size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
1049 val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
1051 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1056 /* Set unicast address */
1057 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1060 unsigned int unicast_reg;
1061 unsigned int tbl_offset;
1062 unsigned int reg_offset;
1064 /* Locate the Unicast table entry */
1065 last_nibble = (0xf & last_nibble);
1067 /* offset from unicast tbl base */
1068 tbl_offset = (last_nibble / 4) * 4;
1070 /* offset within the above reg */
1071 reg_offset = last_nibble % 4;
1073 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1076 /* Clear accepts frame bit at specified unicast DA tbl entry */
1077 unicast_reg &= ~(0xff << (8 * reg_offset));
1079 unicast_reg &= ~(0xff << (8 * reg_offset));
1080 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1083 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
1086 /* Set mac address */
1087 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
1094 mac_l = (addr[4] << 8) | (addr[5]);
1095 mac_h = (addr[0] << 24) | (addr[1] << 16) |
1096 (addr[2] << 8) | (addr[3] << 0);
1098 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1099 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1102 /* Accept frames of this address */
1103 mvneta_set_ucast_addr(pp, addr[5], queue);
1106 /* Set the number of packets that will be received before an RX
1107 * interrupt is generated by the HW. */
1109 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1110 struct mvneta_rx_queue *rxq, u32 value)
1112 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1113 value | MVNETA_RXQ_NON_OCCUPIED(0));
1114 rxq->pkts_coal = value;
1117 /* Set the time delay in usec before an RX interrupt is generated by the HW */
1120 static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1121 struct mvneta_rx_queue *rxq, u32 value)
1124 unsigned long clk_rate;
1126 clk_rate = clk_get_rate(pp->clk);
1127 val = (clk_rate / 1000000) * value;
1129 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1130 rxq->time_coal = value;
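/* Example (illustrative clock rate only): with a 250 MHz port clock,
 * value == 100 usec is written as (250000000 / 1000000) * 100 == 25000
 * clock cycles.
 */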
1133 /* Set threshold for TX_DONE pkts coalescing */
1134 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1135 struct mvneta_tx_queue *txq, u32 value)
1139 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1141 val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1142 val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1144 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1146 txq->done_pkts_coal = value;
1149 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1150 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1151 u32 phys_addr, u32 cookie)
1153 rx_desc->buf_cookie = cookie;
1154 rx_desc->buf_phys_addr = phys_addr;
1157 /* Decrement sent descriptors counter */
1158 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1159 struct mvneta_tx_queue *txq,
1164 /* Only 255 TX descriptors can be updated at once */
1165 while (sent_desc > 0xff) {
1166 val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1167 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1168 sent_desc = sent_desc - 0xff;
1171 val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1172 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1175 /* Get number of TX descriptors already sent by HW */
1176 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1177 struct mvneta_tx_queue *txq)
1182 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1183 sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1184 MVNETA_TXQ_SENT_DESC_SHIFT;
1189 /* Get number of sent descriptors and decrement counter.
1190 * The number of sent descriptors is returned.
1192 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1193 struct mvneta_tx_queue *txq)
1197 /* Get number of sent descriptors */
1198 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1200 /* Decrement sent descriptors counter */
1202 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1207 /* Set the TXQ descriptor fields relevant for CSUM calculation */
1208 static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1209 int ip_hdr_len, int l4_proto)
1213 /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1214 * G_L4_chk, L4_type; required only for checksum
1217 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
1218 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1220 if (l3_proto == swab16(ETH_P_IP))
1221 command |= MVNETA_TXD_IP_CSUM;
1223 command |= MVNETA_TX_L3_IP6;
1225 if (l4_proto == IPPROTO_TCP)
1226 command |= MVNETA_TX_L4_CSUM_FULL;
1227 else if (l4_proto == IPPROTO_UDP)
1228 command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1230 command |= MVNETA_TX_L4_CSUM_NOT;
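/* Example: a plain IPv4/TCP frame, with the L3 header at offset 14 and
 * an IHL of 5 32-bit words (as passed in by mvneta_skb_tx_csum()),
 * yields command == 14 | (5 << 8) | MVNETA_TXD_IP_CSUM |
 * MVNETA_TX_L4_CSUM_FULL == 0x4004050e.
 */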
1236 /* Display more error info */
1237 static void mvneta_rx_error(struct mvneta_port *pp,
1238 struct mvneta_rx_desc *rx_desc)
1240 u32 status = rx_desc->status;
1242 if (!mvneta_rxq_desc_is_first_last(status)) {
1244 "bad rx status %08x (buffer oversize), size=%d\n",
1245 status, rx_desc->data_size);
1249 switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1250 case MVNETA_RXD_ERR_CRC:
1251 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1252 status, rx_desc->data_size);
1254 case MVNETA_RXD_ERR_OVERRUN:
1255 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1256 status, rx_desc->data_size);
1258 case MVNETA_RXD_ERR_LEN:
1259 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1260 status, rx_desc->data_size);
1262 case MVNETA_RXD_ERR_RESOURCE:
1263 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1264 status, rx_desc->data_size);
1269 /* Handle RX checksum offload based on the descriptor's status */
1270 static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
1271 struct sk_buff *skb)
1273 if ((status & MVNETA_RXD_L3_IP4) &&
1274 (status & MVNETA_RXD_L4_CSUM_OK)) {
1276 skb->ip_summed = CHECKSUM_UNNECESSARY;
1280 skb->ip_summed = CHECKSUM_NONE;
1283 /* Return tx queue pointer (find last set bit) according to <cause> returned
1284 * from the tx_done reg. <cause> must not be null. The return value is
1285 * always a valid queue: the one matching the last (highest) bit set in <cause>. */
1287 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1290 int queue = fls(cause) - 1;
1292 return &pp->txqs[queue];
1295 /* Free tx queue skbuffs */
1296 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1297 struct mvneta_tx_queue *txq, int num)
1301 for (i = 0; i < num; i++) {
1302 struct mvneta_tx_desc *tx_desc = txq->descs +
1304 struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
1306 mvneta_txq_inc_get(txq);
1311 dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
1312 tx_desc->data_size, DMA_TO_DEVICE);
1313 dev_kfree_skb_any(skb);
1317 /* Handle end of transmission */
1318 static void mvneta_txq_done(struct mvneta_port *pp,
1319 struct mvneta_tx_queue *txq)
1321 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1324 tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1328 mvneta_txq_bufs_free(pp, txq, tx_done);
1330 txq->count -= tx_done;
1332 if (netif_tx_queue_stopped(nq)) {
1333 if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
1334 netif_tx_wake_queue(nq);
1338 static void *mvneta_frag_alloc(const struct mvneta_port *pp)
1340 if (likely(pp->frag_size <= PAGE_SIZE))
1341 return netdev_alloc_frag(pp->frag_size);
1343 return kmalloc(pp->frag_size, GFP_ATOMIC);
1346 static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
1348 if (likely(pp->frag_size <= PAGE_SIZE))
1349 put_page(virt_to_head_page(data));
1354 /* Refill processing */
1355 static int mvneta_rx_refill(struct mvneta_port *pp,
1356 struct mvneta_rx_desc *rx_desc)
1359 dma_addr_t phys_addr;
1362 data = mvneta_frag_alloc(pp);
1366 phys_addr = dma_map_single(pp->dev->dev.parent, data,
1367 MVNETA_RX_BUF_SIZE(pp->pkt_size),
1369 if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
1370 mvneta_frag_free(pp, data);
1374 mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
1378 /* Handle tx checksum */
1379 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1381 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1385 if (skb->protocol == htons(ETH_P_IP)) {
1386 struct iphdr *ip4h = ip_hdr(skb);
1388 /* Calculate IPv4 checksum and L4 checksum */
1389 ip_hdr_len = ip4h->ihl;
1390 l4_proto = ip4h->protocol;
1391 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1392 struct ipv6hdr *ip6h = ipv6_hdr(skb);
1394 /* Read l4_protocol from one of the IPv6 extension headers */
1395 if (skb_network_header_len(skb) > 0)
1396 ip_hdr_len = (skb_network_header_len(skb) >> 2);
1397 l4_proto = ip6h->nexthdr;
1399 return MVNETA_TX_L4_CSUM_NOT;
1401 return mvneta_txq_desc_csum(skb_network_offset(skb),
1402 skb->protocol, ip_hdr_len, l4_proto);
1405 return MVNETA_TX_L4_CSUM_NOT;
1408 /* Returns rx queue pointer (find last set bit) according to causeRxTx
1411 static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
1414 int queue = fls(cause >> 8) - 1;
1416 return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
1419 /* Drop packets received by the RXQ and free buffers */
1420 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1421 struct mvneta_rx_queue *rxq)
1425 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1426 for (i = 0; i < rxq->size; i++) {
1427 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1428 void *data = (void *)rx_desc->buf_cookie;
1430 mvneta_frag_free(pp, data);
1431 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1432 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1436 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1439 /* Main rx processing */
1440 static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1441 struct mvneta_rx_queue *rxq)
1443 struct net_device *dev = pp->dev;
1444 int rx_done, rx_filled;
1448 /* Get number of received packets */
1449 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1451 if (rx_todo > rx_done)
1457 /* Fairness NAPI loop */
1458 while (rx_done < rx_todo) {
1459 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1460 struct sk_buff *skb;
1461 unsigned char *data;
1467 rx_status = rx_desc->status;
1468 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
1469 data = (unsigned char *)rx_desc->buf_cookie;
1471 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
1472 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1474 dev->stats.rx_errors++;
1475 mvneta_rx_error(pp, rx_desc);
1476 /* leave the descriptor untouched */
1480 if (rx_bytes <= rx_copybreak) {
1481 /* better copy a small frame and not unmap the DMA region */
1482 skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
1484 goto err_drop_frame;
1486 dma_sync_single_range_for_cpu(dev->dev.parent,
1487 rx_desc->buf_phys_addr,
1488 MVNETA_MH_SIZE + NET_SKB_PAD,
1491 memcpy(skb_put(skb, rx_bytes),
1492 data + MVNETA_MH_SIZE + NET_SKB_PAD,
1495 skb->protocol = eth_type_trans(skb, dev);
1496 mvneta_rx_csum(pp, rx_status, skb);
1497 napi_gro_receive(&pp->napi, skb);
1500 rcvd_bytes += rx_bytes;
1502 /* leave the descriptor and buffer untouched */
1506 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
1508 goto err_drop_frame;
1510 dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr,
1511 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1514 rcvd_bytes += rx_bytes;
1516 /* Linux processing */
1517 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
1518 skb_put(skb, rx_bytes);
1520 skb->protocol = eth_type_trans(skb, dev);
1522 mvneta_rx_csum(pp, rx_status, skb);
1524 napi_gro_receive(&pp->napi, skb);
1526 /* Refill processing */
1527 err = mvneta_rx_refill(pp, rx_desc);
1529 netdev_err(dev, "Linux processing - Can't refill\n");
1536 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1538 u64_stats_update_begin(&stats->syncp);
1539 stats->rx_packets += rcvd_pkts;
1540 stats->rx_bytes += rcvd_bytes;
1541 u64_stats_update_end(&stats->syncp);
1544 /* Update rxq management counters */
1545 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
1550 /* Handle tx fragmentation processing */
1551 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1552 struct mvneta_tx_queue *txq)
1554 struct mvneta_tx_desc *tx_desc;
1557 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1558 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1559 void *addr = page_address(frag->page.p) + frag->page_offset;
1561 tx_desc = mvneta_txq_next_desc_get(txq);
1562 tx_desc->data_size = frag->size;
1564 tx_desc->buf_phys_addr =
1565 dma_map_single(pp->dev->dev.parent, addr,
1566 tx_desc->data_size, DMA_TO_DEVICE);
1568 if (dma_mapping_error(pp->dev->dev.parent,
1569 tx_desc->buf_phys_addr)) {
1570 mvneta_txq_desc_put(txq);
1574 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
1575 /* Last descriptor */
1576 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
1578 txq->tx_skb[txq->txq_put_index] = skb;
1580 mvneta_txq_inc_put(txq);
1582 /* Descriptor in the middle: Not First, Not Last */
1583 tx_desc->command = 0;
1585 txq->tx_skb[txq->txq_put_index] = NULL;
1586 mvneta_txq_inc_put(txq);
1593 /* Release all descriptors that were used to map fragments of
1594 * this packet, as well as the corresponding DMA mappings
1596 for (i = i - 1; i >= 0; i--) {
1597 tx_desc = txq->descs + i;
1598 dma_unmap_single(pp->dev->dev.parent,
1599 tx_desc->buf_phys_addr,
1602 mvneta_txq_desc_put(txq);
1608 /* Main tx processing */
1609 static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1611 struct mvneta_port *pp = netdev_priv(dev);
1612 u16 txq_id = skb_get_queue_mapping(skb);
1613 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
1614 struct mvneta_tx_desc *tx_desc;
1615 struct netdev_queue *nq;
1619 if (!netif_running(dev))
1622 frags = skb_shinfo(skb)->nr_frags + 1;
1623 nq = netdev_get_tx_queue(dev, txq_id);
1625 /* Get a descriptor for the first part of the packet */
1626 tx_desc = mvneta_txq_next_desc_get(txq);
1628 tx_cmd = mvneta_skb_tx_csum(pp, skb);
1630 tx_desc->data_size = skb_headlen(skb);
1632 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
1635 if (unlikely(dma_mapping_error(dev->dev.parent,
1636 tx_desc->buf_phys_addr))) {
1637 mvneta_txq_desc_put(txq);
1643 /* First and Last descriptor */
1644 tx_cmd |= MVNETA_TXD_FLZ_DESC;
1645 tx_desc->command = tx_cmd;
1646 txq->tx_skb[txq->txq_put_index] = skb;
1647 mvneta_txq_inc_put(txq);
1649 /* First but not Last */
1650 tx_cmd |= MVNETA_TXD_F_DESC;
1651 txq->tx_skb[txq->txq_put_index] = NULL;
1652 mvneta_txq_inc_put(txq);
1653 tx_desc->command = tx_cmd;
1654 /* Continue with other skb fragments */
1655 if (mvneta_tx_frag_process(pp, skb, txq)) {
1656 dma_unmap_single(dev->dev.parent,
1657 tx_desc->buf_phys_addr,
1660 mvneta_txq_desc_put(txq);
1666 txq->count += frags;
1667 mvneta_txq_pend_desc_add(pp, txq, frags);
1669 if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
1670 netif_tx_stop_queue(nq);
1674 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1676 u64_stats_update_begin(&stats->syncp);
1677 stats->tx_packets++;
1678 stats->tx_bytes += skb->len;
1679 u64_stats_update_end(&stats->syncp);
1681 dev->stats.tx_dropped++;
1682 dev_kfree_skb_any(skb);
1685 return NETDEV_TX_OK;
1689 /* Free tx resources, when resetting a port */
1690 static void mvneta_txq_done_force(struct mvneta_port *pp,
1691 struct mvneta_tx_queue *txq)
1694 int tx_done = txq->count;
1696 mvneta_txq_bufs_free(pp, txq, tx_done);
1700 txq->txq_put_index = 0;
1701 txq->txq_get_index = 0;
1704 /* Handle tx done - called in softirq context. The <cause_tx_done> argument
1705 * must be a valid cause according to MVNETA_TX_INTR_MASK_ALL. */
1707 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
1709 struct mvneta_tx_queue *txq;
1710 struct netdev_queue *nq;
1712 while (cause_tx_done) {
1713 txq = mvneta_tx_done_policy(pp, cause_tx_done);
1715 nq = netdev_get_tx_queue(pp->dev, txq->id);
1716 __netif_tx_lock(nq, smp_processor_id());
1719 mvneta_txq_done(pp, txq);
1721 __netif_tx_unlock(nq);
1722 cause_tx_done &= ~((1 << txq->id));
1726 /* Compute the crc8 of the specified address, using a unique algorithm
1727 * that, per the hw spec, differs from the generic crc8 algorithm. */
1729 static int mvneta_addr_crc(unsigned char *addr)
1734 for (i = 0; i < ETH_ALEN; i++) {
1737 crc = (crc ^ addr[i]) << 8;
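/* The inner loop below performs bitwise long division: whenever a
 * pending high bit is set, the hardware-specified CRC-8 polynomial is
 * XORed in at that position (per the hw spec, this polynomial differs
 * from the generic crc8 one).
 */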
1738 for (j = 7; j >= 0; j--) {
1739 if (crc & (0x100 << j))
1747 /* This method controls the net device special MAC multicast support.
1748 * The Special Multicast Table for MAC addresses supports MAC of the form
1749 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1750 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1751 * Table entries in the DA-Filter table. This method sets the
1752 * appropriate Special Multicast Table entry. */
1754 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
1755 unsigned char last_byte,
1758 unsigned int smc_table_reg;
1759 unsigned int tbl_offset;
1760 unsigned int reg_offset;
1762 /* Register offset from SMC table base */
1763 tbl_offset = (last_byte / 4);
1764 /* Entry offset within the above reg */
1765 reg_offset = last_byte % 4;
1767 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
1771 smc_table_reg &= ~(0xff << (8 * reg_offset));
1773 smc_table_reg &= ~(0xff << (8 * reg_offset));
1774 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1777 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
1781 /* This method controls the network device Other MAC multicast support.
1782 * The Other Multicast Table is used for multicast of another type.
1783 * A CRC-8 is used as an index to the Other Multicast Table entries
1784 * in the DA-Filter table.
1785 * The method gets the CRC-8 value from the calling routine and sets
1786 * the appropriate Other Multicast Table entry according to that value. */
1789 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
1793 unsigned int omc_table_reg;
1794 unsigned int tbl_offset;
1795 unsigned int reg_offset;
1797 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
1798 reg_offset = crc8 % 4; /* Entry offset within the above reg */
1800 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
1803 /* Clear accepts frame bit at specified Other DA table entry */
1804 omc_table_reg &= ~(0xff << (8 * reg_offset));
1806 omc_table_reg &= ~(0xff << (8 * reg_offset));
1807 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1810 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
1813 /* The network device supports multicast using two tables:
1814 * 1) Special Multicast Table for MAC addresses of the form
1815 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1816 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1817 * Table entries in the DA-Filter table.
1818 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
1819 * is used as an index to the Other Multicast Table entries in the DA-Filter table. */
1822 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
1825 unsigned char crc_result = 0;
1827 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
1828 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
1832 crc_result = mvneta_addr_crc(p_addr);
1834 if (pp->mcast_count[crc_result] == 0) {
1835 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
1840 pp->mcast_count[crc_result]--;
1841 if (pp->mcast_count[crc_result] != 0) {
1842 netdev_info(pp->dev,
1843 "After delete there are %d valid Mcast for crc8=0x%02x\n",
1844 pp->mcast_count[crc_result], crc_result);
1848 pp->mcast_count[crc_result]++;
1850 mvneta_set_other_mcast_addr(pp, crc_result, queue);
1856 /* Configure the Filtering mode of the Ethernet port */
1856 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
1859 u32 port_cfg_reg, val;
1861 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
1863 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
1865 /* Set / Clear UPM bit in port configuration register */
1867 /* Accept all Unicast addresses */
1868 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
1869 val |= MVNETA_FORCE_UNI;
1870 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
1871 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
1873 /* Reject all Unicast addresses */
1874 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
1875 val &= ~MVNETA_FORCE_UNI;
1878 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
1879 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
1882 /* register unicast and multicast addresses */
1883 static void mvneta_set_rx_mode(struct net_device *dev)
1885 struct mvneta_port *pp = netdev_priv(dev);
1886 struct netdev_hw_addr *ha;
1888 if (dev->flags & IFF_PROMISC) {
1889 /* Accept all: Multicast + Unicast */
1890 mvneta_rx_unicast_promisc_set(pp, 1);
1891 mvneta_set_ucast_table(pp, rxq_def);
1892 mvneta_set_special_mcast_table(pp, rxq_def);
1893 mvneta_set_other_mcast_table(pp, rxq_def);
1895 /* Accept single Unicast */
1896 mvneta_rx_unicast_promisc_set(pp, 0);
1897 mvneta_set_ucast_table(pp, -1);
1898 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
1900 if (dev->flags & IFF_ALLMULTI) {
1901 /* Accept all multicast */
1902 mvneta_set_special_mcast_table(pp, rxq_def);
1903 mvneta_set_other_mcast_table(pp, rxq_def);
1905 /* Accept only initialized multicast */
1906 mvneta_set_special_mcast_table(pp, -1);
1907 mvneta_set_other_mcast_table(pp, -1);
1909 if (!netdev_mc_empty(dev)) {
1910 netdev_for_each_mc_addr(ha, dev) {
1911 mvneta_mcast_addr_set(pp, ha->addr,
1919 /* Interrupt handling - the callback for request_irq() */
1920 static irqreturn_t mvneta_isr(int irq, void *dev_id)
1922 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
1924 /* Mask all interrupts */
1925 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1927 napi_schedule(&pp->napi);
1933 * Bits 0-7 of the causeRxTx register indicate that packets were
1934 * transmitted on the corresponding TXQ (bit 0 is for TX queue 0).
1935 * Bits 8-15 of the causeRxTx register indicate that packets were
1936 * received on the corresponding RXQ (bit 8 is for RX queue 0).
1937 * Each CPU has its own causeRxTx register
1939 static int mvneta_poll(struct napi_struct *napi, int budget)
1943 unsigned long flags;
1944 struct mvneta_port *pp = netdev_priv(napi->dev);
1946 if (!netif_running(pp->dev)) {
1947 napi_complete(napi);
1951 /* Read cause register */
1952 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
1953 (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
1955 /* Release Tx descriptors */
1956 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
1957 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
1958 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
1961 /* For the case where the last mvneta_poll did not process all RX packets */
1964 cause_rx_tx |= pp->cause_rx_tx;
1965 if (rxq_number > 1) {
1966 while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
1968 struct mvneta_rx_queue *rxq;
1969 /* get rx queue number from cause_rx_tx */
1970 rxq = mvneta_rx_policy(pp, cause_rx_tx);
1974 /* process the packet in that rx queue */
1975 count = mvneta_rx(pp, budget, rxq);
1979 /* set off the rx bit of the
1980 * corresponding bit in the cause rx
1981 * tx register, so that next iteration
1982 * will find the next rx queue where
1983 * packets are received on
1985 cause_rx_tx &= ~((1 << rxq->id) << 8);
1989 rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
1995 napi_complete(napi);
1996 local_irq_save(flags);
1997 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1998 MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
1999 local_irq_restore(flags);
2002 pp->cause_rx_tx = cause_rx_tx;
2006 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
2007 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2012 for (i = 0; i < num; i++) {
2013 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2014 if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
2015 netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
2016 __func__, rxq->id, i, num);
2021 /* Add this number of RX descriptors as non-occupied (ready to get packets) */
2024 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2029 /* Free all packets pending transmit from all TXQs and reset TX port */
2030 static void mvneta_tx_reset(struct mvneta_port *pp)
2034 /* free the skbs in the HAL tx ring */
2035 for (queue = 0; queue < txq_number; queue++)
2036 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2038 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2039 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2042 static void mvneta_rx_reset(struct mvneta_port *pp)
2044 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2045 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2048 /* Rx/Tx queue initialization/cleanup methods */
2050 /* Create a specified RX queue */
2051 static int mvneta_rxq_init(struct mvneta_port *pp,
2052 struct mvneta_rx_queue *rxq)
2055 rxq->size = pp->rx_ring_size;
2057 /* Allocate memory for RX descriptors */
2058 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2059 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2060 &rxq->descs_phys, GFP_KERNEL);
2061 if (rxq->descs == NULL)
2064 BUG_ON(rxq->descs !=
2065 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
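/* dma_alloc_coherent() is expected to return at least page-aligned
 * memory, so this cache-line alignment check should never trigger in
 * practice.
 */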
2067 rxq->last_desc = rxq->size - 1;
2069 /* Set Rx descriptors queue starting address */
2070 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2071 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2074 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
2076 /* Set coalescing pkts and time */
2077 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2078 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2080 /* Fill RXQ with buffers from RX pool */
2081 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
2082 mvneta_rxq_bm_disable(pp, rxq);
2083 mvneta_rxq_fill(pp, rxq, rxq->size);
2088 /* Cleanup Rx queue */
2089 static void mvneta_rxq_deinit(struct mvneta_port *pp,
2090 struct mvneta_rx_queue *rxq)
2092 mvneta_rxq_drop_pkts(pp, rxq);
2095 dma_free_coherent(pp->dev->dev.parent,
2096 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2102 rxq->next_desc_to_proc = 0;
2103 rxq->descs_phys = 0;
2106 /* Create and initialize a tx queue */
2107 static int mvneta_txq_init(struct mvneta_port *pp,
2108 struct mvneta_tx_queue *txq)
2110 txq->size = pp->tx_ring_size;
2112 /* Allocate memory for TX descriptors */
2113 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2114 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2115 &txq->descs_phys, GFP_KERNEL);
2116 if (txq->descs == NULL)
2119 /* Make sure descriptor address is cache line size aligned */
2120 BUG_ON(txq->descs !=
2121 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2123 txq->last_desc = txq->size - 1;
2125 /* Set maximum bandwidth for enabled TXQs */
2126 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2127 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2129 /* Set Tx descriptors queue starting address */
2130 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2131 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2133 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2134 if (txq->tx_skb == NULL) {
2135 dma_free_coherent(pp->dev->dev.parent,
2136 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2137 txq->descs, txq->descs_phys);
2140 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2146 /* Free the resources allocated for a TX queue (also used when mvneta_txq_init() fails) */
2146 static void mvneta_txq_deinit(struct mvneta_port *pp,
2147 struct mvneta_tx_queue *txq)
2152 dma_free_coherent(pp->dev->dev.parent,
2153 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2154 txq->descs, txq->descs_phys);
2158 txq->next_desc_to_proc = 0;
2159 txq->descs_phys = 0;
2161 /* Set minimum bandwidth for disabled TXQs */
2162 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2163 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2165 /* Set Tx descriptors queue starting address and size */
2166 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2167 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}

/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}

static void mvneta_start_dev(struct mvneta_port *pp)
{
	mvneta_max_rx_size_set(pp, pp->pkt_size);
	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);

	/* Enable polling on the port */
	napi_enable(&pp->napi);

	/* Unmask interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK(rxq_number) |
		    MVNETA_TX_INTR_MASK(txq_number));

	phy_start(pp->phy_dev);
	netif_tx_start_all_queues(pp->dev);
}

static void mvneta_stop_dev(struct mvneta_port *pp)
{
	phy_stop(pp->phy_dev);

	napi_disable(&pp->napi);

	netif_carrier_off(pp->dev);

	mvneta_port_down(pp);
	netif_tx_stop_all_queues(pp->dev);

	/* Stop the port activity */
	mvneta_port_disable(pp);

	/* Clear all ethernet port interrupts */
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);

	/* Mask all ethernet port interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);

	mvneta_tx_reset(pp);
	mvneta_rx_reset(pp);
}

/* Return positive if MTU is valid */
static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
{
	if (mtu < 68) {
		netdev_err(dev, "cannot change mtu to less than 68\n");
		return -EINVAL;
	}

	/* 9676 == 9700 - 20 and rounding to 8 */
	if (mtu > 9676) {
		netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
		mtu = 9676;
	}

	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
	}

	return mtu;
}

/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	mtu = mvneta_check_mtu_valid(dev, mtu);
	if (mtu < 0)
		return -EINVAL;
	dev->mtu = mtu;

	if (!netif_running(dev))
		return 0;

	/* The interface is running, so we have to force a
	 * reallocation of the RXQs
	 */
	mvneta_stop_dev(pp);

	mvneta_cleanup_txqs(pp);
	mvneta_cleanup_rxqs(pp);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ret = mvneta_setup_rxqs(pp);
	if (ret) {
		netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
		return ret;
	}
	ret = mvneta_setup_txqs(pp);
	if (ret) {
		netdev_err(pp->dev, "unable to setup txqs after MTU change\n");
		return ret;
	}

	mvneta_start_dev(pp);

	return 0;
}
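
/* A note on the sizes recomputed above: pp->pkt_size is the maximum
 * receive frame size derived from the MTU, and pp->frag_size is the
 * per-buffer allocation backing each RX descriptor: the (aligned)
 * buffer itself plus room for the struct skb_shared_info that the RX
 * path's build_skb()-style allocation expects at the end.
 */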

/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_h;

	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = (mac_addr_l >> 8) & 0xFF;
	addr[5] = mac_addr_l & 0xFF;
}
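
/* The hardware splits the station address across two registers: the
 * high register carries the first four bytes and the low register the
 * last two, most significant byte first in each.
 */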

/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u8 *mac = addr + 2;	/* skip the sa_family field of struct sockaddr */
	int i;

	if (netif_running(dev))
		return -EBUSY;

	/* Remove previous address table entry */
	mvneta_mac_addr_set(pp, dev->dev_addr, -1);

	/* Set new addr in hw */
	mvneta_mac_addr_set(pp, mac, rxq_def);

	/* Set addr in the device */
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = mac[i];
	return 0;
}

static void mvneta_adjust_link(struct net_device *ndev)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	struct phy_device *phydev = pp->phy_dev;
	int status_change = 0;

	if (phydev->link) {
		if ((pp->speed != phydev->speed) ||
		    (pp->duplex != phydev->duplex)) {
			u32 val;

			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
				 MVNETA_GMAC_AN_SPEED_EN |
				 MVNETA_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

			pp->duplex = phydev->duplex;
			pp->speed = phydev->speed;
		}
	}

	if (phydev->link != pp->link) {
		if (!phydev->link) {
			pp->duplex = -1;
			pp->speed = 0;
		}
		pp->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val |= (MVNETA_GMAC_FORCE_LINK_PASS |
				MVNETA_GMAC_FORCE_LINK_DOWN);
			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
			mvneta_port_up(pp);
			netdev_info(pp->dev, "link up\n");
		} else {
			mvneta_port_down(pp);
			netdev_info(pp->dev, "link down\n");
		}
	}
}
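
/* mvneta_adjust_link() is the phylib callback registered through
 * of_phy_connect(): whenever the PHY resolves a new speed, duplex or
 * link state, the MAC's auto-negotiation bits are cleared and the
 * resolved parameters are forced into MVNETA_GMAC_AUTONEG_CONFIG so
 * that MAC and PHY always agree.
 */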

static int mvneta_mdio_probe(struct mvneta_port *pp)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
				 pp->phy_interface);
	if (!phy_dev) {
		netdev_err(pp->dev, "could not find the PHY\n");
		return -ENODEV;
	}

	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	pp->phy_dev = phy_dev;
	pp->link = 0;
	pp->duplex = 0;
	pp->speed = 0;

	return 0;
}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{
	phy_disconnect(pp->phy_dev);
	pp->phy_dev = NULL;
}

static int mvneta_open(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		goto err_cleanup_rxqs;

	/* Connect to port interrupt line */
	ret = request_irq(pp->dev->irq, mvneta_isr, 0,
			  MVNETA_DRIVER_NAME, pp);
	if (ret) {
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
		goto err_cleanup_txqs;
	}

	/* The link is down by default */
	netif_carrier_off(pp->dev);

	ret = mvneta_mdio_probe(pp);
	if (ret < 0) {
		netdev_err(dev, "cannot probe MDIO bus\n");
		goto err_free_irq;
	}
	mvneta_start_dev(pp);
	return 0;

err_free_irq:
	free_irq(pp->dev->irq, pp);
err_cleanup_txqs:
	mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
	mvneta_cleanup_rxqs(pp);
	return ret;
}

/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	mvneta_stop_dev(pp);
	mvneta_mdio_remove(pp);
	free_irq(dev->irq, pp);
	mvneta_cleanup_rxqs(pp);
	mvneta_cleanup_txqs(pp);

	return 0;
}

static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	if (!pp->phy_dev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
	if (!ret)
		mvneta_adjust_link(dev);

	return ret;
}

/* Ethtool methods */

/* Get settings (phy address, speed) for ethtools */
int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENODEV;

	return phy_ethtool_gset(pp->phy_dev, cmd);
}

/* Set settings (phy address, speed) for ethtools */
int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENODEV;

	return phy_ethtool_sset(pp->phy_dev, cmd);
}

/* Set interrupt coalescing for ethtools */
static int mvneta_ethtool_set_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->done_pkts_coal = c->tx_max_coalesced_frames;
		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
	}

	return 0;
}

/* Get interrupt coalescing for ethtools */
static int mvneta_ethtool_get_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);

	c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
	c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;

	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
	return 0;
}
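
/* Since mvneta_ethtool_set_coalesce() programs every queue with the
 * same values, reading the settings back from queue 0 above is
 * representative of the whole port.
 */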

static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
					 struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(netdev);

	ring->rx_max_pending = MVNETA_MAX_RXD;
	ring->tx_max_pending = MVNETA_MAX_TXD;
	ring->rx_pending = pp->rx_ring_size;
	ring->tx_pending = pp->tx_ring_size;
}

static int mvneta_ethtool_set_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;
	pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
		ring->tx_pending : MVNETA_MAX_TXD;

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open            = mvneta_open,
	.ndo_stop            = mvneta_stop,
	.ndo_start_xmit      = mvneta_tx,
	.ndo_set_rx_mode     = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_get_stats64     = mvneta_get_stats64,
	.ndo_do_ioctl        = mvneta_ioctl,
};

const struct ethtool_ops mvneta_eth_tool_ops = {
	.get_link       = ethtool_op_get_link,
	.get_settings   = mvneta_ethtool_get_settings,
	.set_settings   = mvneta_ethtool_set_settings,
	.set_coalesce   = mvneta_ethtool_set_coalesce,
	.get_coalesce   = mvneta_ethtool_get_coalesce,
	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
	.get_ringparam  = mvneta_ethtool_get_ringparam,
	.set_ringparam  = mvneta_ethtool_set_ringparam,
};

/* Initialize hw */
static int mvneta_init(struct mvneta_port *pp, int phy_addr)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
			   GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
			   GFP_KERNEL);
	if (!pp->rxqs) {
		kfree(pp->txqs);
		return -ENOMEM;
	}

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
	}

	return 0;
}

/* Deinitialize hw: free the queue arrays allocated by mvneta_init() */
static void mvneta_deinit(struct mvneta_port *pp)
{
	kfree(pp->txqs);
	kfree(pp->rxqs);
}

/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
}
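
/* MVNETA_BASE_ADDR_ENABLE holds one disable bit per window, so
 * win_enable starts as 0x3f (all six windows off) and a bit is
 * cleared for every DRAM chip select that gets a window. win_protect
 * accumulates two "full access" bits per window; this version of the
 * code computes it but never writes it to a register.
 */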

/* Power up the port */
static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 val;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	if (phy_mode == PHY_INTERFACE_MODE_SGMII)
		mvneta_port_sgmii_config(pp);

	mvneta_gmac_rgmii_set(pp, 1);

	/* Cancel Port Reset */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	val &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);

	/* Busy-wait until the port deasserts its reset bit */
	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;
}

/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram_target_info;
	struct resource *res;
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *phy_node;
	u32 phy_addr;
	struct mvneta_port *pp;
	struct net_device *dev;
	const char *dt_mac_addr;
	char hw_mac_addr[ETH_ALEN];
	const char *mac_from;
	int phy_mode;
	int err;
	int cpu;

	/* Our multiqueue support is not complete, so for now, only
	 * allow the usage of the first RX queue
	 */
	if (rxq_def != 0) {
		dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
		return -EINVAL;
	}

	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number,
				 rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	phy_node = of_parse_phandle(dn, "phy", 0);
	if (!phy_node) {
		dev_err(&pdev->dev, "no associated PHY\n");
		err = -ENODEV;
		goto err_free_irq;
	}

	phy_mode = of_get_phy_mode(dn);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto err_free_irq;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);

	pp = netdev_priv(dev);

	pp->weight = MVNETA_RX_POLL_WEIGHT;
	pp->phy_node = phy_node;
	pp->phy_interface = phy_mode;

	pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_free_irq;
	}

	clk_prepare_enable(pp->clk);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pp->base)) {
		err = PTR_ERR(pp->base);
		goto err_clk;
	}

	/* Alloc per-cpu stats */
	pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_clk;
	}

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *stats;
		stats = per_cpu_ptr(pp->stats, cpu);
		u64_stats_init(&stats->syncp);
	}

	dt_mac_addr = of_get_mac_address(dn);
	if (dt_mac_addr) {
		mac_from = "device tree";
		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvneta_init(pp, phy_addr);
	if (err < 0) {
		dev_err(&pdev->dev, "can't init eth hal\n");
		goto err_free_stats;
	}

	mvneta_port_power_up(pp, phy_mode);

	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvneta_conf_mbus_windows(pp, dram_target_info);

	netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->priv_flags |= IFF_UNICAST_FLT;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_deinit;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;

err_deinit:
	mvneta_deinit(pp);
err_free_stats:
	free_percpu(pp->stats);
err_clk:
	clk_disable_unprepare(pp->clk);
err_free_irq:
	irq_dispose_mapping(dev->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}

/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	mvneta_deinit(pp);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	free_netdev(dev);

	return 0;
}

static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);
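
/* An illustrative device tree node that this match table would bind
 * against. The register offset, interrupt number, clock phandle and
 * PHY wiring below are placeholders, not values mandated by the
 * driver:
 *
 *	ethernet@70000 {
 *		compatible = "marvell,armada-370-neta";
 *		reg = <0x70000 0x2500>;
 *		interrupts = <8>;
 *		clocks = <&gateclk 4>;
 *		phy = <&phy0>;
 *		phy-mode = "rgmii-id";
 *	};
 */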

static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
	},
};
2966 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
2967 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
2968 MODULE_LICENSE("GPL");

module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);

module_param(rxq_def, int, S_IRUGO);
module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
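
/* The parameters above appear under /sys/module/mvneta/parameters/
 * once the driver is loaded. All of them are read-only at runtime
 * (S_IRUGO) except rx_copybreak, which is also writable by root
 * (S_IWUSR); rxq_number, txq_number and rxq_def can only be set at
 * load time, e.g. "modprobe mvneta rxq_def=0" when built as a module.
 */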