// SPDX-License-Identifier: GPL-2.0-or-later

/* Copyright (c) 2014 Linaro Ltd.
 * Copyright (c) 2014 Hisilicon Limited.
 */

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#define SC_PPE_RESET_DREQ               0x026C

#define PPE_CFG_RX_ADDR                 0x100
#define PPE_CFG_POOL_GRP                0x300
#define PPE_CFG_RX_BUF_SIZE             0x400
#define PPE_CFG_RX_FIFO_SIZE            0x500
#define PPE_CURR_BUF_CNT                0xa200

#define GE_DUPLEX_TYPE                  0x08
#define GE_MAX_FRM_SIZE_REG             0x3c
#define GE_PORT_MODE                    0x40
#define GE_PORT_EN                      0x44
#define GE_SHORT_RUNTS_THR_REG          0x50
#define GE_TX_LOCAL_PAGE_REG            0x5c
#define GE_TRANSMIT_CONTROL_REG         0x60
#define GE_CF_CRC_STRIP_REG             0x1b0
#define GE_MODE_CHANGE_REG              0x1b4
#define GE_RECV_CONTROL_REG             0x1e0
#define GE_STATION_MAC_ADDRESS          0x210

#define PPE_CFG_BUS_CTRL_REG            0x424
#define PPE_CFG_RX_CTRL_REG             0x428

#if defined(CONFIG_HI13X1_GMAC)
#define PPE_CFG_CPU_ADD_ADDR            0x6D0
#define PPE_CFG_MAX_FRAME_LEN_REG       0x500
#define PPE_CFG_RX_PKT_MODE_REG         0x504
#define PPE_CFG_QOS_VMID_GEN            0x520
#define PPE_CFG_RX_PKT_INT              0x740
#define PPE_INTEN                       0x700
#define PPE_INTSTS                      0x708
#define PPE_RINT                        0x704
#define PPE_CFG_STS_MODE                0x880
#else
#define PPE_CFG_CPU_ADD_ADDR            0x580
#define PPE_CFG_MAX_FRAME_LEN_REG       0x408
#define PPE_CFG_RX_PKT_MODE_REG         0x438
#define PPE_CFG_QOS_VMID_GEN            0x500
#define PPE_CFG_RX_PKT_INT              0x538
#define PPE_INTEN                       0x600
#define PPE_INTSTS                      0x608
#define PPE_RINT                        0x604
#define PPE_CFG_STS_MODE                0x700
#endif /* CONFIG_HI13X1_GMAC */

#define PPE_HIS_RX_PKT_CNT              0x804

#define RESET_DREQ_ALL                  0xffffffff

/* REG_INTERRUPT */
#define RCV_INT                         BIT(10)
#define RCV_NOBUF                       BIT(8)
#define RCV_DROP                        BIT(7)
#define TX_DROP                         BIT(6)
#define DEF_INT_ERR                     (RCV_NOBUF | RCV_DROP | TX_DROP)
#define DEF_INT_MASK                    (RCV_INT | DEF_INT_ERR)

/* TX descriptor config */
#define TX_FREE_MEM                     BIT(0)
#define TX_READ_ALLOC_L3                BIT(1)
#if defined(CONFIG_HI13X1_GMAC)
#define TX_CLEAR_WB                     BIT(7)
#define TX_RELEASE_TO_PPE               BIT(4)
#define TX_FINISH_CACHE_INV             BIT(6)
#define TX_POOL_SHIFT                   16
#else
#define TX_CLEAR_WB                     BIT(4)
#define TX_FINISH_CACHE_INV             BIT(2)
#endif
#define TX_L3_CHECKSUM                  BIT(5)
#define TX_LOOP_BACK                    BIT(11)

/* RX error */
#define RX_PKT_DROP                     BIT(0)
#define RX_L2_ERR                       BIT(1)
#define RX_PKT_ERR                      (RX_PKT_DROP | RX_L2_ERR)

#define SGMII_SPEED_1000                0x08
#define SGMII_SPEED_100                 0x07
#define SGMII_SPEED_10                  0x06
#define MII_SPEED_100                   0x01
#define MII_SPEED_10                    0x00

#define GE_DUPLEX_FULL                  BIT(0)
#define GE_DUPLEX_HALF                  0x00
#define GE_MODE_CHANGE_EN               BIT(0)

#define GE_TX_AUTO_NEG                  BIT(5)
#define GE_TX_ADD_CRC                   BIT(6)
#define GE_TX_SHORT_PAD_THROUGH         BIT(7)

#define GE_RX_STRIP_CRC                 BIT(0)
#define GE_RX_STRIP_PAD                 BIT(3)
#define GE_RX_PAD_EN                    BIT(4)

#define GE_AUTO_NEG_CTL                 BIT(0)

#define GE_RX_INT_THRESHOLD             BIT(6)
#define GE_RX_TIMEOUT                   0x04

#define GE_RX_PORT_EN                   BIT(1)
#define GE_TX_PORT_EN                   BIT(2)

#define PPE_CFG_RX_PKT_ALIGN            BIT(18)

#if defined(CONFIG_HI13X1_GMAC)
#define PPE_CFG_QOS_VMID_GRP_SHIFT      4
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT     7
#define PPE_CFG_STS_RX_PKT_CNT_RC       BIT(0)
#define PPE_CFG_QOS_VMID_MODE           BIT(15)
#define PPE_CFG_BUS_LOCAL_REL           (BIT(9) | BIT(15) | BIT(19) | BIT(23))

/* buf unit size is cache_line_size, which is 64, so the shift is 6 */
#define PPE_BUF_SIZE_SHIFT              6
#define PPE_TX_BUF_HOLD                 BIT(31)
#define CACHE_LINE_MASK                 0x3F
#else
#define PPE_CFG_QOS_VMID_GRP_SHIFT      8
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT     11
#define PPE_CFG_STS_RX_PKT_CNT_RC       BIT(12)
#define PPE_CFG_QOS_VMID_MODE           BIT(14)
#define PPE_CFG_BUS_LOCAL_REL           BIT(14)

/* buf unit size is 1, so the shift is 0 */
#define PPE_BUF_SIZE_SHIFT              0
#define PPE_TX_BUF_HOLD                 0
#endif /* CONFIG_HI13X1_GMAC */

#define PPE_CFG_RX_FIFO_FSFU            BIT(11)
#define PPE_CFG_RX_DEPTH_SHIFT          16
#define PPE_CFG_RX_START_SHIFT          0

#define PPE_CFG_BUS_BIG_ENDIEN          BIT(0)

#define RX_DESC_NUM                     128
#define TX_DESC_NUM                     256
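/* ring indices wrap by masking, so both ring sizes must be powers of two */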
#define TX_NEXT(N)                      (((N) + 1) & (TX_DESC_NUM-1))
#define RX_NEXT(N)                      (((N) + 1) & (RX_DESC_NUM-1))

#define GMAC_PPE_RX_PKT_MAX_LEN         379
#define GMAC_MAX_PKT_LEN                1516
#define GMAC_MIN_PKT_LEN                31
#define RX_BUF_SIZE                     1600
#define RESET_TIMEOUT                   1000
#define TX_TIMEOUT                      (6 * HZ)

#define DRV_NAME                        "hip04-ether"
#define DRV_VERSION                     "v1.0"

#define HIP04_MAX_TX_COALESCE_USECS     200
#define HIP04_MIN_TX_COALESCE_USECS     100
#define HIP04_MAX_TX_COALESCE_FRAMES    200
#define HIP04_MIN_TX_COALESCE_FRAMES    100

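/* Hardware descriptor layouts. HI13X1 uses a different, wider layout than
 * the other SoCs; in both cases the fields are stored big-endian, hence the
 * cpu_to_be32()/be32_to_cpu() conversions in the xmit and poll paths.
 */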
struct tx_desc {
#if defined(CONFIG_HI13X1_GMAC)
        u32 reserved1[2];
        u32 send_addr;
        u16 send_size;
        u16 data_offset;
        u32 reserved2[7];
        u32 cfg;
        u32 wb_addr;
        u32 reserved3[3];
#else
        u32 send_addr;
        u32 send_size;
        u32 next_addr;
        u32 cfg;
        u32 wb_addr;
#endif
} __aligned(64);

struct rx_desc {
#if defined(CONFIG_HI13X1_GMAC)
        u32 reserved1[3];
        u16 pkt_len;
        u16 reserved_16;
        u32 reserved2[6];
        u32 pkt_err;
        u32 reserved3[5];
#else
        u16 reserved_16;
        u16 pkt_len;
        u32 reserve1[3];
        u32 pkt_err;
        u32 reserve2[4];
#endif
};

struct hip04_priv {
        void __iomem *base;
#if defined(CONFIG_HI13X1_GMAC)
        void __iomem *sysctrl_base;
#endif
        int phy_mode;
        int chan;
        unsigned int port;
        unsigned int group;
        unsigned int speed;
        unsigned int duplex;
        unsigned int reg_inten;

        struct napi_struct napi;
        struct net_device *ndev;

        struct tx_desc *tx_desc;
        dma_addr_t tx_desc_dma;
        struct sk_buff *tx_skb[TX_DESC_NUM];
        dma_addr_t tx_phys[TX_DESC_NUM];
        unsigned int tx_head;

        int tx_coalesce_frames;
        int tx_coalesce_usecs;
        struct hrtimer tx_coalesce_timer;

        unsigned char *rx_buf[RX_DESC_NUM];
        dma_addr_t rx_phys[RX_DESC_NUM];
        unsigned int rx_head;
        unsigned int rx_buf_size;

        struct device_node *phy_node;
        struct phy_device *phy;
        struct regmap *map;
        struct work_struct tx_timeout_task;

        /* written only by tx cleanup */
        unsigned int tx_tail ____cacheline_aligned_in_smp;
};

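/* number of TX descriptors queued but not yet reclaimed */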
static inline unsigned int tx_count(unsigned int head, unsigned int tail)
{
        return (head - tail) % (TX_DESC_NUM - 1);
}

static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
{
        struct hip04_priv *priv = netdev_priv(ndev);
        u32 val;

        priv->speed = speed;
        priv->duplex = duplex;

        switch (priv->phy_mode) {
        case PHY_INTERFACE_MODE_SGMII:
                if (speed == SPEED_1000)
                        val = SGMII_SPEED_1000;
                else if (speed == SPEED_100)
                        val = SGMII_SPEED_100;
                else
                        val = SGMII_SPEED_10;
                break;
        case PHY_INTERFACE_MODE_MII:
                if (speed == SPEED_100)
                        val = MII_SPEED_100;
                else
                        val = MII_SPEED_10;
                break;
        default:
                netdev_warn(ndev, "unsupported phy mode\n");
                val = MII_SPEED_10;
                break;
        }
        writel_relaxed(val, priv->base + GE_PORT_MODE);

        val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
        writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);

        val = GE_MODE_CHANGE_EN;
        writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
}

static void hip04_reset_dreq(struct hip04_priv *priv)
{
#if defined(CONFIG_HI13X1_GMAC)
        writel_relaxed(RESET_DREQ_ALL, priv->sysctrl_base + SC_PPE_RESET_DREQ);
#endif
}

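/* Drain any buffers still queued in the PPE RX FIFO for this port, so a
 * subsequent open starts from an empty ring.
 */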
static void hip04_reset_ppe(struct hip04_priv *priv)
{
        u32 val, tmp, timeout = 0;

        do {
                regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
                regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
                if (timeout++ > RESET_TIMEOUT)
                        break;
        } while (val & 0xfff);
}

static void hip04_config_fifo(struct hip04_priv *priv)
{
        u32 val;

        val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
        val |= PPE_CFG_STS_RX_PKT_CNT_RC;
        writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);

        val = BIT(priv->group);
        regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);

        val = priv->group << PPE_CFG_QOS_VMID_GRP_SHIFT;
        val |= PPE_CFG_QOS_VMID_MODE;
        writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);

        val = RX_BUF_SIZE >> PPE_BUF_SIZE_SHIFT;
        regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);

        val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
        val |= PPE_CFG_RX_FIFO_FSFU;
        val |= priv->chan << PPE_CFG_RX_START_SHIFT;
        regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);

        val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
        writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);

        val = PPE_CFG_RX_PKT_ALIGN;
        writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);

        val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
        writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);

        val = GMAC_PPE_RX_PKT_MAX_LEN;
        writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);

        val = GMAC_MAX_PKT_LEN;
        writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);

        val = GMAC_MIN_PKT_LEN;
        writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);

        val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
        val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
        writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);

        val = GE_RX_STRIP_CRC;
        writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);

        val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
        val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
        writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);

#ifndef CONFIG_HI13X1_GMAC
        val = GE_AUTO_NEG_CTL;
        writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
#endif
}

static void hip04_mac_enable(struct net_device *ndev)
{
        struct hip04_priv *priv = netdev_priv(ndev);
        u32 val;

        /* enable tx & rx */
        val = readl_relaxed(priv->base + GE_PORT_EN);
        val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
        writel_relaxed(val, priv->base + GE_PORT_EN);

        /* clear rx int */
        val = RCV_INT;
        writel_relaxed(val, priv->base + PPE_RINT);

        /* config recv int */
        val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
        writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);

        /* enable interrupt */
        priv->reg_inten = DEF_INT_MASK;
        writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
}

static void hip04_mac_disable(struct net_device *ndev)
{
        struct hip04_priv *priv = netdev_priv(ndev);
        u32 val;

        /* disable int */
        priv->reg_inten &= ~(DEF_INT_MASK);
        writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);

        /* disable tx & rx */
        val = readl_relaxed(priv->base + GE_PORT_EN);
        val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
        writel_relaxed(val, priv->base + GE_PORT_EN);
}

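/* Hand a TX descriptor's bus address to the PPE; this write is what
 * actually kicks off transmission of the descriptor set up in xmit.
 */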
static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
{
        u32 val;

        val = phys >> PPE_BUF_SIZE_SHIFT | PPE_TX_BUF_HOLD;
        writel(val, priv->base + PPE_CFG_CPU_ADD_ADDR);
}

static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
{
        u32 val;

        val = phys >> PPE_BUF_SIZE_SHIFT;
        regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, val);
}

static u32 hip04_recv_cnt(struct hip04_priv *priv)
{
        return readl(priv->base + PPE_HIS_RX_PKT_CNT);
}

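/* The station MAC address is split across two registers: the two high
 * bytes at GE_STATION_MAC_ADDRESS, the remaining four bytes at offset +4.
 */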
static void hip04_update_mac_address(struct net_device *ndev)
{
        struct hip04_priv *priv = netdev_priv(ndev);

        writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
                       priv->base + GE_STATION_MAC_ADDRESS);
        writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
                        (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
                       priv->base + GE_STATION_MAC_ADDRESS + 4);
}

static int hip04_set_mac_address(struct net_device *ndev, void *addr)
{
        eth_mac_addr(ndev, addr);
        hip04_update_mac_address(ndev);
        return 0;
}

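/* Reclaim completed TX descriptors. The hardware write-back clears
 * send_addr (wb_addr points at that field), so a non-zero send_addr means
 * the descriptor is still owned by hardware unless @force is set.
 * Returns the number of descriptors still in flight.
 */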
static int hip04_tx_reclaim(struct net_device *ndev, bool force)
{
        struct hip04_priv *priv = netdev_priv(ndev);
        unsigned int tx_tail = priv->tx_tail;
        struct tx_desc *desc;
        unsigned int bytes_compl = 0, pkts_compl = 0;
        unsigned int count;

        smp_rmb();
        count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
        if (count == 0)
                goto out;

        while (count) {
                desc = &priv->tx_desc[tx_tail];
                if (desc->send_addr != 0) {
                        if (force)
                                desc->send_addr = 0;
                        else
                                break;
                }

                if (priv->tx_phys[tx_tail]) {
                        dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
                                         priv->tx_skb[tx_tail]->len,
                                         DMA_TO_DEVICE);
                        priv->tx_phys[tx_tail] = 0;
                }
                pkts_compl++;
                bytes_compl += priv->tx_skb[tx_tail]->len;
                dev_kfree_skb(priv->tx_skb[tx_tail]);
                priv->tx_skb[tx_tail] = NULL;
                tx_tail = TX_NEXT(tx_tail);
                count--;
        }

        priv->tx_tail = tx_tail;
        smp_wmb(); /* Ensure tx_tail visible to xmit */

out:
        if (pkts_compl || bytes_compl)
                netdev_completed_queue(ndev, pkts_compl, bytes_compl);

        if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
                netif_wake_queue(ndev);

        return count;
}

static void hip04_start_tx_timer(struct hip04_priv *priv)
{
        unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;

        /* allow timer to fire after half the time at the earliest */
        hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
                               ns, HRTIMER_MODE_REL);
}

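/* TX path: each skb is mapped as a single DMA fragment and attached to one
 * ring descriptor. Completions are coalesced: cleanup runs either once
 * tx_coalesce_frames packets are outstanding or when the coalesce timer
 * fires, whichever comes first.
 */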
static netdev_tx_t
hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct hip04_priv *priv = netdev_priv(ndev);
        struct net_device_stats *stats = &ndev->stats;
        unsigned int tx_head = priv->tx_head, count;
        struct tx_desc *desc = &priv->tx_desc[tx_head];
        dma_addr_t phys;

        smp_rmb();
        count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
        if (count == (TX_DESC_NUM - 1)) {
                netif_stop_queue(ndev);
                return NETDEV_TX_BUSY;
        }

        phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(&ndev->dev, phys)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        priv->tx_skb[tx_head] = skb;
        priv->tx_phys[tx_head] = phys;

        desc->send_size = (__force u32)cpu_to_be32(skb->len);
#if defined(CONFIG_HI13X1_GMAC)
        desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
                | TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
        desc->data_offset = (__force u32)cpu_to_be32(phys & CACHE_LINE_MASK);
        desc->send_addr = (__force u32)cpu_to_be32(phys & ~CACHE_LINE_MASK);
#else
        desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
        desc->send_addr = (__force u32)cpu_to_be32(phys);
#endif
        phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
        desc->wb_addr = (__force u32)cpu_to_be32(phys +
                offsetof(struct tx_desc, send_addr));
        skb_tx_timestamp(skb);

        hip04_set_xmit_desc(priv, phys);
        priv->tx_head = TX_NEXT(tx_head);
        count++;
        netdev_sent_queue(ndev, skb->len);

        stats->tx_bytes += skb->len;
        stats->tx_packets++;

        /* Ensure tx_head update visible to tx reclaim */
        smp_wmb();

        /* queue is getting full, better start cleaning up now */
        if (count >= priv->tx_coalesce_frames) {
                if (napi_schedule_prep(&priv->napi)) {
                        /* disable rx interrupt and timer */
                        priv->reg_inten &= ~(RCV_INT);
                        writel_relaxed(DEF_INT_MASK & ~RCV_INT,
                                       priv->base + PPE_INTEN);
                        hrtimer_cancel(&priv->tx_coalesce_timer);
                        __napi_schedule(&priv->napi);
                }
        } else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
                /* cleanup not pending yet, start a new timer */
                hip04_start_tx_timer(priv);
        }

        return NETDEV_TX_OK;
}

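/* NAPI poll: reclaims TX descriptors first, then receives up to @budget
 * packets. Each RX buffer carries its rx_desc header in the first bytes of
 * the buffer itself, so length and error status are read from the freshly
 * unmapped data before the buffer is handed up as an skb.
 */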
static int hip04_rx_poll(struct napi_struct *napi, int budget)
{
        struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
        struct net_device *ndev = priv->ndev;
        struct net_device_stats *stats = &ndev->stats;
        unsigned int cnt = hip04_recv_cnt(priv);
        struct rx_desc *desc;
        struct sk_buff *skb;
        unsigned char *buf;
        bool last = false;
        dma_addr_t phys;
        int rx = 0;
        int tx_remaining;
        u16 len;
        u32 err;

        /* clean up tx descriptors */
        tx_remaining = hip04_tx_reclaim(ndev, false);

        while (cnt && !last) {
                buf = priv->rx_buf[priv->rx_head];
                skb = build_skb(buf, priv->rx_buf_size);
                if (unlikely(!skb)) {
                        net_dbg_ratelimited("build_skb failed\n");
                        goto refill;
                }

                dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
                                 RX_BUF_SIZE, DMA_FROM_DEVICE);
                priv->rx_phys[priv->rx_head] = 0;

                desc = (struct rx_desc *)skb->data;
                len = be16_to_cpu((__force __be16)desc->pkt_len);
                err = be32_to_cpu((__force __be32)desc->pkt_err);

                if (len == 0) {
                        dev_kfree_skb_any(skb);
                        last = true;
                } else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
                        dev_kfree_skb_any(skb);
                        stats->rx_dropped++;
                        stats->rx_errors++;
                } else {
                        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
                        skb_put(skb, len);
                        skb->protocol = eth_type_trans(skb, ndev);
                        napi_gro_receive(&priv->napi, skb);
                        stats->rx_packets++;
                        stats->rx_bytes += len;
                        rx++;
                }

refill:
                buf = netdev_alloc_frag(priv->rx_buf_size);
                if (!buf)
                        goto done;
                phys = dma_map_single(&ndev->dev, buf,
                                      RX_BUF_SIZE, DMA_FROM_DEVICE);
                if (dma_mapping_error(&ndev->dev, phys))
                        goto done;
                priv->rx_buf[priv->rx_head] = buf;
                priv->rx_phys[priv->rx_head] = phys;
                hip04_set_recv_desc(priv, phys);

                priv->rx_head = RX_NEXT(priv->rx_head);
                if (rx >= budget)
                        goto done;

                if (--cnt == 0)
                        cnt = hip04_recv_cnt(priv);
        }

        if (!(priv->reg_inten & RCV_INT)) {
                /* enable rx interrupt */
                priv->reg_inten |= RCV_INT;
                writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
        }
        napi_complete_done(napi, rx);
done:
        /* start a new timer if necessary */
        if (rx < budget && tx_remaining)
                hip04_start_tx_timer(priv);

        return rx;
}

static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
{
        struct net_device *ndev = (struct net_device *)dev_id;
        struct hip04_priv *priv = netdev_priv(ndev);
        struct net_device_stats *stats = &ndev->stats;
        u32 ists = readl_relaxed(priv->base + PPE_INTSTS);

        if (!ists)
                return IRQ_NONE;

        writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);

        if (unlikely(ists & DEF_INT_ERR)) {
                if (ists & (RCV_NOBUF | RCV_DROP)) {
                        stats->rx_errors++;
                        stats->rx_dropped++;
                        netdev_err(ndev, "rx drop\n");
                }
                if (ists & TX_DROP) {
                        stats->tx_dropped++;
                        netdev_err(ndev, "tx drop\n");
                }
        }

        if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
                /* disable rx interrupt */
                priv->reg_inten &= ~(RCV_INT);
                writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
                hrtimer_cancel(&priv->tx_coalesce_timer);
                __napi_schedule(&priv->napi);
        }

        return IRQ_HANDLED;
}

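/* Coalesce timer callback: fires when too few packets have accumulated to
 * trigger cleanup from the xmit path, and schedules NAPI to reclaim them.
 */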
static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
{
        struct hip04_priv *priv;

        priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);

        if (napi_schedule_prep(&priv->napi)) {
                /* disable rx interrupt */
                priv->reg_inten &= ~(RCV_INT);
                writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
                __napi_schedule(&priv->napi);
        }

        return HRTIMER_NORESTART;
}

static void hip04_adjust_link(struct net_device *ndev)
{
        struct hip04_priv *priv = netdev_priv(ndev);
        struct phy_device *phy = priv->phy;

        if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
                hip04_config_port(ndev, phy->speed, phy->duplex);
                phy_print_status(phy);
        }
}

static int hip04_mac_open(struct net_device *ndev)
{
        struct hip04_priv *priv = netdev_priv(ndev);
        int i;

        priv->rx_head = 0;
        priv->tx_head = 0;
        priv->tx_tail = 0;
        hip04_reset_ppe(priv);

        for (i = 0; i < RX_DESC_NUM; i++) {
                dma_addr_t phys;

                phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
                                      RX_BUF_SIZE, DMA_FROM_DEVICE);
                if (dma_mapping_error(&ndev->dev, phys))
                        return -EIO;

                priv->rx_phys[i] = phys;
                hip04_set_recv_desc(priv, phys);
        }

        if (priv->phy)
                phy_start(priv->phy);

        netdev_reset_queue(ndev);
        netif_start_queue(ndev);
        hip04_mac_enable(ndev);
        napi_enable(&priv->napi);

        return 0;
}

static int hip04_mac_stop(struct net_device *ndev)
{
        struct hip04_priv *priv = netdev_priv(ndev);
        int i;

        napi_disable(&priv->napi);
        netif_stop_queue(ndev);
        hip04_mac_disable(ndev);
        hip04_tx_reclaim(ndev, true);
        hip04_reset_ppe(priv);

        if (priv->phy)
                phy_stop(priv->phy);

        for (i = 0; i < RX_DESC_NUM; i++) {
                if (priv->rx_phys[i]) {
                        dma_unmap_single(&ndev->dev, priv->rx_phys[i],
                                         RX_BUF_SIZE, DMA_FROM_DEVICE);
                        priv->rx_phys[i] = 0;
                }
        }

        return 0;
}

static void hip04_timeout(struct net_device *ndev)
{
        struct hip04_priv *priv = netdev_priv(ndev);

        schedule_work(&priv->tx_timeout_task);
}

static void hip04_tx_timeout_task(struct work_struct *work)
{
        struct hip04_priv *priv;

        priv = container_of(work, struct hip04_priv, tx_timeout_task);
        hip04_mac_stop(priv->ndev);
        hip04_mac_open(priv->ndev);
}

static int hip04_get_coalesce(struct net_device *netdev,
                              struct ethtool_coalesce *ec)
{
        struct hip04_priv *priv = netdev_priv(netdev);

        ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
        ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;

        return 0;
}

static int hip04_set_coalesce(struct net_device *netdev,
                              struct ethtool_coalesce *ec)
{
        struct hip04_priv *priv = netdev_priv(netdev);

        /* Check unsupported parameters */
        if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
            (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
            (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
            (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
            (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
            (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
            (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
            (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
            (ec->tx_max_coalesced_frames_irq) ||
            (ec->stats_block_coalesce_usecs) ||
            (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
                return -EOPNOTSUPP;

        if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
             ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
            (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
             ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
                return -EINVAL;

        priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
        priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;

        return 0;
}

static void hip04_get_drvinfo(struct net_device *netdev,
                              struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}

static const struct ethtool_ops hip04_ethtool_ops = {
        .get_coalesce           = hip04_get_coalesce,
        .set_coalesce           = hip04_set_coalesce,
        .get_drvinfo            = hip04_get_drvinfo,
};

static const struct net_device_ops hip04_netdev_ops = {
        .ndo_open               = hip04_mac_open,
        .ndo_stop               = hip04_mac_stop,
        .ndo_start_xmit         = hip04_mac_start_xmit,
        .ndo_set_mac_address    = hip04_set_mac_address,
        .ndo_tx_timeout         = hip04_timeout,
        .ndo_validate_addr      = eth_validate_addr,
};

static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
{
        struct hip04_priv *priv = netdev_priv(ndev);
        int i;

        priv->tx_desc = dma_alloc_coherent(d,
                                           TX_DESC_NUM * sizeof(struct tx_desc),
                                           &priv->tx_desc_dma, GFP_KERNEL);
        if (!priv->tx_desc)
                return -ENOMEM;

        priv->rx_buf_size = RX_BUF_SIZE +
                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        for (i = 0; i < RX_DESC_NUM; i++) {
                priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
                if (!priv->rx_buf[i])
                        return -ENOMEM;
        }

        return 0;
}

static void hip04_free_ring(struct net_device *ndev, struct device *d)
{
        struct hip04_priv *priv = netdev_priv(ndev);
        int i;

        for (i = 0; i < RX_DESC_NUM; i++)
                if (priv->rx_buf[i])
                        skb_free_frag(priv->rx_buf[i]);

        for (i = 0; i < TX_DESC_NUM; i++)
                if (priv->tx_skb[i])
                        dev_kfree_skb_any(priv->tx_skb[i]);

        dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
                          priv->tx_desc, priv->tx_desc_dma);
}

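/* An illustrative (not authoritative) devicetree node for this driver; the
 * address, interrupt and port-handle cell values below are made up, but the
 * properties match what hip04_mac_probe() parses: "reg", "interrupts",
 * "phy-mode", an optional "phy-handle", and a three-cell
 * "port-handle" = <&ppe port chan group> pointing at the PPE syscon.
 *
 *	ge0: ethernet@28b0000 {
 *		compatible = "hisilicon,hip04-mac";
 *		reg = <0x28b0000 0x10000>;
 *		interrupts = <0 413 4>;
 *		phy-mode = "sgmii";
 *		phy-handle = <&phy0>;
 *		port-handle = <&ppe 31 0 31>;
 *	};
 */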
static int hip04_mac_probe(struct platform_device *pdev)
{
        struct device *d = &pdev->dev;
        struct device_node *node = d->of_node;
        struct of_phandle_args arg;
        struct net_device *ndev;
        struct hip04_priv *priv;
        int irq;
        int ret;

        ndev = alloc_etherdev(sizeof(struct hip04_priv));
        if (!ndev)
                return -ENOMEM;

        priv = netdev_priv(ndev);
        priv->ndev = ndev;
        platform_set_drvdata(pdev, ndev);
        SET_NETDEV_DEV(ndev, &pdev->dev);

        priv->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(priv->base)) {
                ret = PTR_ERR(priv->base);
                goto init_fail;
        }

#if defined(CONFIG_HI13X1_GMAC)
        priv->sysctrl_base = devm_platform_ioremap_resource(pdev, 1);
        if (IS_ERR(priv->sysctrl_base)) {
                ret = PTR_ERR(priv->sysctrl_base);
                goto init_fail;
        }
#endif

        ret = of_parse_phandle_with_fixed_args(node, "port-handle", 3, 0, &arg);
        if (ret < 0) {
                dev_warn(d, "no port-handle\n");
                goto init_fail;
        }

        priv->port = arg.args[0];
        priv->chan = arg.args[1] * RX_DESC_NUM;
        priv->group = arg.args[2];

        hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

        /* BQL will try to keep the TX queue as short as possible, but it can't
         * be faster than tx_coalesce_usecs, so we need a fast timeout here,
         * but also long enough to gather up enough frames to ensure we don't
         * get more interrupts than necessary.
         * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
         */
        priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
        priv->tx_coalesce_usecs = 200;
        priv->tx_coalesce_timer.function = tx_done;

        priv->map = syscon_node_to_regmap(arg.np);
        if (IS_ERR(priv->map)) {
                dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
                ret = PTR_ERR(priv->map);
                goto init_fail;
        }

        priv->phy_mode = of_get_phy_mode(node);
        if (priv->phy_mode < 0) {
                dev_warn(d, "phy-mode not found\n");
                ret = -EINVAL;
                goto init_fail;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
                ret = -EINVAL;
                goto init_fail;
        }

        ret = devm_request_irq(d, irq, hip04_mac_interrupt,
                               0, pdev->name, ndev);
        if (ret) {
                netdev_err(ndev, "devm_request_irq failed\n");
                goto init_fail;
        }

        priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
        if (priv->phy_node) {
                priv->phy = of_phy_connect(ndev, priv->phy_node,
                                           &hip04_adjust_link,
                                           0, priv->phy_mode);
                if (!priv->phy) {
                        ret = -EPROBE_DEFER;
                        goto init_fail;
                }
        }

        INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);

        ndev->netdev_ops = &hip04_netdev_ops;
        ndev->ethtool_ops = &hip04_ethtool_ops;
        ndev->watchdog_timeo = TX_TIMEOUT;
        ndev->priv_flags |= IFF_UNICAST_FLT;
        ndev->irq = irq;
        netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);

        hip04_reset_dreq(priv);
        hip04_reset_ppe(priv);
        if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
                hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);

        hip04_config_fifo(priv);
        eth_random_addr(ndev->dev_addr);
        hip04_update_mac_address(ndev);

        ret = hip04_alloc_ring(ndev, d);
        if (ret) {
                netdev_err(ndev, "alloc ring fail\n");
                goto alloc_fail;
        }

        ret = register_netdev(ndev);
        if (ret)
                goto alloc_fail;

        return 0;

alloc_fail:
        hip04_free_ring(ndev, d);
init_fail:
        of_node_put(priv->phy_node);
        free_netdev(ndev);
        return ret;
}

static int hip04_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct hip04_priv *priv = netdev_priv(ndev);
        struct device *d = &pdev->dev;

        if (priv->phy)
                phy_disconnect(priv->phy);

        /* unregister (and thus close) the device before freeing the rings */
        unregister_netdev(ndev);
        hip04_free_ring(ndev, d);
        /* the IRQ was requested with devm_request_irq() and is released
         * automatically; an explicit free_irq() here would free it twice
         */
        of_node_put(priv->phy_node);
        cancel_work_sync(&priv->tx_timeout_task);
        free_netdev(ndev);

        return 0;
}

static const struct of_device_id hip04_mac_match[] = {
        { .compatible = "hisilicon,hip04-mac" },
        { }
};

MODULE_DEVICE_TABLE(of, hip04_mac_match);

static struct platform_driver hip04_mac_driver = {
        .probe  = hip04_mac_probe,
        .remove = hip04_remove,
        .driver = {
                .name           = DRV_NAME,
                .of_match_table = hip04_mac_match,
        },
};
module_platform_driver(hip04_mac_driver);

MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
MODULE_LICENSE("GPL");