drivers/net/ethernet/ti/netcp_ethss.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Keystone GBE and XGBE subsystem code
4  *
5  * Copyright (C) 2014 Texas Instruments Incorporated
6  * Authors:     Sandeep Nair <sandeep_n@ti.com>
7  *              Sandeep Paulraj <s-paulraj@ti.com>
8  *              Cyril Chemparathy <cyril@ti.com>
9  *              Santosh Shilimkar <santosh.shilimkar@ti.com>
10  *              Wingman Kwok <w-kwok2@ti.com>
11  */
12
13 #include <linux/io.h>
14 #include <linux/module.h>
15 #include <linux/of_mdio.h>
16 #include <linux/of_net.h>
17 #include <linux/of_address.h>
18 #include <linux/if_vlan.h>
19 #include <linux/ptp_classify.h>
20 #include <linux/net_tstamp.h>
21 #include <linux/ethtool.h>
22
23 #include "cpsw.h"
24 #include "cpsw_ale.h"
25 #include "netcp.h"
26 #include "cpts.h"
27
28 #define NETCP_DRIVER_NAME               "TI KeyStone Ethernet Driver"
29 #define NETCP_DRIVER_VERSION            "v1.0"
30
31 #define GBE_IDENT(reg)                  ((reg >> 16) & 0xffff)
32 #define GBE_MAJOR_VERSION(reg)          (reg >> 8 & 0x7)
33 #define GBE_MINOR_VERSION(reg)          (reg & 0xff)
34 #define GBE_RTL_VERSION(reg)            ((reg >> 11) & 0x1f)
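/* Worked example (the id_ver value below is hypothetical, purely
 * illustrative): applying the field macros above to a register word
 * 0x4ed21104 would give
 *   GBE_IDENT(0x4ed21104)         == 0x4ed2  (i.e. GBE_SS_VERSION_14)
 *   GBE_RTL_VERSION(0x4ed21104)   == 0x02
 *   GBE_MAJOR_VERSION(0x4ed21104) == 0x1
 *   GBE_MINOR_VERSION(0x4ed21104) == 0x04
 * identifying the subsystem as GBE SS 1.4, RTL revision 2.
 */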
35
36 /* 1G Ethernet SS defines */
37 #define GBE_MODULE_NAME                 "netcp-gbe"
38 #define GBE_SS_VERSION_14               0x4ed2
39
40 #define GBE_SS_REG_INDEX                0
41 #define GBE_SGMII34_REG_INDEX           1
42 #define GBE_SM_REG_INDEX                2
43 /* offset relative to base of GBE_SS_REG_INDEX */
44 #define GBE13_SGMII_MODULE_OFFSET       0x100
45 /* offset relative to base of GBE_SM_REG_INDEX */
46 #define GBE13_HOST_PORT_OFFSET          0x34
47 #define GBE13_SLAVE_PORT_OFFSET         0x60
48 #define GBE13_EMAC_OFFSET               0x100
49 #define GBE13_SLAVE_PORT2_OFFSET        0x200
50 #define GBE13_HW_STATS_OFFSET           0x300
51 #define GBE13_CPTS_OFFSET               0x500
52 #define GBE13_ALE_OFFSET                0x600
53 #define GBE13_HOST_PORT_NUM             0
54 #define GBE13_NUM_ALE_ENTRIES           1024
55
56 /* 1G Ethernet NU SS defines */
57 #define GBENU_MODULE_NAME               "netcp-gbenu"
58 #define GBE_SS_ID_NU                    0x4ee6
59 #define GBE_SS_ID_2U                    0x4ee8
60
61 #define IS_SS_ID_MU(d) \
62         ((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
63          (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
64
65 #define IS_SS_ID_NU(d) \
66         (GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
67
68 #define IS_SS_ID_VER_14(d) \
69         (GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14)
70 #define IS_SS_ID_2U(d) \
71         (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)
72
73 #define GBENU_SS_REG_INDEX              0
74 #define GBENU_SM_REG_INDEX              1
75 #define GBENU_SGMII_MODULE_OFFSET       0x100
76 #define GBENU_HOST_PORT_OFFSET          0x1000
77 #define GBENU_SLAVE_PORT_OFFSET         0x2000
78 #define GBENU_EMAC_OFFSET               0x2330
79 #define GBENU_HW_STATS_OFFSET           0x1a000
80 #define GBENU_CPTS_OFFSET               0x1d000
81 #define GBENU_ALE_OFFSET                0x1e000
82 #define GBENU_HOST_PORT_NUM             0
83 #define GBENU_SGMII_MODULE_SIZE         0x100
84
85 /* 10G Ethernet SS defines */
86 #define XGBE_MODULE_NAME                "netcp-xgbe"
87 #define XGBE_SS_VERSION_10              0x4ee4
88
89 #define XGBE_SS_REG_INDEX               0
90 #define XGBE_SM_REG_INDEX               1
91 #define XGBE_SERDES_REG_INDEX           2
92
93 /* offset relative to base of XGBE_SS_REG_INDEX */
94 #define XGBE10_SGMII_MODULE_OFFSET      0x100
95 #define IS_SS_ID_XGBE(d)                ((d)->ss_version == XGBE_SS_VERSION_10)
96 /* offset relative to base of XGBE_SM_REG_INDEX */
97 #define XGBE10_HOST_PORT_OFFSET         0x34
98 #define XGBE10_SLAVE_PORT_OFFSET        0x64
99 #define XGBE10_EMAC_OFFSET              0x400
100 #define XGBE10_CPTS_OFFSET              0x600
101 #define XGBE10_ALE_OFFSET               0x700
102 #define XGBE10_HW_STATS_OFFSET          0x800
103 #define XGBE10_HOST_PORT_NUM            0
104 #define XGBE10_NUM_ALE_ENTRIES          2048
105
106 #define GBE_TIMER_INTERVAL                      (HZ / 2)
107
108 /* Soft reset register values */
109 #define SOFT_RESET_MASK                         BIT(0)
110 #define SOFT_RESET                              BIT(0)
111 #define DEVICE_EMACSL_RESET_POLL_COUNT          100
112 #define GMACSL_RET_WARN_RESET_INCOMPLETE        -2
113
114 #define MACSL_RX_ENABLE_CSF                     BIT(23)
115 #define MACSL_ENABLE_EXT_CTL                    BIT(18)
116 #define MACSL_XGMII_ENABLE                      BIT(13)
117 #define MACSL_XGIG_MODE                         BIT(8)
118 #define MACSL_GIG_MODE                          BIT(7)
119 #define MACSL_GMII_ENABLE                       BIT(5)
120 #define MACSL_FULLDUPLEX                        BIT(0)
121
122 #define GBE_CTL_P0_ENABLE                       BIT(2)
123 #define ETH_SW_CTL_P0_TX_CRC_REMOVE             BIT(13)
124 #define GBE13_REG_VAL_STAT_ENABLE_ALL           0xff
125 #define XGBE_REG_VAL_STAT_ENABLE_ALL            0xf
126 #define GBE_STATS_CD_SEL                        BIT(28)
127
128 #define GBE_PORT_MASK(x)                        (BIT(x) - 1)
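/* e.g. GBE_PORT_MASK(4) == 0x0f, a bitmask covering ports 0-3 */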
129 #define GBE_MASK_NO_PORTS                       0
130
131 #define GBE_DEF_1G_MAC_CONTROL                                  \
132                 (MACSL_GIG_MODE | MACSL_GMII_ENABLE |           \
133                  MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
134
135 #define GBE_DEF_10G_MAC_CONTROL                         \
136                 (MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |         \
137                  MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
138
139 #define GBE_STATSA_MODULE                       0
140 #define GBE_STATSB_MODULE                       1
141 #define GBE_STATSC_MODULE                       2
142 #define GBE_STATSD_MODULE                       3
143
144 #define GBENU_STATS0_MODULE                     0
145 #define GBENU_STATS1_MODULE                     1
146 #define GBENU_STATS2_MODULE                     2
147 #define GBENU_STATS3_MODULE                     3
148 #define GBENU_STATS4_MODULE                     4
149 #define GBENU_STATS5_MODULE                     5
150 #define GBENU_STATS6_MODULE                     6
151 #define GBENU_STATS7_MODULE                     7
152 #define GBENU_STATS8_MODULE                     8
153
154 #define XGBE_STATS0_MODULE                      0
155 #define XGBE_STATS1_MODULE                      1
156 #define XGBE_STATS2_MODULE                      2
157
158 /* s: 0-based slave_port */
159 #define SGMII_BASE(d, s) \
160         (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
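/* Reading of the macro above: 0-based slave ports 0 and 1 resolve to the
 * first SGMII register block (sgmii_port_regs), while ports 2 and up
 * resolve to the second block (sgmii_port34_regs, which by its name
 * corresponds to the GBE_SGMII34_REG_INDEX region declared earlier).
 */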
161
162 #define GBE_TX_QUEUE                            648
163 #define GBE_TXHOOK_ORDER                        0
164 #define GBE_RXHOOK_ORDER                        0
165 #define GBE_DEFAULT_ALE_AGEOUT                  30
166 #define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
167 #define SLAVE_LINK_IS_RGMII(s) \
168         (((s)->link_interface >= RGMII_LINK_MAC_PHY) && \
169          ((s)->link_interface <= RGMII_LINK_MAC_PHY_NO_MDIO))
170 #define SLAVE_LINK_IS_SGMII(s) \
171         ((s)->link_interface <= SGMII_LINK_MAC_PHY_NO_MDIO)
172 #define NETCP_LINK_STATE_INVALID                -1
173
174 #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
175                 offsetof(struct gbe##_##rb, rn)
176 #define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
177                 offsetof(struct gbenu##_##rb, rn)
178 #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
179                 offsetof(struct xgbe##_##rb, rn)
180 #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
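/* Expansion example for the helpers above (plain token pasting; the names
 * used here are fields of struct gbe_priv and struct gbe_switch_regs that
 * appear later in this file):
 *   GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset)
 *     -> gbe_dev->switch_regs_ofs.soft_reset =
 *                offsetof(struct gbe_switch_regs, soft_reset)
 *   GBE_REG_ADDR(gbe_dev, switch_regs, soft_reset)
 *     -> (gbe_dev->switch_regs + gbe_dev->switch_regs_ofs.soft_reset)
 * i.e. the *_ofs structures record per-variant register offsets, and
 * GBE_REG_ADDR turns them back into ioremapped register addresses.
 */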
181
182 #define HOST_TX_PRI_MAP_DEFAULT                 0x00000000
183
184 #if IS_ENABLED(CONFIG_TI_CPTS)
185 /* Px_TS_CTL register fields */
186 #define TS_RX_ANX_F_EN                          BIT(0)
187 #define TS_RX_VLAN_LT1_EN                       BIT(1)
188 #define TS_RX_VLAN_LT2_EN                       BIT(2)
189 #define TS_RX_ANX_D_EN                          BIT(3)
190 #define TS_TX_ANX_F_EN                          BIT(4)
191 #define TS_TX_VLAN_LT1_EN                       BIT(5)
192 #define TS_TX_VLAN_LT2_EN                       BIT(6)
193 #define TS_TX_ANX_D_EN                          BIT(7)
194 #define TS_LT2_EN                               BIT(8)
195 #define TS_RX_ANX_E_EN                          BIT(9)
196 #define TS_TX_ANX_E_EN                          BIT(10)
197 #define TS_MSG_TYPE_EN_SHIFT                    16
198 #define TS_MSG_TYPE_EN_MASK                     0xffff
199
200 /* Px_TS_SEQ_LTYPE register fields */
201 #define TS_SEQ_ID_OFS_SHIFT                     16
202 #define TS_SEQ_ID_OFS_MASK                      0x3f
203
204 /* Px_TS_CTL_LTYPE2 register fields */
205 #define TS_107                                  BIT(16)
206 #define TS_129                                  BIT(17)
207 #define TS_130                                  BIT(18)
208 #define TS_131                                  BIT(19)
209 #define TS_132                                  BIT(20)
210 #define TS_319                                  BIT(21)
211 #define TS_320                                  BIT(22)
212 #define TS_TTL_NONZERO                          BIT(23)
213 #define TS_UNI_EN                               BIT(24)
214 #define TS_UNI_EN_SHIFT                         24
215
216 #define TS_TX_ANX_ALL_EN         \
217         (TS_TX_ANX_D_EN | TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)
218
219 #define TS_RX_ANX_ALL_EN         \
220         (TS_RX_ANX_D_EN | TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)
221
222 #define TS_CTL_DST_PORT                         TS_319
223 #define TS_CTL_DST_PORT_SHIFT                   21
224
225 #define TS_CTL_MADDR_ALL        \
226         (TS_107 | TS_129 | TS_130 | TS_131 | TS_132)
227
228 #define TS_CTL_MADDR_SHIFT                      16
229
230 /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
231 #define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
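/* In the IEEE 1588 messageType field these event messages are numbered
 * 0 (Sync), 1 (Delay_Req), 2 (Pdelay_Req) and 3 (Pdelay_Resp), so
 * EVENT_MSG_BITS is simply the lowest four message-type bits set.
 */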
232 #endif /* CONFIG_TI_CPTS */
233
234 struct xgbe_ss_regs {
235         u32     id_ver;
236         u32     synce_count;
237         u32     synce_mux;
238         u32     control;
239 };
240
241 struct xgbe_switch_regs {
242         u32     id_ver;
243         u32     control;
244         u32     emcontrol;
245         u32     stat_port_en;
246         u32     ptype;
247         u32     soft_idle;
248         u32     thru_rate;
249         u32     gap_thresh;
250         u32     tx_start_wds;
251         u32     flow_control;
252         u32     cppi_thresh;
253 };
254
255 struct xgbe_port_regs {
256         u32     blk_cnt;
257         u32     port_vlan;
258         u32     tx_pri_map;
259         u32     sa_lo;
260         u32     sa_hi;
261         u32     ts_ctl;
262         u32     ts_seq_ltype;
263         u32     ts_vlan;
264         u32     ts_ctl_ltype2;
265         u32     ts_ctl2;
266         u32     control;
267 };
268
269 struct xgbe_host_port_regs {
270         u32     blk_cnt;
271         u32     port_vlan;
272         u32     tx_pri_map;
273         u32     src_id;
274         u32     rx_pri_map;
275         u32     rx_maxlen;
276 };
277
278 struct xgbe_emac_regs {
279         u32     id_ver;
280         u32     mac_control;
281         u32     mac_status;
282         u32     soft_reset;
283         u32     rx_maxlen;
284         u32     __reserved_0;
285         u32     rx_pause;
286         u32     tx_pause;
287         u32     em_control;
288         u32     __reserved_1;
289         u32     tx_gap;
290         u32     rsvd[4];
291 };
292
293 struct xgbe_host_hw_stats {
294         u32     rx_good_frames;
295         u32     rx_broadcast_frames;
296         u32     rx_multicast_frames;
297         u32     __rsvd_0[3];
298         u32     rx_oversized_frames;
299         u32     __rsvd_1;
300         u32     rx_undersized_frames;
301         u32     __rsvd_2;
302         u32     overrun_type4;
303         u32     overrun_type5;
304         u32     rx_bytes;
305         u32     tx_good_frames;
306         u32     tx_broadcast_frames;
307         u32     tx_multicast_frames;
308         u32     __rsvd_3[9];
309         u32     tx_bytes;
310         u32     tx_64byte_frames;
311         u32     tx_65_to_127byte_frames;
312         u32     tx_128_to_255byte_frames;
313         u32     tx_256_to_511byte_frames;
314         u32     tx_512_to_1023byte_frames;
315         u32     tx_1024byte_frames;
316         u32     net_bytes;
317         u32     rx_sof_overruns;
318         u32     rx_mof_overruns;
319         u32     rx_dma_overruns;
320 };
321
322 struct xgbe_hw_stats {
323         u32     rx_good_frames;
324         u32     rx_broadcast_frames;
325         u32     rx_multicast_frames;
326         u32     rx_pause_frames;
327         u32     rx_crc_errors;
328         u32     rx_align_code_errors;
329         u32     rx_oversized_frames;
330         u32     rx_jabber_frames;
331         u32     rx_undersized_frames;
332         u32     rx_fragments;
333         u32     overrun_type4;
334         u32     overrun_type5;
335         u32     rx_bytes;
336         u32     tx_good_frames;
337         u32     tx_broadcast_frames;
338         u32     tx_multicast_frames;
339         u32     tx_pause_frames;
340         u32     tx_deferred_frames;
341         u32     tx_collision_frames;
342         u32     tx_single_coll_frames;
343         u32     tx_mult_coll_frames;
344         u32     tx_excessive_collisions;
345         u32     tx_late_collisions;
346         u32     tx_underrun;
347         u32     tx_carrier_sense_errors;
348         u32     tx_bytes;
349         u32     tx_64byte_frames;
350         u32     tx_65_to_127byte_frames;
351         u32     tx_128_to_255byte_frames;
352         u32     tx_256_to_511byte_frames;
353         u32     tx_512_to_1023byte_frames;
354         u32     tx_1024byte_frames;
355         u32     net_bytes;
356         u32     rx_sof_overruns;
357         u32     rx_mof_overruns;
358         u32     rx_dma_overruns;
359 };
360
361 struct gbenu_ss_regs {
362         u32     id_ver;
363         u32     synce_count;            /* NU */
364         u32     synce_mux;              /* NU */
365         u32     control;                /* 2U */
366         u32     __rsvd_0[2];            /* 2U */
367         u32     rgmii_status;           /* 2U */
368         u32     ss_status;              /* 2U */
369 };
370
371 struct gbenu_switch_regs {
372         u32     id_ver;
373         u32     control;
374         u32     __rsvd_0[2];
375         u32     emcontrol;
376         u32     stat_port_en;
377         u32     ptype;                  /* NU */
378         u32     soft_idle;
379         u32     thru_rate;              /* NU */
380         u32     gap_thresh;             /* NU */
381         u32     tx_start_wds;           /* NU */
382         u32     eee_prescale;           /* 2U */
383         u32     tx_g_oflow_thresh_set;  /* NU */
384         u32     tx_g_oflow_thresh_clr;  /* NU */
385         u32     tx_g_buf_thresh_set_l;  /* NU */
386         u32     tx_g_buf_thresh_set_h;  /* NU */
387         u32     tx_g_buf_thresh_clr_l;  /* NU */
388         u32     tx_g_buf_thresh_clr_h;  /* NU */
389 };
390
391 struct gbenu_port_regs {
392         u32     __rsvd_0;
393         u32     control;
394         u32     max_blks;               /* 2U */
395         u32     mem_align1;
396         u32     blk_cnt;
397         u32     port_vlan;
398         u32     tx_pri_map;             /* NU */
399         u32     pri_ctl;                /* 2U */
400         u32     rx_pri_map;
401         u32     rx_maxlen;
402         u32     tx_blks_pri;            /* NU */
403         u32     __rsvd_1;
404         u32     idle2lpi;               /* 2U */
405         u32     lpi2idle;               /* 2U */
406         u32     eee_status;             /* 2U */
407         u32     __rsvd_2;
408         u32     __rsvd_3[176];          /* NU: more to add */
409         u32     __rsvd_4[2];
410         u32     sa_lo;
411         u32     sa_hi;
412         u32     ts_ctl;
413         u32     ts_seq_ltype;
414         u32     ts_vlan;
415         u32     ts_ctl_ltype2;
416         u32     ts_ctl2;
417 };
418
419 struct gbenu_host_port_regs {
420         u32     __rsvd_0;
421         u32     control;
422         u32     flow_id_offset;         /* 2U */
423         u32     __rsvd_1;
424         u32     blk_cnt;
425         u32     port_vlan;
426         u32     tx_pri_map;             /* NU */
427         u32     pri_ctl;
428         u32     rx_pri_map;
429         u32     rx_maxlen;
430         u32     tx_blks_pri;            /* NU */
431         u32     __rsvd_2;
432         u32     idle2lpi;               /* 2U */
433         u32     lpi2wake;               /* 2U */
434         u32     eee_status;             /* 2U */
435         u32     __rsvd_3;
436         u32     __rsvd_4[184];          /* NU */
437         u32     host_blks_pri;          /* NU */
438 };
439
440 struct gbenu_emac_regs {
441         u32     mac_control;
442         u32     mac_status;
443         u32     soft_reset;
444         u32     boff_test;
445         u32     rx_pause;
446         u32     __rsvd_0[11];           /* NU */
447         u32     tx_pause;
448         u32     __rsvd_1[11];           /* NU */
449         u32     em_control;
450         u32     tx_gap;
451 };
452
453 /* Some hw stat regs are applicable to the slave ports only.
454  * This is handled by the gbenu_et_stats table below.  Also, some
455  * are for SS version NU only and some are for 2U only.
456  */
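/* Example of that split as reflected in gbenu_et_stats further below: a
 * slave-only counter such as rx_pause_frames appears in the per-port
 * GBENU_STATS_Pn() groups but not in the GBENU_STATS_HOST() group, while a
 * counter like rx_good_frames appears in both.
 */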
457 struct gbenu_hw_stats {
458         u32     rx_good_frames;
459         u32     rx_broadcast_frames;
460         u32     rx_multicast_frames;
461         u32     rx_pause_frames;                /* slave */
462         u32     rx_crc_errors;
463         u32     rx_align_code_errors;           /* slave */
464         u32     rx_oversized_frames;
465         u32     rx_jabber_frames;               /* slave */
466         u32     rx_undersized_frames;
467         u32     rx_fragments;                   /* slave */
468         u32     ale_drop;
469         u32     ale_overrun_drop;
470         u32     rx_bytes;
471         u32     tx_good_frames;
472         u32     tx_broadcast_frames;
473         u32     tx_multicast_frames;
474         u32     tx_pause_frames;                /* slave */
475         u32     tx_deferred_frames;             /* slave */
476         u32     tx_collision_frames;            /* slave */
477         u32     tx_single_coll_frames;          /* slave */
478         u32     tx_mult_coll_frames;            /* slave */
479         u32     tx_excessive_collisions;        /* slave */
480         u32     tx_late_collisions;             /* slave */
481         u32     rx_ipg_error;                   /* slave 10G only */
482         u32     tx_carrier_sense_errors;        /* slave */
483         u32     tx_bytes;
484         u32     tx_64B_frames;
485         u32     tx_65_to_127B_frames;
486         u32     tx_128_to_255B_frames;
487         u32     tx_256_to_511B_frames;
488         u32     tx_512_to_1023B_frames;
489         u32     tx_1024B_frames;
490         u32     net_bytes;
491         u32     rx_bottom_fifo_drop;
492         u32     rx_port_mask_drop;
493         u32     rx_top_fifo_drop;
494         u32     ale_rate_limit_drop;
495         u32     ale_vid_ingress_drop;
496         u32     ale_da_eq_sa_drop;
497         u32     __rsvd_0[3];
498         u32     ale_unknown_ucast;
499         u32     ale_unknown_ucast_bytes;
500         u32     ale_unknown_mcast;
501         u32     ale_unknown_mcast_bytes;
502         u32     ale_unknown_bcast;
503         u32     ale_unknown_bcast_bytes;
504         u32     ale_pol_match;
505         u32     ale_pol_match_red;              /* NU */
506         u32     ale_pol_match_yellow;           /* NU */
507         u32     __rsvd_1[44];
508         u32     tx_mem_protect_err;
509         /* following NU only */
510         u32     tx_pri0;
511         u32     tx_pri1;
512         u32     tx_pri2;
513         u32     tx_pri3;
514         u32     tx_pri4;
515         u32     tx_pri5;
516         u32     tx_pri6;
517         u32     tx_pri7;
518         u32     tx_pri0_bcnt;
519         u32     tx_pri1_bcnt;
520         u32     tx_pri2_bcnt;
521         u32     tx_pri3_bcnt;
522         u32     tx_pri4_bcnt;
523         u32     tx_pri5_bcnt;
524         u32     tx_pri6_bcnt;
525         u32     tx_pri7_bcnt;
526         u32     tx_pri0_drop;
527         u32     tx_pri1_drop;
528         u32     tx_pri2_drop;
529         u32     tx_pri3_drop;
530         u32     tx_pri4_drop;
531         u32     tx_pri5_drop;
532         u32     tx_pri6_drop;
533         u32     tx_pri7_drop;
534         u32     tx_pri0_drop_bcnt;
535         u32     tx_pri1_drop_bcnt;
536         u32     tx_pri2_drop_bcnt;
537         u32     tx_pri3_drop_bcnt;
538         u32     tx_pri4_drop_bcnt;
539         u32     tx_pri5_drop_bcnt;
540         u32     tx_pri6_drop_bcnt;
541         u32     tx_pri7_drop_bcnt;
542 };
543
544 #define GBENU_HW_STATS_REG_MAP_SZ       0x200
545
546 struct gbe_ss_regs {
547         u32     id_ver;
548         u32     synce_count;
549         u32     synce_mux;
550 };
551
552 struct gbe_ss_regs_ofs {
553         u16     id_ver;
554         u16     control;
555         u16     rgmii_status; /* 2U */
556 };
557
558 struct gbe_switch_regs {
559         u32     id_ver;
560         u32     control;
561         u32     soft_reset;
562         u32     stat_port_en;
563         u32     ptype;
564         u32     soft_idle;
565         u32     thru_rate;
566         u32     gap_thresh;
567         u32     tx_start_wds;
568         u32     flow_control;
569 };
570
571 struct gbe_switch_regs_ofs {
572         u16     id_ver;
573         u16     control;
574         u16     soft_reset;
575         u16     emcontrol;
576         u16     stat_port_en;
577         u16     ptype;
578         u16     flow_control;
579 };
580
581 struct gbe_port_regs {
582         u32     max_blks;
583         u32     blk_cnt;
584         u32     port_vlan;
585         u32     tx_pri_map;
586         u32     sa_lo;
587         u32     sa_hi;
588         u32     ts_ctl;
589         u32     ts_seq_ltype;
590         u32     ts_vlan;
591         u32     ts_ctl_ltype2;
592         u32     ts_ctl2;
593 };
594
595 struct gbe_port_regs_ofs {
596         u16     port_vlan;
597         u16     tx_pri_map;
598         u16     rx_pri_map;
599         u16     sa_lo;
600         u16     sa_hi;
601         u16     ts_ctl;
602         u16     ts_seq_ltype;
603         u16     ts_vlan;
604         u16     ts_ctl_ltype2;
605         u16     ts_ctl2;
606         u16     rx_maxlen;      /* 2U, NU */
607 };
608
609 struct gbe_host_port_regs {
610         u32     src_id;
611         u32     port_vlan;
612         u32     rx_pri_map;
613         u32     rx_maxlen;
614 };
615
616 struct gbe_host_port_regs_ofs {
617         u16     port_vlan;
618         u16     tx_pri_map;
619         u16     rx_maxlen;
620 };
621
622 struct gbe_emac_regs {
623         u32     id_ver;
624         u32     mac_control;
625         u32     mac_status;
626         u32     soft_reset;
627         u32     rx_maxlen;
628         u32     __reserved_0;
629         u32     rx_pause;
630         u32     tx_pause;
631         u32     __reserved_1;
632         u32     rx_pri_map;
633         u32     rsvd[6];
634 };
635
636 struct gbe_emac_regs_ofs {
637         u16     mac_control;
638         u16     soft_reset;
639         u16     rx_maxlen;
640 };
641
642 struct gbe_hw_stats {
643         u32     rx_good_frames;
644         u32     rx_broadcast_frames;
645         u32     rx_multicast_frames;
646         u32     rx_pause_frames;
647         u32     rx_crc_errors;
648         u32     rx_align_code_errors;
649         u32     rx_oversized_frames;
650         u32     rx_jabber_frames;
651         u32     rx_undersized_frames;
652         u32     rx_fragments;
653         u32     __pad_0[2];
654         u32     rx_bytes;
655         u32     tx_good_frames;
656         u32     tx_broadcast_frames;
657         u32     tx_multicast_frames;
658         u32     tx_pause_frames;
659         u32     tx_deferred_frames;
660         u32     tx_collision_frames;
661         u32     tx_single_coll_frames;
662         u32     tx_mult_coll_frames;
663         u32     tx_excessive_collisions;
664         u32     tx_late_collisions;
665         u32     tx_underrun;
666         u32     tx_carrier_sense_errors;
667         u32     tx_bytes;
668         u32     tx_64byte_frames;
669         u32     tx_65_to_127byte_frames;
670         u32     tx_128_to_255byte_frames;
671         u32     tx_256_to_511byte_frames;
672         u32     tx_512_to_1023byte_frames;
673         u32     tx_1024byte_frames;
674         u32     net_bytes;
675         u32     rx_sof_overruns;
676         u32     rx_mof_overruns;
677         u32     rx_dma_overruns;
678 };
679
680 #define GBE_MAX_HW_STAT_MODS                    9
681 #define GBE_HW_STATS_REG_MAP_SZ                 0x100
682
683 struct ts_ctl {
684         int     uni;
685         u8      dst_port_map;
686         u8      maddr_map;
687         u8      ts_mcast_type;
688 };
689
690 struct gbe_slave {
691         void __iomem                    *port_regs;
692         void __iomem                    *emac_regs;
693         struct gbe_port_regs_ofs        port_regs_ofs;
694         struct gbe_emac_regs_ofs        emac_regs_ofs;
695         int                             slave_num; /* 0 based logical number */
696         int                             port_num;  /* actual port number */
697         atomic_t                        link_state;
698         bool                            open;
699         struct phy_device               *phy;
700         u32                             link_interface;
701         u32                             mac_control;
702         u8                              phy_port_t;
703         struct device_node              *node;
704         struct device_node              *phy_node;
705         struct ts_ctl                   ts_ctl;
706         struct list_head                slave_list;
707 };
708
709 struct gbe_priv {
710         struct device                   *dev;
711         struct netcp_device             *netcp_device;
712         struct timer_list               timer;
713         u32                             num_slaves;
714         u32                             ale_entries;
715         u32                             ale_ports;
716         bool                            enable_ale;
717         u8                              max_num_slaves;
718         u8                              max_num_ports; /* max_num_slaves + 1 */
719         u8                              num_stats_mods;
720         struct netcp_tx_pipe            tx_pipe;
721
722         int                             host_port;
723         u32                             rx_packet_max;
724         u32                             ss_version;
725         u32                             stats_en_mask;
726
727         void __iomem                    *ss_regs;
728         void __iomem                    *switch_regs;
729         void __iomem                    *host_port_regs;
730         void __iomem                    *ale_reg;
731         void __iomem                    *cpts_reg;
732         void __iomem                    *sgmii_port_regs;
733         void __iomem                    *sgmii_port34_regs;
734         void __iomem                    *xgbe_serdes_regs;
735         void __iomem                    *hw_stats_regs[GBE_MAX_HW_STAT_MODS];
736
737         struct gbe_ss_regs_ofs          ss_regs_ofs;
738         struct gbe_switch_regs_ofs      switch_regs_ofs;
739         struct gbe_host_port_regs_ofs   host_port_regs_ofs;
740
741         struct cpsw_ale                 *ale;
742         unsigned int                    tx_queue_id;
743         const char                      *dma_chan_name;
744
745         struct list_head                gbe_intf_head;
746         struct list_head                secondary_slaves;
747         struct net_device               *dummy_ndev;
748
749         u64                             *hw_stats;
750         u32                             *hw_stats_prev;
751         const struct netcp_ethtool_stat *et_stats;
752         int                             num_et_stats;
753         /*  Lock for updating the hwstats */
754         spinlock_t                      hw_stats_lock;
755
756         int                             cpts_registered;
757         struct cpts                     *cpts;
758         int                             rx_ts_enabled;
759         int                             tx_ts_enabled;
760 };
761
762 struct gbe_intf {
763         struct net_device       *ndev;
764         struct device           *dev;
765         struct gbe_priv         *gbe_dev;
766         struct netcp_tx_pipe    tx_pipe;
767         struct gbe_slave        *slave;
768         struct list_head        gbe_intf_list;
769         unsigned long           active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
770 };
771
772 static struct netcp_module gbe_module;
773 static struct netcp_module xgbe_module;
774
775 /* Statistic management */
776 struct netcp_ethtool_stat {
777         char desc[ETH_GSTRING_LEN];
778         int type;
779         u32 size;
780         int offset;
781 };
782
783 #define GBE_STATSA_INFO(field)                                          \
784 {                                                                       \
785         "GBE_A:"#field, GBE_STATSA_MODULE,                              \
786         sizeof_field(struct gbe_hw_stats, field),                       \
787         offsetof(struct gbe_hw_stats, field)                            \
788 }
789
790 #define GBE_STATSB_INFO(field)                                          \
791 {                                                                       \
792         "GBE_B:"#field, GBE_STATSB_MODULE,                              \
793         sizeof_field(struct gbe_hw_stats, field),                       \
794         offsetof(struct gbe_hw_stats, field)                            \
795 }
796
797 #define GBE_STATSC_INFO(field)                                          \
798 {                                                                       \
799         "GBE_C:"#field, GBE_STATSC_MODULE,                              \
800         sizeof_field(struct gbe_hw_stats, field),                       \
801         offsetof(struct gbe_hw_stats, field)                            \
802 }
803
804 #define GBE_STATSD_INFO(field)                                          \
805 {                                                                       \
806         "GBE_D:"#field, GBE_STATSD_MODULE,                              \
807         sizeof_field(struct gbe_hw_stats, field),                       \
808         offsetof(struct gbe_hw_stats, field)                            \
809 }
810
811 static const struct netcp_ethtool_stat gbe13_et_stats[] = {
812         /* GBE module A */
813         GBE_STATSA_INFO(rx_good_frames),
814         GBE_STATSA_INFO(rx_broadcast_frames),
815         GBE_STATSA_INFO(rx_multicast_frames),
816         GBE_STATSA_INFO(rx_pause_frames),
817         GBE_STATSA_INFO(rx_crc_errors),
818         GBE_STATSA_INFO(rx_align_code_errors),
819         GBE_STATSA_INFO(rx_oversized_frames),
820         GBE_STATSA_INFO(rx_jabber_frames),
821         GBE_STATSA_INFO(rx_undersized_frames),
822         GBE_STATSA_INFO(rx_fragments),
823         GBE_STATSA_INFO(rx_bytes),
824         GBE_STATSA_INFO(tx_good_frames),
825         GBE_STATSA_INFO(tx_broadcast_frames),
826         GBE_STATSA_INFO(tx_multicast_frames),
827         GBE_STATSA_INFO(tx_pause_frames),
828         GBE_STATSA_INFO(tx_deferred_frames),
829         GBE_STATSA_INFO(tx_collision_frames),
830         GBE_STATSA_INFO(tx_single_coll_frames),
831         GBE_STATSA_INFO(tx_mult_coll_frames),
832         GBE_STATSA_INFO(tx_excessive_collisions),
833         GBE_STATSA_INFO(tx_late_collisions),
834         GBE_STATSA_INFO(tx_underrun),
835         GBE_STATSA_INFO(tx_carrier_sense_errors),
836         GBE_STATSA_INFO(tx_bytes),
837         GBE_STATSA_INFO(tx_64byte_frames),
838         GBE_STATSA_INFO(tx_65_to_127byte_frames),
839         GBE_STATSA_INFO(tx_128_to_255byte_frames),
840         GBE_STATSA_INFO(tx_256_to_511byte_frames),
841         GBE_STATSA_INFO(tx_512_to_1023byte_frames),
842         GBE_STATSA_INFO(tx_1024byte_frames),
843         GBE_STATSA_INFO(net_bytes),
844         GBE_STATSA_INFO(rx_sof_overruns),
845         GBE_STATSA_INFO(rx_mof_overruns),
846         GBE_STATSA_INFO(rx_dma_overruns),
847         /* GBE module B */
848         GBE_STATSB_INFO(rx_good_frames),
849         GBE_STATSB_INFO(rx_broadcast_frames),
850         GBE_STATSB_INFO(rx_multicast_frames),
851         GBE_STATSB_INFO(rx_pause_frames),
852         GBE_STATSB_INFO(rx_crc_errors),
853         GBE_STATSB_INFO(rx_align_code_errors),
854         GBE_STATSB_INFO(rx_oversized_frames),
855         GBE_STATSB_INFO(rx_jabber_frames),
856         GBE_STATSB_INFO(rx_undersized_frames),
857         GBE_STATSB_INFO(rx_fragments),
858         GBE_STATSB_INFO(rx_bytes),
859         GBE_STATSB_INFO(tx_good_frames),
860         GBE_STATSB_INFO(tx_broadcast_frames),
861         GBE_STATSB_INFO(tx_multicast_frames),
862         GBE_STATSB_INFO(tx_pause_frames),
863         GBE_STATSB_INFO(tx_deferred_frames),
864         GBE_STATSB_INFO(tx_collision_frames),
865         GBE_STATSB_INFO(tx_single_coll_frames),
866         GBE_STATSB_INFO(tx_mult_coll_frames),
867         GBE_STATSB_INFO(tx_excessive_collisions),
868         GBE_STATSB_INFO(tx_late_collisions),
869         GBE_STATSB_INFO(tx_underrun),
870         GBE_STATSB_INFO(tx_carrier_sense_errors),
871         GBE_STATSB_INFO(tx_bytes),
872         GBE_STATSB_INFO(tx_64byte_frames),
873         GBE_STATSB_INFO(tx_65_to_127byte_frames),
874         GBE_STATSB_INFO(tx_128_to_255byte_frames),
875         GBE_STATSB_INFO(tx_256_to_511byte_frames),
876         GBE_STATSB_INFO(tx_512_to_1023byte_frames),
877         GBE_STATSB_INFO(tx_1024byte_frames),
878         GBE_STATSB_INFO(net_bytes),
879         GBE_STATSB_INFO(rx_sof_overruns),
880         GBE_STATSB_INFO(rx_mof_overruns),
881         GBE_STATSB_INFO(rx_dma_overruns),
882         /* GBE module C */
883         GBE_STATSC_INFO(rx_good_frames),
884         GBE_STATSC_INFO(rx_broadcast_frames),
885         GBE_STATSC_INFO(rx_multicast_frames),
886         GBE_STATSC_INFO(rx_pause_frames),
887         GBE_STATSC_INFO(rx_crc_errors),
888         GBE_STATSC_INFO(rx_align_code_errors),
889         GBE_STATSC_INFO(rx_oversized_frames),
890         GBE_STATSC_INFO(rx_jabber_frames),
891         GBE_STATSC_INFO(rx_undersized_frames),
892         GBE_STATSC_INFO(rx_fragments),
893         GBE_STATSC_INFO(rx_bytes),
894         GBE_STATSC_INFO(tx_good_frames),
895         GBE_STATSC_INFO(tx_broadcast_frames),
896         GBE_STATSC_INFO(tx_multicast_frames),
897         GBE_STATSC_INFO(tx_pause_frames),
898         GBE_STATSC_INFO(tx_deferred_frames),
899         GBE_STATSC_INFO(tx_collision_frames),
900         GBE_STATSC_INFO(tx_single_coll_frames),
901         GBE_STATSC_INFO(tx_mult_coll_frames),
902         GBE_STATSC_INFO(tx_excessive_collisions),
903         GBE_STATSC_INFO(tx_late_collisions),
904         GBE_STATSC_INFO(tx_underrun),
905         GBE_STATSC_INFO(tx_carrier_sense_errors),
906         GBE_STATSC_INFO(tx_bytes),
907         GBE_STATSC_INFO(tx_64byte_frames),
908         GBE_STATSC_INFO(tx_65_to_127byte_frames),
909         GBE_STATSC_INFO(tx_128_to_255byte_frames),
910         GBE_STATSC_INFO(tx_256_to_511byte_frames),
911         GBE_STATSC_INFO(tx_512_to_1023byte_frames),
912         GBE_STATSC_INFO(tx_1024byte_frames),
913         GBE_STATSC_INFO(net_bytes),
914         GBE_STATSC_INFO(rx_sof_overruns),
915         GBE_STATSC_INFO(rx_mof_overruns),
916         GBE_STATSC_INFO(rx_dma_overruns),
917         /* GBE module D */
918         GBE_STATSD_INFO(rx_good_frames),
919         GBE_STATSD_INFO(rx_broadcast_frames),
920         GBE_STATSD_INFO(rx_multicast_frames),
921         GBE_STATSD_INFO(rx_pause_frames),
922         GBE_STATSD_INFO(rx_crc_errors),
923         GBE_STATSD_INFO(rx_align_code_errors),
924         GBE_STATSD_INFO(rx_oversized_frames),
925         GBE_STATSD_INFO(rx_jabber_frames),
926         GBE_STATSD_INFO(rx_undersized_frames),
927         GBE_STATSD_INFO(rx_fragments),
928         GBE_STATSD_INFO(rx_bytes),
929         GBE_STATSD_INFO(tx_good_frames),
930         GBE_STATSD_INFO(tx_broadcast_frames),
931         GBE_STATSD_INFO(tx_multicast_frames),
932         GBE_STATSD_INFO(tx_pause_frames),
933         GBE_STATSD_INFO(tx_deferred_frames),
934         GBE_STATSD_INFO(tx_collision_frames),
935         GBE_STATSD_INFO(tx_single_coll_frames),
936         GBE_STATSD_INFO(tx_mult_coll_frames),
937         GBE_STATSD_INFO(tx_excessive_collisions),
938         GBE_STATSD_INFO(tx_late_collisions),
939         GBE_STATSD_INFO(tx_underrun),
940         GBE_STATSD_INFO(tx_carrier_sense_errors),
941         GBE_STATSD_INFO(tx_bytes),
942         GBE_STATSD_INFO(tx_64byte_frames),
943         GBE_STATSD_INFO(tx_65_to_127byte_frames),
944         GBE_STATSD_INFO(tx_128_to_255byte_frames),
945         GBE_STATSD_INFO(tx_256_to_511byte_frames),
946         GBE_STATSD_INFO(tx_512_to_1023byte_frames),
947         GBE_STATSD_INFO(tx_1024byte_frames),
948         GBE_STATSD_INFO(net_bytes),
949         GBE_STATSD_INFO(rx_sof_overruns),
950         GBE_STATSD_INFO(rx_mof_overruns),
951         GBE_STATSD_INFO(rx_dma_overruns),
952 };
953
954 /* Number of GBENU_STATS_HOST() entries in gbenu_et_stats below */
955 #define GBENU_ET_STATS_HOST_SIZE        52
956
957 #define GBENU_STATS_HOST(field)                                 \
958 {                                                               \
959         "GBE_HOST:"#field, GBENU_STATS0_MODULE,                 \
960         sizeof_field(struct gbenu_hw_stats, field),             \
961         offsetof(struct gbenu_hw_stats, field)                  \
962 }
963
964 /* Number of entries in each per-port GBENU_STATS_Pn() group below */
965 #define GBENU_ET_STATS_PORT_SIZE        65
966
967 #define GBENU_STATS_P1(field)                                   \
968 {                                                               \
969         "GBE_P1:"#field, GBENU_STATS1_MODULE,                   \
970         sizeof_field(struct gbenu_hw_stats, field),             \
971         offsetof(struct gbenu_hw_stats, field)                  \
972 }
973
974 #define GBENU_STATS_P2(field)                                   \
975 {                                                               \
976         "GBE_P2:"#field, GBENU_STATS2_MODULE,                   \
977         sizeof_field(struct gbenu_hw_stats, field),             \
978         offsetof(struct gbenu_hw_stats, field)                  \
979 }
980
981 #define GBENU_STATS_P3(field)                                   \
982 {                                                               \
983         "GBE_P3:"#field, GBENU_STATS3_MODULE,                   \
984         sizeof_field(struct gbenu_hw_stats, field),             \
985         offsetof(struct gbenu_hw_stats, field)                  \
986 }
987
988 #define GBENU_STATS_P4(field)                                   \
989 {                                                               \
990         "GBE_P4:"#field, GBENU_STATS4_MODULE,                   \
991         sizeof_field(struct gbenu_hw_stats, field),             \
992         offsetof(struct gbenu_hw_stats, field)                  \
993 }
994
995 #define GBENU_STATS_P5(field)                                   \
996 {                                                               \
997         "GBE_P5:"#field, GBENU_STATS5_MODULE,                   \
998         sizeof_field(struct gbenu_hw_stats, field),             \
999         offsetof(struct gbenu_hw_stats, field)                  \
1000 }
1001
1002 #define GBENU_STATS_P6(field)                                   \
1003 {                                                               \
1004         "GBE_P6:"#field, GBENU_STATS6_MODULE,                   \
1005         sizeof_field(struct gbenu_hw_stats, field),             \
1006         offsetof(struct gbenu_hw_stats, field)                  \
1007 }
1008
1009 #define GBENU_STATS_P7(field)                                   \
1010 {                                                               \
1011         "GBE_P7:"#field, GBENU_STATS7_MODULE,                   \
1012         sizeof_field(struct gbenu_hw_stats, field),             \
1013         offsetof(struct gbenu_hw_stats, field)                  \
1014 }
1015
1016 #define GBENU_STATS_P8(field)                                   \
1017 {                                                               \
1018         "GBE_P8:"#field, GBENU_STATS8_MODULE,                   \
1019         sizeof_field(struct gbenu_hw_stats, field),             \
1020         offsetof(struct gbenu_hw_stats, field)                  \
1021 }
1022
1023 static const struct netcp_ethtool_stat gbenu_et_stats[] = {
1024         /* GBENU Host Module */
1025         GBENU_STATS_HOST(rx_good_frames),
1026         GBENU_STATS_HOST(rx_broadcast_frames),
1027         GBENU_STATS_HOST(rx_multicast_frames),
1028         GBENU_STATS_HOST(rx_crc_errors),
1029         GBENU_STATS_HOST(rx_oversized_frames),
1030         GBENU_STATS_HOST(rx_undersized_frames),
1031         GBENU_STATS_HOST(ale_drop),
1032         GBENU_STATS_HOST(ale_overrun_drop),
1033         GBENU_STATS_HOST(rx_bytes),
1034         GBENU_STATS_HOST(tx_good_frames),
1035         GBENU_STATS_HOST(tx_broadcast_frames),
1036         GBENU_STATS_HOST(tx_multicast_frames),
1037         GBENU_STATS_HOST(tx_bytes),
1038         GBENU_STATS_HOST(tx_64B_frames),
1039         GBENU_STATS_HOST(tx_65_to_127B_frames),
1040         GBENU_STATS_HOST(tx_128_to_255B_frames),
1041         GBENU_STATS_HOST(tx_256_to_511B_frames),
1042         GBENU_STATS_HOST(tx_512_to_1023B_frames),
1043         GBENU_STATS_HOST(tx_1024B_frames),
1044         GBENU_STATS_HOST(net_bytes),
1045         GBENU_STATS_HOST(rx_bottom_fifo_drop),
1046         GBENU_STATS_HOST(rx_port_mask_drop),
1047         GBENU_STATS_HOST(rx_top_fifo_drop),
1048         GBENU_STATS_HOST(ale_rate_limit_drop),
1049         GBENU_STATS_HOST(ale_vid_ingress_drop),
1050         GBENU_STATS_HOST(ale_da_eq_sa_drop),
1051         GBENU_STATS_HOST(ale_unknown_ucast),
1052         GBENU_STATS_HOST(ale_unknown_ucast_bytes),
1053         GBENU_STATS_HOST(ale_unknown_mcast),
1054         GBENU_STATS_HOST(ale_unknown_mcast_bytes),
1055         GBENU_STATS_HOST(ale_unknown_bcast),
1056         GBENU_STATS_HOST(ale_unknown_bcast_bytes),
1057         GBENU_STATS_HOST(ale_pol_match),
1058         GBENU_STATS_HOST(ale_pol_match_red),
1059         GBENU_STATS_HOST(ale_pol_match_yellow),
1060         GBENU_STATS_HOST(tx_mem_protect_err),
1061         GBENU_STATS_HOST(tx_pri0_drop),
1062         GBENU_STATS_HOST(tx_pri1_drop),
1063         GBENU_STATS_HOST(tx_pri2_drop),
1064         GBENU_STATS_HOST(tx_pri3_drop),
1065         GBENU_STATS_HOST(tx_pri4_drop),
1066         GBENU_STATS_HOST(tx_pri5_drop),
1067         GBENU_STATS_HOST(tx_pri6_drop),
1068         GBENU_STATS_HOST(tx_pri7_drop),
1069         GBENU_STATS_HOST(tx_pri0_drop_bcnt),
1070         GBENU_STATS_HOST(tx_pri1_drop_bcnt),
1071         GBENU_STATS_HOST(tx_pri2_drop_bcnt),
1072         GBENU_STATS_HOST(tx_pri3_drop_bcnt),
1073         GBENU_STATS_HOST(tx_pri4_drop_bcnt),
1074         GBENU_STATS_HOST(tx_pri5_drop_bcnt),
1075         GBENU_STATS_HOST(tx_pri6_drop_bcnt),
1076         GBENU_STATS_HOST(tx_pri7_drop_bcnt),
1077         /* GBENU Module 1 */
1078         GBENU_STATS_P1(rx_good_frames),
1079         GBENU_STATS_P1(rx_broadcast_frames),
1080         GBENU_STATS_P1(rx_multicast_frames),
1081         GBENU_STATS_P1(rx_pause_frames),
1082         GBENU_STATS_P1(rx_crc_errors),
1083         GBENU_STATS_P1(rx_align_code_errors),
1084         GBENU_STATS_P1(rx_oversized_frames),
1085         GBENU_STATS_P1(rx_jabber_frames),
1086         GBENU_STATS_P1(rx_undersized_frames),
1087         GBENU_STATS_P1(rx_fragments),
1088         GBENU_STATS_P1(ale_drop),
1089         GBENU_STATS_P1(ale_overrun_drop),
1090         GBENU_STATS_P1(rx_bytes),
1091         GBENU_STATS_P1(tx_good_frames),
1092         GBENU_STATS_P1(tx_broadcast_frames),
1093         GBENU_STATS_P1(tx_multicast_frames),
1094         GBENU_STATS_P1(tx_pause_frames),
1095         GBENU_STATS_P1(tx_deferred_frames),
1096         GBENU_STATS_P1(tx_collision_frames),
1097         GBENU_STATS_P1(tx_single_coll_frames),
1098         GBENU_STATS_P1(tx_mult_coll_frames),
1099         GBENU_STATS_P1(tx_excessive_collisions),
1100         GBENU_STATS_P1(tx_late_collisions),
1101         GBENU_STATS_P1(rx_ipg_error),
1102         GBENU_STATS_P1(tx_carrier_sense_errors),
1103         GBENU_STATS_P1(tx_bytes),
1104         GBENU_STATS_P1(tx_64B_frames),
1105         GBENU_STATS_P1(tx_65_to_127B_frames),
1106         GBENU_STATS_P1(tx_128_to_255B_frames),
1107         GBENU_STATS_P1(tx_256_to_511B_frames),
1108         GBENU_STATS_P1(tx_512_to_1023B_frames),
1109         GBENU_STATS_P1(tx_1024B_frames),
1110         GBENU_STATS_P1(net_bytes),
1111         GBENU_STATS_P1(rx_bottom_fifo_drop),
1112         GBENU_STATS_P1(rx_port_mask_drop),
1113         GBENU_STATS_P1(rx_top_fifo_drop),
1114         GBENU_STATS_P1(ale_rate_limit_drop),
1115         GBENU_STATS_P1(ale_vid_ingress_drop),
1116         GBENU_STATS_P1(ale_da_eq_sa_drop),
1117         GBENU_STATS_P1(ale_unknown_ucast),
1118         GBENU_STATS_P1(ale_unknown_ucast_bytes),
1119         GBENU_STATS_P1(ale_unknown_mcast),
1120         GBENU_STATS_P1(ale_unknown_mcast_bytes),
1121         GBENU_STATS_P1(ale_unknown_bcast),
1122         GBENU_STATS_P1(ale_unknown_bcast_bytes),
1123         GBENU_STATS_P1(ale_pol_match),
1124         GBENU_STATS_P1(ale_pol_match_red),
1125         GBENU_STATS_P1(ale_pol_match_yellow),
1126         GBENU_STATS_P1(tx_mem_protect_err),
1127         GBENU_STATS_P1(tx_pri0_drop),
1128         GBENU_STATS_P1(tx_pri1_drop),
1129         GBENU_STATS_P1(tx_pri2_drop),
1130         GBENU_STATS_P1(tx_pri3_drop),
1131         GBENU_STATS_P1(tx_pri4_drop),
1132         GBENU_STATS_P1(tx_pri5_drop),
1133         GBENU_STATS_P1(tx_pri6_drop),
1134         GBENU_STATS_P1(tx_pri7_drop),
1135         GBENU_STATS_P1(tx_pri0_drop_bcnt),
1136         GBENU_STATS_P1(tx_pri1_drop_bcnt),
1137         GBENU_STATS_P1(tx_pri2_drop_bcnt),
1138         GBENU_STATS_P1(tx_pri3_drop_bcnt),
1139         GBENU_STATS_P1(tx_pri4_drop_bcnt),
1140         GBENU_STATS_P1(tx_pri5_drop_bcnt),
1141         GBENU_STATS_P1(tx_pri6_drop_bcnt),
1142         GBENU_STATS_P1(tx_pri7_drop_bcnt),
1143         /* GBENU Module 2 */
1144         GBENU_STATS_P2(rx_good_frames),
1145         GBENU_STATS_P2(rx_broadcast_frames),
1146         GBENU_STATS_P2(rx_multicast_frames),
1147         GBENU_STATS_P2(rx_pause_frames),
1148         GBENU_STATS_P2(rx_crc_errors),
1149         GBENU_STATS_P2(rx_align_code_errors),
1150         GBENU_STATS_P2(rx_oversized_frames),
1151         GBENU_STATS_P2(rx_jabber_frames),
1152         GBENU_STATS_P2(rx_undersized_frames),
1153         GBENU_STATS_P2(rx_fragments),
1154         GBENU_STATS_P2(ale_drop),
1155         GBENU_STATS_P2(ale_overrun_drop),
1156         GBENU_STATS_P2(rx_bytes),
1157         GBENU_STATS_P2(tx_good_frames),
1158         GBENU_STATS_P2(tx_broadcast_frames),
1159         GBENU_STATS_P2(tx_multicast_frames),
1160         GBENU_STATS_P2(tx_pause_frames),
1161         GBENU_STATS_P2(tx_deferred_frames),
1162         GBENU_STATS_P2(tx_collision_frames),
1163         GBENU_STATS_P2(tx_single_coll_frames),
1164         GBENU_STATS_P2(tx_mult_coll_frames),
1165         GBENU_STATS_P2(tx_excessive_collisions),
1166         GBENU_STATS_P2(tx_late_collisions),
1167         GBENU_STATS_P2(rx_ipg_error),
1168         GBENU_STATS_P2(tx_carrier_sense_errors),
1169         GBENU_STATS_P2(tx_bytes),
1170         GBENU_STATS_P2(tx_64B_frames),
1171         GBENU_STATS_P2(tx_65_to_127B_frames),
1172         GBENU_STATS_P2(tx_128_to_255B_frames),
1173         GBENU_STATS_P2(tx_256_to_511B_frames),
1174         GBENU_STATS_P2(tx_512_to_1023B_frames),
1175         GBENU_STATS_P2(tx_1024B_frames),
1176         GBENU_STATS_P2(net_bytes),
1177         GBENU_STATS_P2(rx_bottom_fifo_drop),
1178         GBENU_STATS_P2(rx_port_mask_drop),
1179         GBENU_STATS_P2(rx_top_fifo_drop),
1180         GBENU_STATS_P2(ale_rate_limit_drop),
1181         GBENU_STATS_P2(ale_vid_ingress_drop),
1182         GBENU_STATS_P2(ale_da_eq_sa_drop),
1183         GBENU_STATS_P2(ale_unknown_ucast),
1184         GBENU_STATS_P2(ale_unknown_ucast_bytes),
1185         GBENU_STATS_P2(ale_unknown_mcast),
1186         GBENU_STATS_P2(ale_unknown_mcast_bytes),
1187         GBENU_STATS_P2(ale_unknown_bcast),
1188         GBENU_STATS_P2(ale_unknown_bcast_bytes),
1189         GBENU_STATS_P2(ale_pol_match),
1190         GBENU_STATS_P2(ale_pol_match_red),
1191         GBENU_STATS_P2(ale_pol_match_yellow),
1192         GBENU_STATS_P2(tx_mem_protect_err),
1193         GBENU_STATS_P2(tx_pri0_drop),
1194         GBENU_STATS_P2(tx_pri1_drop),
1195         GBENU_STATS_P2(tx_pri2_drop),
1196         GBENU_STATS_P2(tx_pri3_drop),
1197         GBENU_STATS_P2(tx_pri4_drop),
1198         GBENU_STATS_P2(tx_pri5_drop),
1199         GBENU_STATS_P2(tx_pri6_drop),
1200         GBENU_STATS_P2(tx_pri7_drop),
1201         GBENU_STATS_P2(tx_pri0_drop_bcnt),
1202         GBENU_STATS_P2(tx_pri1_drop_bcnt),
1203         GBENU_STATS_P2(tx_pri2_drop_bcnt),
1204         GBENU_STATS_P2(tx_pri3_drop_bcnt),
1205         GBENU_STATS_P2(tx_pri4_drop_bcnt),
1206         GBENU_STATS_P2(tx_pri5_drop_bcnt),
1207         GBENU_STATS_P2(tx_pri6_drop_bcnt),
1208         GBENU_STATS_P2(tx_pri7_drop_bcnt),
1209         /* GBENU Module 3 */
1210         GBENU_STATS_P3(rx_good_frames),
1211         GBENU_STATS_P3(rx_broadcast_frames),
1212         GBENU_STATS_P3(rx_multicast_frames),
1213         GBENU_STATS_P3(rx_pause_frames),
1214         GBENU_STATS_P3(rx_crc_errors),
1215         GBENU_STATS_P3(rx_align_code_errors),
1216         GBENU_STATS_P3(rx_oversized_frames),
1217         GBENU_STATS_P3(rx_jabber_frames),
1218         GBENU_STATS_P3(rx_undersized_frames),
1219         GBENU_STATS_P3(rx_fragments),
1220         GBENU_STATS_P3(ale_drop),
1221         GBENU_STATS_P3(ale_overrun_drop),
1222         GBENU_STATS_P3(rx_bytes),
1223         GBENU_STATS_P3(tx_good_frames),
1224         GBENU_STATS_P3(tx_broadcast_frames),
1225         GBENU_STATS_P3(tx_multicast_frames),
1226         GBENU_STATS_P3(tx_pause_frames),
1227         GBENU_STATS_P3(tx_deferred_frames),
1228         GBENU_STATS_P3(tx_collision_frames),
1229         GBENU_STATS_P3(tx_single_coll_frames),
1230         GBENU_STATS_P3(tx_mult_coll_frames),
1231         GBENU_STATS_P3(tx_excessive_collisions),
1232         GBENU_STATS_P3(tx_late_collisions),
1233         GBENU_STATS_P3(rx_ipg_error),
1234         GBENU_STATS_P3(tx_carrier_sense_errors),
1235         GBENU_STATS_P3(tx_bytes),
1236         GBENU_STATS_P3(tx_64B_frames),
1237         GBENU_STATS_P3(tx_65_to_127B_frames),
1238         GBENU_STATS_P3(tx_128_to_255B_frames),
1239         GBENU_STATS_P3(tx_256_to_511B_frames),
1240         GBENU_STATS_P3(tx_512_to_1023B_frames),
1241         GBENU_STATS_P3(tx_1024B_frames),
1242         GBENU_STATS_P3(net_bytes),
1243         GBENU_STATS_P3(rx_bottom_fifo_drop),
1244         GBENU_STATS_P3(rx_port_mask_drop),
1245         GBENU_STATS_P3(rx_top_fifo_drop),
1246         GBENU_STATS_P3(ale_rate_limit_drop),
1247         GBENU_STATS_P3(ale_vid_ingress_drop),
1248         GBENU_STATS_P3(ale_da_eq_sa_drop),
1249         GBENU_STATS_P3(ale_unknown_ucast),
1250         GBENU_STATS_P3(ale_unknown_ucast_bytes),
1251         GBENU_STATS_P3(ale_unknown_mcast),
1252         GBENU_STATS_P3(ale_unknown_mcast_bytes),
1253         GBENU_STATS_P3(ale_unknown_bcast),
1254         GBENU_STATS_P3(ale_unknown_bcast_bytes),
1255         GBENU_STATS_P3(ale_pol_match),
1256         GBENU_STATS_P3(ale_pol_match_red),
1257         GBENU_STATS_P3(ale_pol_match_yellow),
1258         GBENU_STATS_P3(tx_mem_protect_err),
1259         GBENU_STATS_P3(tx_pri0_drop),
1260         GBENU_STATS_P3(tx_pri1_drop),
1261         GBENU_STATS_P3(tx_pri2_drop),
1262         GBENU_STATS_P3(tx_pri3_drop),
1263         GBENU_STATS_P3(tx_pri4_drop),
1264         GBENU_STATS_P3(tx_pri5_drop),
1265         GBENU_STATS_P3(tx_pri6_drop),
1266         GBENU_STATS_P3(tx_pri7_drop),
1267         GBENU_STATS_P3(tx_pri0_drop_bcnt),
1268         GBENU_STATS_P3(tx_pri1_drop_bcnt),
1269         GBENU_STATS_P3(tx_pri2_drop_bcnt),
1270         GBENU_STATS_P3(tx_pri3_drop_bcnt),
1271         GBENU_STATS_P3(tx_pri4_drop_bcnt),
1272         GBENU_STATS_P3(tx_pri5_drop_bcnt),
1273         GBENU_STATS_P3(tx_pri6_drop_bcnt),
1274         GBENU_STATS_P3(tx_pri7_drop_bcnt),
1275         /* GBENU Module 4 */
1276         GBENU_STATS_P4(rx_good_frames),
1277         GBENU_STATS_P4(rx_broadcast_frames),
1278         GBENU_STATS_P4(rx_multicast_frames),
1279         GBENU_STATS_P4(rx_pause_frames),
1280         GBENU_STATS_P4(rx_crc_errors),
1281         GBENU_STATS_P4(rx_align_code_errors),
1282         GBENU_STATS_P4(rx_oversized_frames),
1283         GBENU_STATS_P4(rx_jabber_frames),
1284         GBENU_STATS_P4(rx_undersized_frames),
1285         GBENU_STATS_P4(rx_fragments),
1286         GBENU_STATS_P4(ale_drop),
1287         GBENU_STATS_P4(ale_overrun_drop),
1288         GBENU_STATS_P4(rx_bytes),
1289         GBENU_STATS_P4(tx_good_frames),
1290         GBENU_STATS_P4(tx_broadcast_frames),
1291         GBENU_STATS_P4(tx_multicast_frames),
1292         GBENU_STATS_P4(tx_pause_frames),
1293         GBENU_STATS_P4(tx_deferred_frames),
1294         GBENU_STATS_P4(tx_collision_frames),
1295         GBENU_STATS_P4(tx_single_coll_frames),
1296         GBENU_STATS_P4(tx_mult_coll_frames),
1297         GBENU_STATS_P4(tx_excessive_collisions),
1298         GBENU_STATS_P4(tx_late_collisions),
1299         GBENU_STATS_P4(rx_ipg_error),
1300         GBENU_STATS_P4(tx_carrier_sense_errors),
1301         GBENU_STATS_P4(tx_bytes),
1302         GBENU_STATS_P4(tx_64B_frames),
1303         GBENU_STATS_P4(tx_65_to_127B_frames),
1304         GBENU_STATS_P4(tx_128_to_255B_frames),
1305         GBENU_STATS_P4(tx_256_to_511B_frames),
1306         GBENU_STATS_P4(tx_512_to_1023B_frames),
1307         GBENU_STATS_P4(tx_1024B_frames),
1308         GBENU_STATS_P4(net_bytes),
1309         GBENU_STATS_P4(rx_bottom_fifo_drop),
1310         GBENU_STATS_P4(rx_port_mask_drop),
1311         GBENU_STATS_P4(rx_top_fifo_drop),
1312         GBENU_STATS_P4(ale_rate_limit_drop),
1313         GBENU_STATS_P4(ale_vid_ingress_drop),
1314         GBENU_STATS_P4(ale_da_eq_sa_drop),
1315         GBENU_STATS_P4(ale_unknown_ucast),
1316         GBENU_STATS_P4(ale_unknown_ucast_bytes),
1317         GBENU_STATS_P4(ale_unknown_mcast),
1318         GBENU_STATS_P4(ale_unknown_mcast_bytes),
1319         GBENU_STATS_P4(ale_unknown_bcast),
1320         GBENU_STATS_P4(ale_unknown_bcast_bytes),
1321         GBENU_STATS_P4(ale_pol_match),
1322         GBENU_STATS_P4(ale_pol_match_red),
1323         GBENU_STATS_P4(ale_pol_match_yellow),
1324         GBENU_STATS_P4(tx_mem_protect_err),
1325         GBENU_STATS_P4(tx_pri0_drop),
1326         GBENU_STATS_P4(tx_pri1_drop),
1327         GBENU_STATS_P4(tx_pri2_drop),
1328         GBENU_STATS_P4(tx_pri3_drop),
1329         GBENU_STATS_P4(tx_pri4_drop),
1330         GBENU_STATS_P4(tx_pri5_drop),
1331         GBENU_STATS_P4(tx_pri6_drop),
1332         GBENU_STATS_P4(tx_pri7_drop),
1333         GBENU_STATS_P4(tx_pri0_drop_bcnt),
1334         GBENU_STATS_P4(tx_pri1_drop_bcnt),
1335         GBENU_STATS_P4(tx_pri2_drop_bcnt),
1336         GBENU_STATS_P4(tx_pri3_drop_bcnt),
1337         GBENU_STATS_P4(tx_pri4_drop_bcnt),
1338         GBENU_STATS_P4(tx_pri5_drop_bcnt),
1339         GBENU_STATS_P4(tx_pri6_drop_bcnt),
1340         GBENU_STATS_P4(tx_pri7_drop_bcnt),
1341         /* GBENU Module 5 */
1342         GBENU_STATS_P5(rx_good_frames),
1343         GBENU_STATS_P5(rx_broadcast_frames),
1344         GBENU_STATS_P5(rx_multicast_frames),
1345         GBENU_STATS_P5(rx_pause_frames),
1346         GBENU_STATS_P5(rx_crc_errors),
1347         GBENU_STATS_P5(rx_align_code_errors),
1348         GBENU_STATS_P5(rx_oversized_frames),
1349         GBENU_STATS_P5(rx_jabber_frames),
1350         GBENU_STATS_P5(rx_undersized_frames),
1351         GBENU_STATS_P5(rx_fragments),
1352         GBENU_STATS_P5(ale_drop),
1353         GBENU_STATS_P5(ale_overrun_drop),
1354         GBENU_STATS_P5(rx_bytes),
1355         GBENU_STATS_P5(tx_good_frames),
1356         GBENU_STATS_P5(tx_broadcast_frames),
1357         GBENU_STATS_P5(tx_multicast_frames),
1358         GBENU_STATS_P5(tx_pause_frames),
1359         GBENU_STATS_P5(tx_deferred_frames),
1360         GBENU_STATS_P5(tx_collision_frames),
1361         GBENU_STATS_P5(tx_single_coll_frames),
1362         GBENU_STATS_P5(tx_mult_coll_frames),
1363         GBENU_STATS_P5(tx_excessive_collisions),
1364         GBENU_STATS_P5(tx_late_collisions),
1365         GBENU_STATS_P5(rx_ipg_error),
1366         GBENU_STATS_P5(tx_carrier_sense_errors),
1367         GBENU_STATS_P5(tx_bytes),
1368         GBENU_STATS_P5(tx_64B_frames),
1369         GBENU_STATS_P5(tx_65_to_127B_frames),
1370         GBENU_STATS_P5(tx_128_to_255B_frames),
1371         GBENU_STATS_P5(tx_256_to_511B_frames),
1372         GBENU_STATS_P5(tx_512_to_1023B_frames),
1373         GBENU_STATS_P5(tx_1024B_frames),
1374         GBENU_STATS_P5(net_bytes),
1375         GBENU_STATS_P5(rx_bottom_fifo_drop),
1376         GBENU_STATS_P5(rx_port_mask_drop),
1377         GBENU_STATS_P5(rx_top_fifo_drop),
1378         GBENU_STATS_P5(ale_rate_limit_drop),
1379         GBENU_STATS_P5(ale_vid_ingress_drop),
1380         GBENU_STATS_P5(ale_da_eq_sa_drop),
1381         GBENU_STATS_P5(ale_unknown_ucast),
1382         GBENU_STATS_P5(ale_unknown_ucast_bytes),
1383         GBENU_STATS_P5(ale_unknown_mcast),
1384         GBENU_STATS_P5(ale_unknown_mcast_bytes),
1385         GBENU_STATS_P5(ale_unknown_bcast),
1386         GBENU_STATS_P5(ale_unknown_bcast_bytes),
1387         GBENU_STATS_P5(ale_pol_match),
1388         GBENU_STATS_P5(ale_pol_match_red),
1389         GBENU_STATS_P5(ale_pol_match_yellow),
1390         GBENU_STATS_P5(tx_mem_protect_err),
1391         GBENU_STATS_P5(tx_pri0_drop),
1392         GBENU_STATS_P5(tx_pri1_drop),
1393         GBENU_STATS_P5(tx_pri2_drop),
1394         GBENU_STATS_P5(tx_pri3_drop),
1395         GBENU_STATS_P5(tx_pri4_drop),
1396         GBENU_STATS_P5(tx_pri5_drop),
1397         GBENU_STATS_P5(tx_pri6_drop),
1398         GBENU_STATS_P5(tx_pri7_drop),
1399         GBENU_STATS_P5(tx_pri0_drop_bcnt),
1400         GBENU_STATS_P5(tx_pri1_drop_bcnt),
1401         GBENU_STATS_P5(tx_pri2_drop_bcnt),
1402         GBENU_STATS_P5(tx_pri3_drop_bcnt),
1403         GBENU_STATS_P5(tx_pri4_drop_bcnt),
1404         GBENU_STATS_P5(tx_pri5_drop_bcnt),
1405         GBENU_STATS_P5(tx_pri6_drop_bcnt),
1406         GBENU_STATS_P5(tx_pri7_drop_bcnt),
1407         /* GBENU Module 6 */
1408         GBENU_STATS_P6(rx_good_frames),
1409         GBENU_STATS_P6(rx_broadcast_frames),
1410         GBENU_STATS_P6(rx_multicast_frames),
1411         GBENU_STATS_P6(rx_pause_frames),
1412         GBENU_STATS_P6(rx_crc_errors),
1413         GBENU_STATS_P6(rx_align_code_errors),
1414         GBENU_STATS_P6(rx_oversized_frames),
1415         GBENU_STATS_P6(rx_jabber_frames),
1416         GBENU_STATS_P6(rx_undersized_frames),
1417         GBENU_STATS_P6(rx_fragments),
1418         GBENU_STATS_P6(ale_drop),
1419         GBENU_STATS_P6(ale_overrun_drop),
1420         GBENU_STATS_P6(rx_bytes),
1421         GBENU_STATS_P6(tx_good_frames),
1422         GBENU_STATS_P6(tx_broadcast_frames),
1423         GBENU_STATS_P6(tx_multicast_frames),
1424         GBENU_STATS_P6(tx_pause_frames),
1425         GBENU_STATS_P6(tx_deferred_frames),
1426         GBENU_STATS_P6(tx_collision_frames),
1427         GBENU_STATS_P6(tx_single_coll_frames),
1428         GBENU_STATS_P6(tx_mult_coll_frames),
1429         GBENU_STATS_P6(tx_excessive_collisions),
1430         GBENU_STATS_P6(tx_late_collisions),
1431         GBENU_STATS_P6(rx_ipg_error),
1432         GBENU_STATS_P6(tx_carrier_sense_errors),
1433         GBENU_STATS_P6(tx_bytes),
1434         GBENU_STATS_P6(tx_64B_frames),
1435         GBENU_STATS_P6(tx_65_to_127B_frames),
1436         GBENU_STATS_P6(tx_128_to_255B_frames),
1437         GBENU_STATS_P6(tx_256_to_511B_frames),
1438         GBENU_STATS_P6(tx_512_to_1023B_frames),
1439         GBENU_STATS_P6(tx_1024B_frames),
1440         GBENU_STATS_P6(net_bytes),
1441         GBENU_STATS_P6(rx_bottom_fifo_drop),
1442         GBENU_STATS_P6(rx_port_mask_drop),
1443         GBENU_STATS_P6(rx_top_fifo_drop),
1444         GBENU_STATS_P6(ale_rate_limit_drop),
1445         GBENU_STATS_P6(ale_vid_ingress_drop),
1446         GBENU_STATS_P6(ale_da_eq_sa_drop),
1447         GBENU_STATS_P6(ale_unknown_ucast),
1448         GBENU_STATS_P6(ale_unknown_ucast_bytes),
1449         GBENU_STATS_P6(ale_unknown_mcast),
1450         GBENU_STATS_P6(ale_unknown_mcast_bytes),
1451         GBENU_STATS_P6(ale_unknown_bcast),
1452         GBENU_STATS_P6(ale_unknown_bcast_bytes),
1453         GBENU_STATS_P6(ale_pol_match),
1454         GBENU_STATS_P6(ale_pol_match_red),
1455         GBENU_STATS_P6(ale_pol_match_yellow),
1456         GBENU_STATS_P6(tx_mem_protect_err),
1457         GBENU_STATS_P6(tx_pri0_drop),
1458         GBENU_STATS_P6(tx_pri1_drop),
1459         GBENU_STATS_P6(tx_pri2_drop),
1460         GBENU_STATS_P6(tx_pri3_drop),
1461         GBENU_STATS_P6(tx_pri4_drop),
1462         GBENU_STATS_P6(tx_pri5_drop),
1463         GBENU_STATS_P6(tx_pri6_drop),
1464         GBENU_STATS_P6(tx_pri7_drop),
1465         GBENU_STATS_P6(tx_pri0_drop_bcnt),
1466         GBENU_STATS_P6(tx_pri1_drop_bcnt),
1467         GBENU_STATS_P6(tx_pri2_drop_bcnt),
1468         GBENU_STATS_P6(tx_pri3_drop_bcnt),
1469         GBENU_STATS_P6(tx_pri4_drop_bcnt),
1470         GBENU_STATS_P6(tx_pri5_drop_bcnt),
1471         GBENU_STATS_P6(tx_pri6_drop_bcnt),
1472         GBENU_STATS_P6(tx_pri7_drop_bcnt),
1473         /* GBENU Module 7 */
1474         GBENU_STATS_P7(rx_good_frames),
1475         GBENU_STATS_P7(rx_broadcast_frames),
1476         GBENU_STATS_P7(rx_multicast_frames),
1477         GBENU_STATS_P7(rx_pause_frames),
1478         GBENU_STATS_P7(rx_crc_errors),
1479         GBENU_STATS_P7(rx_align_code_errors),
1480         GBENU_STATS_P7(rx_oversized_frames),
1481         GBENU_STATS_P7(rx_jabber_frames),
1482         GBENU_STATS_P7(rx_undersized_frames),
1483         GBENU_STATS_P7(rx_fragments),
1484         GBENU_STATS_P7(ale_drop),
1485         GBENU_STATS_P7(ale_overrun_drop),
1486         GBENU_STATS_P7(rx_bytes),
1487         GBENU_STATS_P7(tx_good_frames),
1488         GBENU_STATS_P7(tx_broadcast_frames),
1489         GBENU_STATS_P7(tx_multicast_frames),
1490         GBENU_STATS_P7(tx_pause_frames),
1491         GBENU_STATS_P7(tx_deferred_frames),
1492         GBENU_STATS_P7(tx_collision_frames),
1493         GBENU_STATS_P7(tx_single_coll_frames),
1494         GBENU_STATS_P7(tx_mult_coll_frames),
1495         GBENU_STATS_P7(tx_excessive_collisions),
1496         GBENU_STATS_P7(tx_late_collisions),
1497         GBENU_STATS_P7(rx_ipg_error),
1498         GBENU_STATS_P7(tx_carrier_sense_errors),
1499         GBENU_STATS_P7(tx_bytes),
1500         GBENU_STATS_P7(tx_64B_frames),
1501         GBENU_STATS_P7(tx_65_to_127B_frames),
1502         GBENU_STATS_P7(tx_128_to_255B_frames),
1503         GBENU_STATS_P7(tx_256_to_511B_frames),
1504         GBENU_STATS_P7(tx_512_to_1023B_frames),
1505         GBENU_STATS_P7(tx_1024B_frames),
1506         GBENU_STATS_P7(net_bytes),
1507         GBENU_STATS_P7(rx_bottom_fifo_drop),
1508         GBENU_STATS_P7(rx_port_mask_drop),
1509         GBENU_STATS_P7(rx_top_fifo_drop),
1510         GBENU_STATS_P7(ale_rate_limit_drop),
1511         GBENU_STATS_P7(ale_vid_ingress_drop),
1512         GBENU_STATS_P7(ale_da_eq_sa_drop),
1513         GBENU_STATS_P7(ale_unknown_ucast),
1514         GBENU_STATS_P7(ale_unknown_ucast_bytes),
1515         GBENU_STATS_P7(ale_unknown_mcast),
1516         GBENU_STATS_P7(ale_unknown_mcast_bytes),
1517         GBENU_STATS_P7(ale_unknown_bcast),
1518         GBENU_STATS_P7(ale_unknown_bcast_bytes),
1519         GBENU_STATS_P7(ale_pol_match),
1520         GBENU_STATS_P7(ale_pol_match_red),
1521         GBENU_STATS_P7(ale_pol_match_yellow),
1522         GBENU_STATS_P7(tx_mem_protect_err),
1523         GBENU_STATS_P7(tx_pri0_drop),
1524         GBENU_STATS_P7(tx_pri1_drop),
1525         GBENU_STATS_P7(tx_pri2_drop),
1526         GBENU_STATS_P7(tx_pri3_drop),
1527         GBENU_STATS_P7(tx_pri4_drop),
1528         GBENU_STATS_P7(tx_pri5_drop),
1529         GBENU_STATS_P7(tx_pri6_drop),
1530         GBENU_STATS_P7(tx_pri7_drop),
1531         GBENU_STATS_P7(tx_pri0_drop_bcnt),
1532         GBENU_STATS_P7(tx_pri1_drop_bcnt),
1533         GBENU_STATS_P7(tx_pri2_drop_bcnt),
1534         GBENU_STATS_P7(tx_pri3_drop_bcnt),
1535         GBENU_STATS_P7(tx_pri4_drop_bcnt),
1536         GBENU_STATS_P7(tx_pri5_drop_bcnt),
1537         GBENU_STATS_P7(tx_pri6_drop_bcnt),
1538         GBENU_STATS_P7(tx_pri7_drop_bcnt),
1539         /* GBENU Module 8 */
1540         GBENU_STATS_P8(rx_good_frames),
1541         GBENU_STATS_P8(rx_broadcast_frames),
1542         GBENU_STATS_P8(rx_multicast_frames),
1543         GBENU_STATS_P8(rx_pause_frames),
1544         GBENU_STATS_P8(rx_crc_errors),
1545         GBENU_STATS_P8(rx_align_code_errors),
1546         GBENU_STATS_P8(rx_oversized_frames),
1547         GBENU_STATS_P8(rx_jabber_frames),
1548         GBENU_STATS_P8(rx_undersized_frames),
1549         GBENU_STATS_P8(rx_fragments),
1550         GBENU_STATS_P8(ale_drop),
1551         GBENU_STATS_P8(ale_overrun_drop),
1552         GBENU_STATS_P8(rx_bytes),
1553         GBENU_STATS_P8(tx_good_frames),
1554         GBENU_STATS_P8(tx_broadcast_frames),
1555         GBENU_STATS_P8(tx_multicast_frames),
1556         GBENU_STATS_P8(tx_pause_frames),
1557         GBENU_STATS_P8(tx_deferred_frames),
1558         GBENU_STATS_P8(tx_collision_frames),
1559         GBENU_STATS_P8(tx_single_coll_frames),
1560         GBENU_STATS_P8(tx_mult_coll_frames),
1561         GBENU_STATS_P8(tx_excessive_collisions),
1562         GBENU_STATS_P8(tx_late_collisions),
1563         GBENU_STATS_P8(rx_ipg_error),
1564         GBENU_STATS_P8(tx_carrier_sense_errors),
1565         GBENU_STATS_P8(tx_bytes),
1566         GBENU_STATS_P8(tx_64B_frames),
1567         GBENU_STATS_P8(tx_65_to_127B_frames),
1568         GBENU_STATS_P8(tx_128_to_255B_frames),
1569         GBENU_STATS_P8(tx_256_to_511B_frames),
1570         GBENU_STATS_P8(tx_512_to_1023B_frames),
1571         GBENU_STATS_P8(tx_1024B_frames),
1572         GBENU_STATS_P8(net_bytes),
1573         GBENU_STATS_P8(rx_bottom_fifo_drop),
1574         GBENU_STATS_P8(rx_port_mask_drop),
1575         GBENU_STATS_P8(rx_top_fifo_drop),
1576         GBENU_STATS_P8(ale_rate_limit_drop),
1577         GBENU_STATS_P8(ale_vid_ingress_drop),
1578         GBENU_STATS_P8(ale_da_eq_sa_drop),
1579         GBENU_STATS_P8(ale_unknown_ucast),
1580         GBENU_STATS_P8(ale_unknown_ucast_bytes),
1581         GBENU_STATS_P8(ale_unknown_mcast),
1582         GBENU_STATS_P8(ale_unknown_mcast_bytes),
1583         GBENU_STATS_P8(ale_unknown_bcast),
1584         GBENU_STATS_P8(ale_unknown_bcast_bytes),
1585         GBENU_STATS_P8(ale_pol_match),
1586         GBENU_STATS_P8(ale_pol_match_red),
1587         GBENU_STATS_P8(ale_pol_match_yellow),
1588         GBENU_STATS_P8(tx_mem_protect_err),
1589         GBENU_STATS_P8(tx_pri0_drop),
1590         GBENU_STATS_P8(tx_pri1_drop),
1591         GBENU_STATS_P8(tx_pri2_drop),
1592         GBENU_STATS_P8(tx_pri3_drop),
1593         GBENU_STATS_P8(tx_pri4_drop),
1594         GBENU_STATS_P8(tx_pri5_drop),
1595         GBENU_STATS_P8(tx_pri6_drop),
1596         GBENU_STATS_P8(tx_pri7_drop),
1597         GBENU_STATS_P8(tx_pri0_drop_bcnt),
1598         GBENU_STATS_P8(tx_pri1_drop_bcnt),
1599         GBENU_STATS_P8(tx_pri2_drop_bcnt),
1600         GBENU_STATS_P8(tx_pri3_drop_bcnt),
1601         GBENU_STATS_P8(tx_pri4_drop_bcnt),
1602         GBENU_STATS_P8(tx_pri5_drop_bcnt),
1603         GBENU_STATS_P8(tx_pri6_drop_bcnt),
1604         GBENU_STATS_P8(tx_pri7_drop_bcnt),
1605 };
1606
1607 #define XGBE_STATS0_INFO(field)                         \
1608 {                                                       \
1609         "GBE_0:"#field, XGBE_STATS0_MODULE,             \
1610         sizeof_field(struct xgbe_hw_stats, field),      \
1611         offsetof(struct xgbe_hw_stats, field)           \
1612 }
1613
1614 #define XGBE_STATS1_INFO(field)                         \
1615 {                                                       \
1616         "GBE_1:"#field, XGBE_STATS1_MODULE,             \
1617         sizeof_field(struct xgbe_hw_stats, field),      \
1618         offsetof(struct xgbe_hw_stats, field)           \
1619 }
1620
1621 #define XGBE_STATS2_INFO(field)                         \
1622 {                                                       \
1623         "GBE_2:"#field, XGBE_STATS2_MODULE,             \
1624         sizeof_field(struct xgbe_hw_stats, field),      \
1625         offsetof(struct xgbe_hw_stats, field)           \
1626 }
1627
1628 static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
1629         /* GBE module 0 */
1630         XGBE_STATS0_INFO(rx_good_frames),
1631         XGBE_STATS0_INFO(rx_broadcast_frames),
1632         XGBE_STATS0_INFO(rx_multicast_frames),
1633         XGBE_STATS0_INFO(rx_oversized_frames),
1634         XGBE_STATS0_INFO(rx_undersized_frames),
1635         XGBE_STATS0_INFO(overrun_type4),
1636         XGBE_STATS0_INFO(overrun_type5),
1637         XGBE_STATS0_INFO(rx_bytes),
1638         XGBE_STATS0_INFO(tx_good_frames),
1639         XGBE_STATS0_INFO(tx_broadcast_frames),
1640         XGBE_STATS0_INFO(tx_multicast_frames),
1641         XGBE_STATS0_INFO(tx_bytes),
1642         XGBE_STATS0_INFO(tx_64byte_frames),
1643         XGBE_STATS0_INFO(tx_65_to_127byte_frames),
1644         XGBE_STATS0_INFO(tx_128_to_255byte_frames),
1645         XGBE_STATS0_INFO(tx_256_to_511byte_frames),
1646         XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
1647         XGBE_STATS0_INFO(tx_1024byte_frames),
1648         XGBE_STATS0_INFO(net_bytes),
1649         XGBE_STATS0_INFO(rx_sof_overruns),
1650         XGBE_STATS0_INFO(rx_mof_overruns),
1651         XGBE_STATS0_INFO(rx_dma_overruns),
1652         /* XGBE module 1 */
1653         XGBE_STATS1_INFO(rx_good_frames),
1654         XGBE_STATS1_INFO(rx_broadcast_frames),
1655         XGBE_STATS1_INFO(rx_multicast_frames),
1656         XGBE_STATS1_INFO(rx_pause_frames),
1657         XGBE_STATS1_INFO(rx_crc_errors),
1658         XGBE_STATS1_INFO(rx_align_code_errors),
1659         XGBE_STATS1_INFO(rx_oversized_frames),
1660         XGBE_STATS1_INFO(rx_jabber_frames),
1661         XGBE_STATS1_INFO(rx_undersized_frames),
1662         XGBE_STATS1_INFO(rx_fragments),
1663         XGBE_STATS1_INFO(overrun_type4),
1664         XGBE_STATS1_INFO(overrun_type5),
1665         XGBE_STATS1_INFO(rx_bytes),
1666         XGBE_STATS1_INFO(tx_good_frames),
1667         XGBE_STATS1_INFO(tx_broadcast_frames),
1668         XGBE_STATS1_INFO(tx_multicast_frames),
1669         XGBE_STATS1_INFO(tx_pause_frames),
1670         XGBE_STATS1_INFO(tx_deferred_frames),
1671         XGBE_STATS1_INFO(tx_collision_frames),
1672         XGBE_STATS1_INFO(tx_single_coll_frames),
1673         XGBE_STATS1_INFO(tx_mult_coll_frames),
1674         XGBE_STATS1_INFO(tx_excessive_collisions),
1675         XGBE_STATS1_INFO(tx_late_collisions),
1676         XGBE_STATS1_INFO(tx_underrun),
1677         XGBE_STATS1_INFO(tx_carrier_sense_errors),
1678         XGBE_STATS1_INFO(tx_bytes),
1679         XGBE_STATS1_INFO(tx_64byte_frames),
1680         XGBE_STATS1_INFO(tx_65_to_127byte_frames),
1681         XGBE_STATS1_INFO(tx_128_to_255byte_frames),
1682         XGBE_STATS1_INFO(tx_256_to_511byte_frames),
1683         XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
1684         XGBE_STATS1_INFO(tx_1024byte_frames),
1685         XGBE_STATS1_INFO(net_bytes),
1686         XGBE_STATS1_INFO(rx_sof_overruns),
1687         XGBE_STATS1_INFO(rx_mof_overruns),
1688         XGBE_STATS1_INFO(rx_dma_overruns),
1689         /* XGBE module 2 */
1690         XGBE_STATS2_INFO(rx_good_frames),
1691         XGBE_STATS2_INFO(rx_broadcast_frames),
1692         XGBE_STATS2_INFO(rx_multicast_frames),
1693         XGBE_STATS2_INFO(rx_pause_frames),
1694         XGBE_STATS2_INFO(rx_crc_errors),
1695         XGBE_STATS2_INFO(rx_align_code_errors),
1696         XGBE_STATS2_INFO(rx_oversized_frames),
1697         XGBE_STATS2_INFO(rx_jabber_frames),
1698         XGBE_STATS2_INFO(rx_undersized_frames),
1699         XGBE_STATS2_INFO(rx_fragments),
1700         XGBE_STATS2_INFO(overrun_type4),
1701         XGBE_STATS2_INFO(overrun_type5),
1702         XGBE_STATS2_INFO(rx_bytes),
1703         XGBE_STATS2_INFO(tx_good_frames),
1704         XGBE_STATS2_INFO(tx_broadcast_frames),
1705         XGBE_STATS2_INFO(tx_multicast_frames),
1706         XGBE_STATS2_INFO(tx_pause_frames),
1707         XGBE_STATS2_INFO(tx_deferred_frames),
1708         XGBE_STATS2_INFO(tx_collision_frames),
1709         XGBE_STATS2_INFO(tx_single_coll_frames),
1710         XGBE_STATS2_INFO(tx_mult_coll_frames),
1711         XGBE_STATS2_INFO(tx_excessive_collisions),
1712         XGBE_STATS2_INFO(tx_late_collisions),
1713         XGBE_STATS2_INFO(tx_underrun),
1714         XGBE_STATS2_INFO(tx_carrier_sense_errors),
1715         XGBE_STATS2_INFO(tx_bytes),
1716         XGBE_STATS2_INFO(tx_64byte_frames),
1717         XGBE_STATS2_INFO(tx_65_to_127byte_frames),
1718         XGBE_STATS2_INFO(tx_128_to_255byte_frames),
1719         XGBE_STATS2_INFO(tx_256_to_511byte_frames),
1720         XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
1721         XGBE_STATS2_INFO(tx_1024byte_frames),
1722         XGBE_STATS2_INFO(net_bytes),
1723         XGBE_STATS2_INFO(rx_sof_overruns),
1724         XGBE_STATS2_INFO(rx_mof_overruns),
1725         XGBE_STATS2_INFO(rx_dma_overruns),
1726 };
1727
1728 #define for_each_intf(i, priv) \
1729         list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
1730
1731 #define for_each_sec_slave(slave, priv) \
1732         list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
1733
1734 #define first_sec_slave(priv)                                   \
1735         list_first_entry(&priv->secondary_slaves, \
1736                         struct gbe_slave, slave_list)
1737
1738 static void keystone_get_drvinfo(struct net_device *ndev,
1739                                  struct ethtool_drvinfo *info)
1740 {
1741         strscpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
1742         strscpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
1743 }
1744
1745 static u32 keystone_get_msglevel(struct net_device *ndev)
1746 {
1747         struct netcp_intf *netcp = netdev_priv(ndev);
1748
1749         return netcp->msg_enable;
1750 }
1751
1752 static void keystone_set_msglevel(struct net_device *ndev, u32 value)
1753 {
1754         struct netcp_intf *netcp = netdev_priv(ndev);
1755
1756         netcp->msg_enable = value;
1757 }
1758
1759 static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
1760 {
1761         struct gbe_intf *gbe_intf;
1762
1763         gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1764         if (!gbe_intf)
1765                 gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1766
1767         return gbe_intf;
1768 }
1769
1770 static void keystone_get_stat_strings(struct net_device *ndev,
1771                                       uint32_t stringset, uint8_t *data)
1772 {
1773         struct netcp_intf *netcp = netdev_priv(ndev);
1774         struct gbe_intf *gbe_intf;
1775         struct gbe_priv *gbe_dev;
1776         int i;
1777
1778         gbe_intf = keystone_get_intf_data(netcp);
1779         if (!gbe_intf)
1780                 return;
1781         gbe_dev = gbe_intf->gbe_dev;
1782
1783         switch (stringset) {
1784         case ETH_SS_STATS:
1785                 for (i = 0; i < gbe_dev->num_et_stats; i++) {
1786                         memcpy(data, gbe_dev->et_stats[i].desc,
1787                                ETH_GSTRING_LEN);
1788                         data += ETH_GSTRING_LEN;
1789                 }
1790                 break;
1791         case ETH_SS_TEST:
1792                 break;
1793         }
1794 }
1795
1796 static int keystone_get_sset_count(struct net_device *ndev, int stringset)
1797 {
1798         struct netcp_intf *netcp = netdev_priv(ndev);
1799         struct gbe_intf *gbe_intf;
1800         struct gbe_priv *gbe_dev;
1801
1802         gbe_intf = keystone_get_intf_data(netcp);
1803         if (!gbe_intf)
1804                 return -EINVAL;
1805         gbe_dev = gbe_intf->gbe_dev;
1806
1807         switch (stringset) {
1808         case ETH_SS_TEST:
1809                 return 0;
1810         case ETH_SS_STATS:
1811                 return gbe_dev->num_et_stats;
1812         default:
1813                 return -EINVAL;
1814         }
1815 }
1816
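     /* Reset the software copy of one stats module's counters and
      * re-baseline the previous readings against the current hardware
      * register values.
      */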
1817 static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
1818 {
1819         void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
1820         u32  __iomem *p_stats_entry;
1821         int i;
1822
1823         for (i = 0; i < gbe_dev->num_et_stats; i++) {
1824                 if (gbe_dev->et_stats[i].type == stats_mod) {
1825                         p_stats_entry = base + gbe_dev->et_stats[i].offset;
1826                         gbe_dev->hw_stats[i] = 0;
1827                         gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
1828                 }
1829         }
1830 }
1831
1832 static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
1833                                              int et_stats_entry)
1834 {
1835         void __iomem *base = NULL;
1836         u32  __iomem *p_stats_entry;
1837         u32 curr, delta;
1838
1839         /* The hw_stats_regs pointers are already
1840          * properly set to point to the right base:
1841          */
1842         base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
1843         p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
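             /* u32 arithmetic below also handles counter wrap between reads */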
1844         curr = readl(p_stats_entry);
1845         delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
1846         gbe_dev->hw_stats_prev[et_stats_entry] = curr;
1847         gbe_dev->hw_stats[et_stats_entry] += delta;
1848 }
1849
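     /* Read every hardware counter and, when a buffer is supplied, copy the
      * accumulated 64-bit values out for ethtool.
      */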
1850 static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
1851 {
1852         int i;
1853
1854         for (i = 0; i < gbe_dev->num_et_stats; i++) {
1855                 gbe_update_hw_stats_entry(gbe_dev, i);
1856
1857                 if (data)
1858                         data[i] = gbe_dev->hw_stats[i];
1859         }
1860 }
1861
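     /* On version 1.4 hardware only two of the four stats modules are
      * visible at a time; select the A/B or C/D pair before reading.
      */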
1862 static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
1863                                                int stats_mod)
1864 {
1865         u32 val;
1866
1867         val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1868
1869         switch (stats_mod) {
1870         case GBE_STATSA_MODULE:
1871         case GBE_STATSB_MODULE:
1872                 val &= ~GBE_STATS_CD_SEL;
1873                 break;
1874         case GBE_STATSC_MODULE:
1875         case GBE_STATSD_MODULE:
1876                 val |= GBE_STATS_CD_SEL;
1877                 break;
1878         default:
1879                 return;
1880         }
1881
1882         /* make the stat module visible */
1883         writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1884 }
1885
1886 static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
1887 {
1888         gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
1889         gbe_reset_mod_stats(gbe_dev, stats_mod);
1890 }
1891
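     /* Version 1.4 variant of gbe_update_stats(): the stats table is walked
      * in two halves, making the matching module pair visible before each
      * half is read.
      */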
1892 static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1893 {
1894         u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
1895         int et_entry, j, pair;
1896
1897         for (pair = 0; pair < 2; pair++) {
1898                 gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
1899                                                       GBE_STATSC_MODULE :
1900                                                       GBE_STATSA_MODULE));
1901
1902                 for (j = 0; j < half_num_et_stats; j++) {
1903                         et_entry = pair * half_num_et_stats + j;
1904                         gbe_update_hw_stats_entry(gbe_dev, et_entry);
1905
1906                         if (data)
1907                                 data[et_entry] = gbe_dev->hw_stats[et_entry];
1908                 }
1909         }
1910 }
1911
1912 static void keystone_get_ethtool_stats(struct net_device *ndev,
1913                                        struct ethtool_stats *stats,
1914                                        uint64_t *data)
1915 {
1916         struct netcp_intf *netcp = netdev_priv(ndev);
1917         struct gbe_intf *gbe_intf;
1918         struct gbe_priv *gbe_dev;
1919
1920         gbe_intf = keystone_get_intf_data(netcp);
1921         if (!gbe_intf)
1922                 return;
1923
1924         gbe_dev = gbe_intf->gbe_dev;
1925         spin_lock_bh(&gbe_dev->hw_stats_lock);
1926         if (IS_SS_ID_VER_14(gbe_dev))
1927                 gbe_update_stats_ver14(gbe_dev, data);
1928         else
1929                 gbe_update_stats(gbe_dev, data);
1930         spin_unlock_bh(&gbe_dev->hw_stats_lock);
1931 }
1932
1933 static int keystone_get_link_ksettings(struct net_device *ndev,
1934                                        struct ethtool_link_ksettings *cmd)
1935 {
1936         struct netcp_intf *netcp = netdev_priv(ndev);
1937         struct phy_device *phy = ndev->phydev;
1938         struct gbe_intf *gbe_intf;
1939
1940         if (!phy)
1941                 return -EINVAL;
1942
1943         gbe_intf = keystone_get_intf_data(netcp);
1944         if (!gbe_intf)
1945                 return -EINVAL;
1946
1947         if (!gbe_intf->slave)
1948                 return -EINVAL;
1949
1950         phy_ethtool_ksettings_get(phy, cmd);
1951         cmd->base.port = gbe_intf->slave->phy_port_t;
1952
1953         return 0;
1954 }
1955
1956 static int keystone_set_link_ksettings(struct net_device *ndev,
1957                                        const struct ethtool_link_ksettings *cmd)
1958 {
1959         struct netcp_intf *netcp = netdev_priv(ndev);
1960         struct phy_device *phy = ndev->phydev;
1961         struct gbe_intf *gbe_intf;
1962         u8 port = cmd->base.port;
1963         u32 advertising, supported;
1964         u32 features;
1965
1966         ethtool_convert_link_mode_to_legacy_u32(&advertising,
1967                                                 cmd->link_modes.advertising);
1968         ethtool_convert_link_mode_to_legacy_u32(&supported,
1969                                                 cmd->link_modes.supported);
1970         features = advertising & supported;
1971
1972         if (!phy)
1973                 return -EINVAL;
1974
1975         gbe_intf = keystone_get_intf_data(netcp);
1976         if (!gbe_intf)
1977                 return -EINVAL;
1978
1979         if (!gbe_intf->slave)
1980                 return -EINVAL;
1981
1982         if (port != gbe_intf->slave->phy_port_t) {
1983                 if ((port == PORT_TP) && !(features & ADVERTISED_TP))
1984                         return -EINVAL;
1985
1986                 if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
1987                         return -EINVAL;
1988
1989                 if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
1990                         return -EINVAL;
1991
1992                 if ((port == PORT_MII) && !(features & ADVERTISED_MII))
1993                         return -EINVAL;
1994
1995                 if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
1996                         return -EINVAL;
1997         }
1998
1999         gbe_intf->slave->phy_port_t = port;
2000         return phy_ethtool_ksettings_set(phy, cmd);
2001 }
2002
2003 #if IS_ENABLED(CONFIG_TI_CPTS)
2004 static int keystone_get_ts_info(struct net_device *ndev,
2005                                 struct ethtool_ts_info *info)
2006 {
2007         struct netcp_intf *netcp = netdev_priv(ndev);
2008         struct gbe_intf *gbe_intf;
2009
2010         gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2011         if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
2012                 return -EINVAL;
2013
2014         info->so_timestamping =
2015                 SOF_TIMESTAMPING_TX_HARDWARE |
2016                 SOF_TIMESTAMPING_TX_SOFTWARE |
2017                 SOF_TIMESTAMPING_RX_HARDWARE |
2018                 SOF_TIMESTAMPING_RX_SOFTWARE |
2019                 SOF_TIMESTAMPING_SOFTWARE |
2020                 SOF_TIMESTAMPING_RAW_HARDWARE;
2021         info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
2022         info->tx_types =
2023                 (1 << HWTSTAMP_TX_OFF) |
2024                 (1 << HWTSTAMP_TX_ON);
2025         info->rx_filters =
2026                 (1 << HWTSTAMP_FILTER_NONE) |
2027                 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2028                 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2029         return 0;
2030 }
2031 #else
2032 static int keystone_get_ts_info(struct net_device *ndev,
2033                                 struct ethtool_ts_info *info)
2034 {
2035         info->so_timestamping =
2036                 SOF_TIMESTAMPING_TX_SOFTWARE |
2037                 SOF_TIMESTAMPING_RX_SOFTWARE |
2038                 SOF_TIMESTAMPING_SOFTWARE;
2039         info->phc_index = -1;
2040         info->tx_types = 0;
2041         info->rx_filters = 0;
2042         return 0;
2043 }
2044 #endif /* CONFIG_TI_CPTS */
2045
2046 static const struct ethtool_ops keystone_ethtool_ops = {
2047         .get_drvinfo            = keystone_get_drvinfo,
2048         .get_link               = ethtool_op_get_link,
2049         .get_msglevel           = keystone_get_msglevel,
2050         .set_msglevel           = keystone_set_msglevel,
2051         .get_strings            = keystone_get_stat_strings,
2052         .get_sset_count         = keystone_get_sset_count,
2053         .get_ethtool_stats      = keystone_get_ethtool_stats,
2054         .get_link_ksettings     = keystone_get_link_ksettings,
2055         .set_link_ksettings     = keystone_set_link_ksettings,
2056         .get_ts_info            = keystone_get_ts_info,
2057 };
2058
2059 static void gbe_set_slave_mac(struct gbe_slave *slave,
2060                               struct gbe_intf *gbe_intf)
2061 {
2062         struct net_device *ndev = gbe_intf->ndev;
2063
2064         writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
2065         writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
2066 }
2067
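     /* Slave ports are numbered after the host port, so slave N maps to
      * switch port N + 1 when the host occupies port 0.
      */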
2068 static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
2069 {
2070         if (priv->host_port == 0)
2071                 return slave_num + 1;
2072
2073         return slave_num;
2074 }
2075
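     /* Apply a link state change: program the MAC control register for the
      * negotiated speed, set the ALE port state and, for links with no
      * attached PHY, update the netdev carrier directly.
      */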
2076 static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
2077                                           struct net_device *ndev,
2078                                           struct gbe_slave *slave,
2079                                           int up)
2080 {
2081         struct phy_device *phy = slave->phy;
2082         u32 mac_control = 0;
2083
2084         if (up) {
2085                 mac_control = slave->mac_control;
2086                 if (phy && (phy->speed == SPEED_1000)) {
2087                         mac_control |= MACSL_GIG_MODE;
2088                         mac_control &= ~MACSL_XGIG_MODE;
2089                 } else if (phy && (phy->speed == SPEED_10000)) {
2090                         mac_control |= MACSL_XGIG_MODE;
2091                         mac_control &= ~MACSL_GIG_MODE;
2092                 }
2093
2094                 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2095                                                  mac_control));
2096
2097                 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2098                                      ALE_PORT_STATE,
2099                                      ALE_PORT_STATE_FORWARD);
2100
2101                 if (ndev && slave->open &&
2102                     ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2103                     (slave->link_interface != RGMII_LINK_MAC_PHY) &&
2104                     (slave->link_interface != XGMII_LINK_MAC_PHY)))
2105                         netif_carrier_on(ndev);
2106         } else {
2107                 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2108                                                  mac_control));
2109                 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2110                                      ALE_PORT_STATE,
2111                                      ALE_PORT_STATE_DISABLE);
2112                 if (ndev &&
2113                     ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2114                     (slave->link_interface != RGMII_LINK_MAC_PHY) &&
2115                     (slave->link_interface != XGMII_LINK_MAC_PHY)))
2116                         netif_carrier_off(ndev);
2117         }
2118
2119         if (phy)
2120                 phy_print_status(phy);
2121 }
2122
2123 static bool gbe_phy_link_status(struct gbe_slave *slave)
2124 {
2125         return !slave->phy || slave->phy->link;
2126 }
2127
2128 #define RGMII_REG_STATUS_LINK   BIT(0)
2129
2130 static void netcp_2u_rgmii_get_port_link(struct gbe_priv *gbe_dev, bool *status)
2131 {
2132         u32 val = 0;
2133
2134         val = readl(GBE_REG_ADDR(gbe_dev, ss_regs, rgmii_status));
2135         *status = !!(val & RGMII_REG_STATUS_LINK);
2136 }
2137
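     /* Combine the switch-level (SGMII/RGMII) and PHY link indications and
      * act only when the combined state actually changes.
      */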
2138 static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
2139                                           struct gbe_slave *slave,
2140                                           struct net_device *ndev)
2141 {
2142         bool sw_link_state = true, phy_link_state;
2143         int sp = slave->slave_num, link_state;
2144
2145         if (!slave->open)
2146                 return;
2147
2148         if (SLAVE_LINK_IS_RGMII(slave))
2149                 netcp_2u_rgmii_get_port_link(gbe_dev,
2150                                              &sw_link_state);
2151         if (SLAVE_LINK_IS_SGMII(slave))
2152                 sw_link_state =
2153                 netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
2154
2155         phy_link_state = gbe_phy_link_status(slave);
2156         link_state = phy_link_state & sw_link_state;
2157
2158         if (atomic_xchg(&slave->link_state, link_state) != link_state)
2159                 netcp_ethss_link_state_action(gbe_dev, ndev, slave,
2160                                               link_state);
2161 }
2162
2163 static void xgbe_adjust_link(struct net_device *ndev)
2164 {
2165         struct netcp_intf *netcp = netdev_priv(ndev);
2166         struct gbe_intf *gbe_intf;
2167
2168         gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
2169         if (!gbe_intf)
2170                 return;
2171
2172         netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2173                                       ndev);
2174 }
2175
2176 static void gbe_adjust_link(struct net_device *ndev)
2177 {
2178         struct netcp_intf *netcp = netdev_priv(ndev);
2179         struct gbe_intf *gbe_intf;
2180
2181         gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2182         if (!gbe_intf)
2183                 return;
2184
2185         netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2186                                       ndev);
2187 }
2188
2189 static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
2190 {
2191         struct gbe_priv *gbe_dev = netdev_priv(ndev);
2192         struct gbe_slave *slave;
2193
2194         for_each_sec_slave(slave, gbe_dev)
2195                 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2196 }
2197
2198 /* Reset EMAC
2199  * Soft reset is set and polled until clear, or until a timeout occurs
2200  */
2201 static int gbe_port_reset(struct gbe_slave *slave)
2202 {
2203         u32 i, v;
2204
2205         /* Set the soft reset bit */
2206         writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
2207
2208         /* Wait for the bit to clear */
2209         for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
2210                 v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
2211                 if ((v & SOFT_RESET_MASK) != SOFT_RESET)
2212                         return 0;
2213         }
2214
2215         /* Timeout on the reset */
2216         return GMACSL_RET_WARN_RESET_INCOMPLETE;
2217 }
2218
2219 /* Configure EMAC */
2220 static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2221                             int max_rx_len)
2222 {
2223         void __iomem *rx_maxlen_reg;
2224         u32 xgmii_mode;
2225
2226         if (max_rx_len > NETCP_MAX_FRAME_SIZE)
2227                 max_rx_len = NETCP_MAX_FRAME_SIZE;
2228
2229         /* Enable correct MII mode at SS level */
2230         if (IS_SS_ID_XGBE(gbe_dev) &&
2231             (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
2232                 xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
2233                 xgmii_mode |= (1 << slave->slave_num);
2234                 writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
2235         }
2236
2237         if (IS_SS_ID_MU(gbe_dev))
2238                 rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
2239         else
2240                 rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
2241
2242         writel(max_rx_len, rx_maxlen_reg);
2243         writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
2244 }
2245
2246 static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2247                               struct gbe_slave *slave, bool set)
2248 {
2249         if (SLAVE_LINK_IS_XGMII(slave))
2250                 return;
2251
2252         netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2253                             slave->slave_num, set);
2254 }
2255
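     /* Quiesce a slave port: reset the MAC, disable ALE forwarding, drop the
      * broadcast entry and release the attached PHY, if any.
      */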
2256 static void gbe_slave_stop(struct gbe_intf *intf)
2257 {
2258         struct gbe_priv *gbe_dev = intf->gbe_dev;
2259         struct gbe_slave *slave = intf->slave;
2260
2261         if (!IS_SS_ID_2U(gbe_dev))
2262                 gbe_sgmii_rtreset(gbe_dev, slave, true);
2263         gbe_port_reset(slave);
2264         /* Disable forwarding */
2265         cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2266                              ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2267         cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
2268                            1 << slave->port_num, 0, 0);
2269
2270         if (!slave->phy)
2271                 return;
2272
2273         phy_stop(slave->phy);
2274         phy_disconnect(slave->phy);
2275         slave->phy = NULL;
2276 }
2277
2278 static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2279 {
2280         if (SLAVE_LINK_IS_XGMII(slave))
2281                 return;
2282
2283         netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2284         netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2285                            slave->link_interface);
2286 }
2287
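     /* Bring up a slave port: reset and configure the MAC, program the
      * interface MAC address, enable ALE forwarding and, for MAC-PHY link
      * interfaces, connect and start the PHY.
      */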
2288 static int gbe_slave_open(struct gbe_intf *gbe_intf)
2289 {
2290         struct gbe_priv *priv = gbe_intf->gbe_dev;
2291         struct gbe_slave *slave = gbe_intf->slave;
2292         phy_interface_t phy_mode;
2293         bool has_phy = false;
2294         int err;
2295
2296         void (*hndlr)(struct net_device *) = gbe_adjust_link;
2297
2298         if (!IS_SS_ID_2U(priv))
2299                 gbe_sgmii_config(priv, slave);
2300         gbe_port_reset(slave);
2301         if (!IS_SS_ID_2U(priv))
2302                 gbe_sgmii_rtreset(priv, slave, false);
2303         gbe_port_config(priv, slave, priv->rx_packet_max);
2304         gbe_set_slave_mac(slave, gbe_intf);
2305         /* For NU & 2U switches, map the VLAN priorities to zero
2306          * as we only configure to use priority 0
2307          */
2308         if (IS_SS_ID_MU(priv))
2309                 writel(HOST_TX_PRI_MAP_DEFAULT,
2310                        GBE_REG_ADDR(slave, port_regs, rx_pri_map));
2311
2312         /* enable forwarding */
2313         cpsw_ale_control_set(priv->ale, slave->port_num,
2314                              ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2315         cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
2316                            1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
2317
2318         if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2319                 has_phy = true;
2320                 phy_mode = PHY_INTERFACE_MODE_SGMII;
2321                 slave->phy_port_t = PORT_MII;
2322         } else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
2323                 has_phy = true;
2324                 err = of_get_phy_mode(slave->node, &phy_mode);
2325                 /* if phy-mode is not present, default to
2326                  * PHY_INTERFACE_MODE_RGMII
2327                  */
2328                 if (err)
2329                         phy_mode = PHY_INTERFACE_MODE_RGMII;
2330
2331                 if (!phy_interface_mode_is_rgmii(phy_mode)) {
2332                         dev_err(priv->dev,
2333                                 "Unsupported phy mode %d\n", phy_mode);
2334                         return -EINVAL;
2335                 }
2336                 slave->phy_port_t = PORT_MII;
2337         } else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
2338                 has_phy = true;
2339                 phy_mode = PHY_INTERFACE_MODE_NA;
2340                 slave->phy_port_t = PORT_FIBRE;
2341         }
2342
2343         if (has_phy) {
2344                 if (IS_SS_ID_XGBE(priv))
2345                         hndlr = xgbe_adjust_link;
2346
2347                 slave->phy = of_phy_connect(gbe_intf->ndev,
2348                                             slave->phy_node,
2349                                             hndlr, 0,
2350                                             phy_mode);
2351                 if (!slave->phy) {
2352                         dev_err(priv->dev, "phy not found on slave %d\n",
2353                                 slave->slave_num);
2354                         return -ENODEV;
2355                 }
2356                 dev_dbg(priv->dev, "phy found: %s\n",
2357                         phydev_name(slave->phy));
2358                 phy_start(slave->phy);
2359         }
2360         return 0;
2361 }
2362
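     /* One-time host (CPU) port setup: TX priority map, maximum RX frame
      * length, ALE startup and the default VLAN/multicast flood masks.
      */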
2363 static void gbe_init_host_port(struct gbe_priv *priv)
2364 {
2365         int bypass_en = 1;
2366
2367         /* Host Tx Pri */
2368         if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
2369                 writel(HOST_TX_PRI_MAP_DEFAULT,
2370                        GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
2371
2372         /* Max length register */
2373         writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
2374                                                   rx_maxlen));
2375
2376         cpsw_ale_start(priv->ale);
2377
2378         if (priv->enable_ale)
2379                 bypass_en = 0;
2380
2381         cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
2382
2383         cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
2384
2385         cpsw_ale_control_set(priv->ale, priv->host_port,
2386                              ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2387
2388         cpsw_ale_control_set(priv->ale, 0,
2389                              ALE_PORT_UNKNOWN_VLAN_MEMBER,
2390                              GBE_PORT_MASK(priv->ale_ports));
2391
2392         cpsw_ale_control_set(priv->ale, 0,
2393                              ALE_PORT_UNKNOWN_MCAST_FLOOD,
2394                              GBE_PORT_MASK(priv->ale_ports - 1));
2395
2396         cpsw_ale_control_set(priv->ale, 0,
2397                              ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
2398                              GBE_PORT_MASK(priv->ale_ports));
2399
2400         cpsw_ale_control_set(priv->ale, 0,
2401                              ALE_PORT_UNTAGGED_EGRESS,
2402                              GBE_PORT_MASK(priv->ale_ports));
2403 }
2404
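     /* Install a multicast/broadcast address for all ports, both untagged
      * and once per VLAN currently active on this interface.
      */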
2405 static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2406 {
2407         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2408         u16 vlan_id;
2409
2410         cpsw_ale_add_mcast(gbe_dev->ale, addr,
2411                            GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2412                            ALE_MCAST_FWD_2);
2413         for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2414                 cpsw_ale_add_mcast(gbe_dev->ale, addr,
2415                                    GBE_PORT_MASK(gbe_dev->ale_ports),
2416                                    ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2417         }
2418 }
2419
2420 static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2421 {
2422         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2423         u16 vlan_id;
2424
2425         cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2426
2427         for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2428                 cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2429                                    ALE_VLAN, vlan_id);
2430 }
2431
2432 static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2433 {
2434         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2435         u16 vlan_id;
2436
2437         cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2438
2439         for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2440                 cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2441         }
2442 }
2443
2444 static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2445 {
2446         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2447         u16 vlan_id;
2448
2449         cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2450
2451         for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2452                 cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2453                                    ALE_VLAN, vlan_id);
2454         }
2455 }
2456
2457 static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2458 {
2459         struct gbe_intf *gbe_intf = intf_priv;
2460         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2461
2462         dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2463                 naddr->addr, naddr->type);
2464
2465         switch (naddr->type) {
2466         case ADDR_MCAST:
2467         case ADDR_BCAST:
2468                 gbe_add_mcast_addr(gbe_intf, naddr->addr);
2469                 break;
2470         case ADDR_UCAST:
2471         case ADDR_DEV:
2472                 gbe_add_ucast_addr(gbe_intf, naddr->addr);
2473                 break;
2474         case ADDR_ANY:
2475                 /* nothing to do for promiscuous */
2476         default:
2477                 break;
2478         }
2479
2480         return 0;
2481 }
2482
2483 static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2484 {
2485         struct gbe_intf *gbe_intf = intf_priv;
2486         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2487
2488         dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2489                 naddr->addr, naddr->type);
2490
2491         switch (naddr->type) {
2492         case ADDR_MCAST:
2493         case ADDR_BCAST:
2494                 gbe_del_mcast_addr(gbe_intf, naddr->addr);
2495                 break;
2496         case ADDR_UCAST:
2497         case ADDR_DEV:
2498                 gbe_del_ucast_addr(gbe_intf, naddr->addr);
2499                 break;
2500         case ADDR_ANY:
2501                 /* nothing to do for promiscuous */
2502         default:
2503                 break;
2504         }
2505
2506         return 0;
2507 }
2508
2509 static int gbe_add_vid(void *intf_priv, int vid)
2510 {
2511         struct gbe_intf *gbe_intf = intf_priv;
2512         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2513
2514         set_bit(vid, gbe_intf->active_vlans);
2515
2516         cpsw_ale_add_vlan(gbe_dev->ale, vid,
2517                           GBE_PORT_MASK(gbe_dev->ale_ports),
2518                           GBE_MASK_NO_PORTS,
2519                           GBE_PORT_MASK(gbe_dev->ale_ports),
2520                           GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2521
2522         return 0;
2523 }
2524
2525 static int gbe_del_vid(void *intf_priv, int vid)
2526 {
2527         struct gbe_intf *gbe_intf = intf_priv;
2528         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2529
2530         cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2531         clear_bit(vid, gbe_intf->active_vlans);
2532         return 0;
2533 }
2534
2535 #if IS_ENABLED(CONFIG_TI_CPTS)
2536
2537 static void gbe_txtstamp(void *context, struct sk_buff *skb)
2538 {
2539         struct gbe_intf *gbe_intf = context;
2540         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2541
2542         cpts_tx_timestamp(gbe_dev->cpts, skb);
2543 }
2544
2545 static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
2546                               const struct netcp_packet *p_info)
2547 {
2548         struct sk_buff *skb = p_info->skb;
2549
2550         return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
2551 }
2552
2553 static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2554                                  struct netcp_packet *p_info)
2555 {
2556         struct phy_device *phydev = p_info->skb->dev->phydev;
2557         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2558
2559         if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
2560             !gbe_dev->tx_ts_enabled)
2561                 return 0;
2562
2563         /* If phy has the txtstamp api, assume it will do it.
2564          * We mark it here because skb_tx_timestamp() is called
2565          * after all the txhooks are called.
2566          */
2567         if (phy_has_txtstamp(phydev)) {
2568                 skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2569                 return 0;
2570         }
2571
2572         if (gbe_need_txtstamp(gbe_intf, p_info)) {
2573                 p_info->txtstamp = gbe_txtstamp;
2574                 p_info->ts_context = (void *)gbe_intf;
2575                 skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2576         }
2577
2578         return 0;
2579 }
2580
2581 static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
2582 {
2583         struct phy_device *phydev = p_info->skb->dev->phydev;
2584         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2585
2586         if (p_info->rxtstamp_complete)
2587                 return 0;
2588
2589         if (phy_has_rxtstamp(phydev)) {
2590                 p_info->rxtstamp_complete = true;
2591                 return 0;
2592         }
2593
2594         if (gbe_dev->rx_ts_enabled)
2595                 cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
2596
2597         p_info->rxtstamp_complete = true;
2598
2599         return 0;
2600 }
2601
2602 static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2603 {
2604         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2605         struct cpts *cpts = gbe_dev->cpts;
2606         struct hwtstamp_config cfg;
2607
2608         if (!cpts)
2609                 return -EOPNOTSUPP;
2610
2611         cfg.flags = 0;
2612         cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2613         cfg.rx_filter = gbe_dev->rx_ts_enabled;
2614
2615         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2616 }
2617
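     /* Program the per-slave time sync registers from the current RX/TX
      * timestamping enables; timestamping is switched off entirely when
      * both directions are disabled.
      */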
2618 static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
2619 {
2620         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2621         struct gbe_slave *slave = gbe_intf->slave;
2622         u32 ts_en, seq_id, ctl;
2623
2624         if (!gbe_dev->rx_ts_enabled &&
2625             !gbe_dev->tx_ts_enabled) {
2626                 writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
2627                 return;
2628         }
2629
2630         seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
2631         ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
2632         ctl = ETH_P_1588 | TS_TTL_NONZERO |
2633                 (slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
2634                 (slave->ts_ctl.uni ?  TS_UNI_EN :
2635                         slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
2636
2637         if (gbe_dev->tx_ts_enabled)
2638                 ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
2639
2640         if (gbe_dev->rx_ts_enabled)
2641                 ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
2642
2643         writel(ts_en,  GBE_REG_ADDR(slave, port_regs, ts_ctl));
2644         writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
2645         writel(ctl,    GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
2646 }
2647
2648 static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2649 {
2650         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2651         struct cpts *cpts = gbe_dev->cpts;
2652         struct hwtstamp_config cfg;
2653
2654         if (!cpts)
2655                 return -EOPNOTSUPP;
2656
2657         if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
2658                 return -EFAULT;
2659
2660         /* reserved for future extensions */
2661         if (cfg.flags)
2662                 return -EINVAL;
2663
2664         switch (cfg.tx_type) {
2665         case HWTSTAMP_TX_OFF:
2666                 gbe_dev->tx_ts_enabled = 0;
2667                 break;
2668         case HWTSTAMP_TX_ON:
2669                 gbe_dev->tx_ts_enabled = 1;
2670                 break;
2671         default:
2672                 return -ERANGE;
2673         }
2674
2675         switch (cfg.rx_filter) {
2676         case HWTSTAMP_FILTER_NONE:
2677                 gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
2678                 break;
2679         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2680         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2681         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2682                 gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2683                 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2684                 break;
2685         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2686         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2687         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2688         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2689         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2690         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2691         case HWTSTAMP_FILTER_PTP_V2_EVENT:
2692         case HWTSTAMP_FILTER_PTP_V2_SYNC:
2693         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2694                 gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
2695                 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2696                 break;
2697         default:
2698                 return -ERANGE;
2699         }
2700
2701         gbe_hwtstamp(gbe_intf);
2702
2703         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2704 }
2705
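     /* CPTS registration is reference counted so the shared clock is
      * registered only once across all interfaces on the switch.
      */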
2706 static void gbe_register_cpts(struct gbe_priv *gbe_dev)
2707 {
2708         if (!gbe_dev->cpts)
2709                 return;
2710
2711         if (gbe_dev->cpts_registered > 0)
2712                 goto done;
2713
2714         if (cpts_register(gbe_dev->cpts)) {
2715                 dev_err(gbe_dev->dev, "error registering cpts device\n");
2716                 return;
2717         }
2718
2719 done:
2720         ++gbe_dev->cpts_registered;
2721 }
2722
2723 static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2724 {
2725         if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
2726                 return;
2727
2728         if (--gbe_dev->cpts_registered)
2729                 return;
2730
2731         cpts_unregister(gbe_dev->cpts);
2732 }
2733 #else
2734 static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2735                                         struct netcp_packet *p_info)
2736 {
2737         return 0;
2738 }
2739
2740 static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
2741                                struct netcp_packet *p_info)
2742 {
2743         return 0;
2744 }
2745
2746 static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
2747                                struct ifreq *ifr, int cmd)
2748 {
2749         return -EOPNOTSUPP;
2750 }
2751
2752 static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
2753 {
2754 }
2755
2756 static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2757 {
2758 }
2759
2760 static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
2761 {
2762         return -EOPNOTSUPP;
2763 }
2764
2765 static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
2766 {
2767         return -EOPNOTSUPP;
2768 }
2769 #endif /* CONFIG_TI_CPTS */
2770
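/*
 * Promiscuous mode is implemented through the ALE: address learning and
 * source-address updates are turned off on every port, stale entries are
 * aged out, multicast entries are flushed and all unicast traffic is flooded
 * to the host port. Leaving promiscuous mode re-enables learning and stops
 * the unicast flooding.
 */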
2771 static int gbe_set_rx_mode(void *intf_priv, bool promisc)
2772 {
2773         struct gbe_intf *gbe_intf = intf_priv;
2774         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2775         struct cpsw_ale *ale = gbe_dev->ale;
2776         unsigned long timeout;
2777         int i, ret = -ETIMEDOUT;
2778
2779         /* Disable(1)/Enable(0) Learn for all ports (host is port 0 and
2780          * slaves are ports 1 and up).
2781          */
2782         for (i = 0; i <= gbe_dev->num_slaves; i++) {
2783                 cpsw_ale_control_set(ale, i,
2784                                      ALE_PORT_NOLEARN, !!promisc);
2785                 cpsw_ale_control_set(ale, i,
2786                                      ALE_PORT_NO_SA_UPDATE, !!promisc);
2787         }
2788
2789         if (!promisc) {
2790                 /* Don't Flood All Unicast Packets to Host port */
2791                 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
2792                 dev_vdbg(gbe_dev->dev, "promiscuous mode disabled\n");
2793                 return 0;
2794         }
2795
2796         timeout = jiffies + HZ;
2797
2798         /* Clear All Untouched entries */
2799         cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
2800         do {
2801                 cpu_relax();
2802                 if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) {
2803                         ret = 0;
2804                         break;
2805                 }
2806
2807         } while (time_after(timeout, jiffies));
2808
2809         /* Make sure it is not a false timeout */
2810         if (ret && !cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
2811                 return ret;
2812
2813         cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
2814
2815         /* Clear all mcast from ALE */
2816         cpsw_ale_flush_multicast(ale,
2817                                  GBE_PORT_MASK(gbe_dev->ale_ports),
2818                                  -1);
2819
2820         /* Flood All Unicast Packets to Host port */
2821         cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
2822         dev_vdbg(gbe_dev->dev, "promiscuous mode enabled\n");
2823         return ret;
2824 }
2825
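/*
 * Handle SIOCGHWTSTAMP/SIOCSHWTSTAMP in the MAC (CPTS) only when the attached
 * PHY does not provide hardware timestamping itself; everything else is
 * passed on to the PHY via phy_mii_ioctl().
 */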
2826 static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2827 {
2828         struct gbe_intf *gbe_intf = intf_priv;
2829         struct phy_device *phy = gbe_intf->slave->phy;
2830
2831         if (!phy_has_hwtstamp(phy)) {
2832                 switch (cmd) {
2833                 case SIOCGHWTSTAMP:
2834                         return gbe_hwtstamp_get(gbe_intf, req);
2835                 case SIOCSHWTSTAMP:
2836                         return gbe_hwtstamp_set(gbe_intf, req);
2837                 }
2838         }
2839
2840         if (phy)
2841                 return phy_mii_ioctl(phy, req, cmd);
2842
2843         return -EOPNOTSUPP;
2844 }
2845
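/*
 * Periodic housekeeping timer: poll the link state of all open interfaces
 * and of the secondary slave ports, refresh the accumulated hardware
 * statistics under the stats lock, then re-arm itself.
 */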
2846 static void netcp_ethss_timer(struct timer_list *t)
2847 {
2848         struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
2849         struct gbe_intf *gbe_intf;
2850         struct gbe_slave *slave;
2851
2852         /* Check & update SGMII link state of interfaces */
2853         for_each_intf(gbe_intf, gbe_dev) {
2854                 if (!gbe_intf->slave->open)
2855                         continue;
2856                 netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
2857                                               gbe_intf->ndev);
2858         }
2859
2860         /* Check & update SGMII link state of secondary ports */
2861         for_each_sec_slave(slave, gbe_dev) {
2862                 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2863         }
2864
2865         /* The timer callback runs in softirq (BH) context; no need to block BHs */
2866         spin_lock(&gbe_dev->hw_stats_lock);
2867
2868         if (IS_SS_ID_VER_14(gbe_dev))
2869                 gbe_update_stats_ver14(gbe_dev, NULL);
2870         else
2871                 gbe_update_stats(gbe_dev, NULL);
2872
2873         spin_unlock(&gbe_dev->hw_stats_lock);
2874
2875         gbe_dev->timer.expires  = jiffies + GBE_TIMER_INTERVAL;
2876         add_timer(&gbe_dev->timer);
2877 }
2878
2879 static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
2880 {
2881         struct gbe_intf *gbe_intf = data;
2882
2883         p_info->tx_pipe = &gbe_intf->tx_pipe;
2884
2885         return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
2886 }
2887
2888 static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
2889 {
2890         struct gbe_intf *gbe_intf = data;
2891
2892         return gbe_rxtstamp(gbe_intf, p_info);
2893 }
2894
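/*
 * netcp module open callback: set up the TX pipe routing for this interface,
 * program the switch control and statistics registers, open the slave port,
 * register the RX/TX hooks with netcp and take a CPTS reference.
 */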
2895 static int gbe_open(void *intf_priv, struct net_device *ndev)
2896 {
2897         struct gbe_intf *gbe_intf = intf_priv;
2898         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2899         struct netcp_intf *netcp = netdev_priv(ndev);
2900         struct gbe_slave *slave = gbe_intf->slave;
2901         int port_num = slave->port_num;
2902         u32 reg, val;
2903         int ret;
2904
2905         reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
2906         dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d), GBE identification value 0x%x\n",
2907                 GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
2908                 GBE_RTL_VERSION(reg), GBE_IDENT(reg));
2909
2910         /* On 10G and NetCP 1.5 (NU/2U), direct packets to the port via tag info */
2911         if (IS_SS_ID_XGBE(gbe_dev) || IS_SS_ID_MU(gbe_dev))
2912                 gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
2913
2914         if (gbe_dev->enable_ale)
2915                 gbe_intf->tx_pipe.switch_to_port = 0;
2916         else
2917                 gbe_intf->tx_pipe.switch_to_port = port_num;
2918
2919         dev_dbg(gbe_dev->dev,
2920                 "opened TX channel %s: %p to port %d, flags %d\n",
2921                 gbe_intf->tx_pipe.dma_chan_name,
2922                 gbe_intf->tx_pipe.dma_channel,
2923                 gbe_intf->tx_pipe.switch_to_port,
2924                 gbe_intf->tx_pipe.flags);
2925
2926         gbe_slave_stop(gbe_intf);
2927
2928         /* disable priority elevation and enable statistics on all ports */
2929         writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
2930
2931         /* Control register */
2932         val = GBE_CTL_P0_ENABLE;
2933         if (IS_SS_ID_MU(gbe_dev)) {
2934                 val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
2935                 netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
2936         }
2937         writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
2938
2939         /* All statistics enabled and STAT AB visible by default */
2940         writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
2941                                                     stat_port_en));
2942
2943         ret = gbe_slave_open(gbe_intf);
2944         if (ret)
2945                 goto fail;
2946
2947         netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2948         netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2949
2950         slave->open = true;
2951         netcp_ethss_update_link_state(gbe_dev, slave, ndev);
2952
2953         gbe_register_cpts(gbe_dev);
2954
2955         return 0;
2956
2957 fail:
2958         gbe_slave_stop(gbe_intf);
2959         return ret;
2960 }
2961
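/*
 * netcp module close callback: drop the CPTS reference, stop the slave port
 * and unhook this interface from the netcp RX/TX paths.
 */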
2962 static int gbe_close(void *intf_priv, struct net_device *ndev)
2963 {
2964         struct gbe_intf *gbe_intf = intf_priv;
2965         struct netcp_intf *netcp = netdev_priv(ndev);
2966         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2967
2968         gbe_unregister_cpts(gbe_dev);
2969
2970         gbe_slave_stop(gbe_intf);
2971
2972         netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2973         netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2974
2975         gbe_intf->slave->open = false;
2976         atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
2977         return 0;
2978 }
2979
2980 #if IS_ENABLED(CONFIG_TI_CPTS)
2981 static void init_slave_ts_ctl(struct gbe_slave *slave)
2982 {
2983         slave->ts_ctl.uni = 1;
2984         slave->ts_ctl.dst_port_map =
2985                 (TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
2986         slave->ts_ctl.maddr_map =
2987                 (TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
2988 }
2989
2990 #else
2991 static void init_slave_ts_ctl(struct gbe_slave *slave)
2992 {
2993 }
2994 #endif /* CONFIG_TI_CPTS */
2995
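/*
 * Parse one slave port description from the device tree ("slave-port",
 * "link-interface" and an optional "phy-handle") and derive the port and
 * EMAC register block addresses and register offsets for the detected
 * subsystem type.
 *
 * A minimal, illustrative interface node, sketched only from the properties
 * read here (the numeric link-interface value and the phy label are
 * assumptions; the value must match one of the *_LINK_MAC_PHY definitions):
 *
 *	interface-0 {
 *		slave-port = <0>;
 *		link-interface = <1>;
 *		phy-handle = <&ethphy0>;
 *	};
 */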
2996 static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2997                       struct device_node *node)
2998 {
2999         int port_reg_num;
3000         u32 port_reg_ofs, emac_reg_ofs;
3001         u32 port_reg_blk_sz, emac_reg_blk_sz;
3002
3003         if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
3004                 dev_err(gbe_dev->dev, "missing slave-port parameter\n");
3005                 return -EINVAL;
3006         }
3007
3008         if (of_property_read_u32(node, "link-interface",
3009                                  &slave->link_interface)) {
3010                 dev_warn(gbe_dev->dev,
3011                          "missing link-interface value defaulting to 1G mac-phy link\n");
3012                 slave->link_interface = SGMII_LINK_MAC_PHY;
3013         }
3014
3015         slave->node = node;
3016         slave->open = false;
3017         if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3018             (slave->link_interface == RGMII_LINK_MAC_PHY) ||
3019             (slave->link_interface == XGMII_LINK_MAC_PHY))
3020                 slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
3021         slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
3022
3023         if (slave->link_interface >= XGMII_LINK_MAC_PHY)
3024                 slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
3025         else
3026                 slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
3027
3028         /* EMAC register blocks are contiguous, but port register blocks are not */
3029         port_reg_num = slave->slave_num;
3030         if (IS_SS_ID_VER_14(gbe_dev)) {
3031                 if (slave->slave_num > 1) {
3032                         port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
3033                         port_reg_num -= 2;
3034                 } else {
3035                         port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
3036                 }
3037                 emac_reg_ofs = GBE13_EMAC_OFFSET;
3038                 port_reg_blk_sz = 0x30;
3039                 emac_reg_blk_sz = 0x40;
3040         } else if (IS_SS_ID_MU(gbe_dev)) {
3041                 port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
3042                 emac_reg_ofs = GBENU_EMAC_OFFSET;
3043                 port_reg_blk_sz = 0x1000;
3044                 emac_reg_blk_sz = 0x1000;
3045         } else if (IS_SS_ID_XGBE(gbe_dev)) {
3046                 port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
3047                 emac_reg_ofs = XGBE10_EMAC_OFFSET;
3048                 port_reg_blk_sz = 0x30;
3049                 emac_reg_blk_sz = 0x40;
3050         } else {
3051                 dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
3052                         gbe_dev->ss_version);
3053                 return -EINVAL;
3054         }
3055
3056         slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
3057                                 (port_reg_blk_sz * port_reg_num);
3058         slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
3059                                 (emac_reg_blk_sz * slave->slave_num);
3060
3061         if (IS_SS_ID_VER_14(gbe_dev)) {
3062                 /* Initialize slave port register offsets */
3063                 GBE_SET_REG_OFS(slave, port_regs, port_vlan);
3064                 GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3065                 GBE_SET_REG_OFS(slave, port_regs, sa_lo);
3066                 GBE_SET_REG_OFS(slave, port_regs, sa_hi);
3067                 GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3068                 GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3069                 GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3070                 GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3071                 GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3072
3073                 /* Initialize EMAC register offsets */
3074                 GBE_SET_REG_OFS(slave, emac_regs, mac_control);
3075                 GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3076                 GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3077
3078         } else if (IS_SS_ID_MU(gbe_dev)) {
3079                 /* Initialize slave port register offsets */
3080                 GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
3081                 GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
3082                 GBENU_SET_REG_OFS(slave, port_regs, rx_pri_map);
3083                 GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
3084                 GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
3085                 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
3086                 GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3087                 GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
3088                 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3089                 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
3090                 GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
3091
3092                 /* Initialize EMAC register offsets */
3093                 GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
3094                 GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
3095
3096         } else if (IS_SS_ID_XGBE(gbe_dev)) {
3097                 /* Initialize slave port register offsets */
3098                 XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
3099                 XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3100                 XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
3101                 XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
3102                 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3103                 XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3104                 XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3105                 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3106                 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3107
3108                 /* Initialize EMAC register offsets */
3109                 XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
3110                 XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3111                 XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3112         }
3113
3114         atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
3115
3116         init_slave_ts_ctl(slave);
3117         return 0;
3118 }
3119
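/*
 * Secondary slave ports are switch ports that have no network interface of
 * their own. They are brought up once at probe time; ports with a MAC-PHY
 * link additionally get their PHY connected through a dummy net_device so
 * that the PHY state machine can run.
 */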
3120 static void init_secondary_ports(struct gbe_priv *gbe_dev,
3121                                  struct device_node *node)
3122 {
3123         struct device *dev = gbe_dev->dev;
3124         phy_interface_t phy_mode;
3125         struct gbe_priv **priv;
3126         struct device_node *port;
3127         struct gbe_slave *slave;
3128         bool mac_phy_link = false;
3129
3130         for_each_child_of_node(node, port) {
3131                 slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
3132                 if (!slave) {
3133                         dev_err(dev, "memory alloc failed for secondary port(%pOFn), skipping...\n",
3134                                 port);
3135                         continue;
3136                 }
3137
3138                 if (init_slave(gbe_dev, slave, port)) {
3139                         dev_err(dev,
3140                                 "Failed to initialize secondary port(%pOFn), skipping...\n",
3141                                 port);
3142                         devm_kfree(dev, slave);
3143                         continue;
3144                 }
3145
3146                 if (!IS_SS_ID_2U(gbe_dev))
3147                         gbe_sgmii_config(gbe_dev, slave);
3148                 gbe_port_reset(slave);
3149                 gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
3150                 list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
3151                 gbe_dev->num_slaves++;
3152                 if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3153                     (slave->link_interface == XGMII_LINK_MAC_PHY))
3154                         mac_phy_link = true;
3155
3156                 slave->open = true;
3157                 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3158                         of_node_put(port);
3159                         break;
3160                 }
3161         }
3162
3163         /* of_phy_connect() is needed only for MAC-PHY interface */
3164         if (!mac_phy_link)
3165                 return;
3166
3167         /* Allocate a dummy netdev for attaching to the PHY devices */
3168         gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
3169                                         NET_NAME_UNKNOWN, ether_setup);
3170         if (!gbe_dev->dummy_ndev) {
3171                 dev_err(dev,
3172                         "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
3173                 return;
3174         }
3175         priv = netdev_priv(gbe_dev->dummy_ndev);
3176         *priv = gbe_dev;
3177
3178         if (slave->link_interface == SGMII_LINK_MAC_PHY) {
3179                 phy_mode = PHY_INTERFACE_MODE_SGMII;
3180                 slave->phy_port_t = PORT_MII;
3181         } else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
3182                 phy_mode = PHY_INTERFACE_MODE_RGMII;
3183                 slave->phy_port_t = PORT_MII;
3184         } else {
3185                 phy_mode = PHY_INTERFACE_MODE_NA;
3186                 slave->phy_port_t = PORT_FIBRE;
3187         }
3188
3189         for_each_sec_slave(slave, gbe_dev) {
3190                 if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
3191                     (slave->link_interface != RGMII_LINK_MAC_PHY) &&
3192                     (slave->link_interface != XGMII_LINK_MAC_PHY))
3193                         continue;
3194                 slave->phy =
3195                         of_phy_connect(gbe_dev->dummy_ndev,
3196                                        slave->phy_node,
3197                                        gbe_adjust_link_sec_slaves,
3198                                        0, phy_mode);
3199                 if (!slave->phy) {
3200                         dev_err(dev, "phy not found for slave %d\n",
3201                                 slave->slave_num);
3202                 } else {
3203                         dev_dbg(dev, "phy found: %s\n",
3204                                 phydev_name(slave->phy));
3205                         phy_start(slave->phy);
3206                 }
3207         }
3208 }
3209
3210 static void free_secondary_ports(struct gbe_priv *gbe_dev)
3211 {
3212         struct gbe_slave *slave;
3213
3214         while (!list_empty(&gbe_dev->secondary_slaves)) {
3215                 slave = first_sec_slave(gbe_dev);
3216
3217                 if (slave->phy)
3218                         phy_disconnect(slave->phy);
3219                 list_del(&slave->slave_list);
3220         }
3221         if (gbe_dev->dummy_ndev)
3222                 free_netdev(gbe_dev->dummy_ndev);
3223 }
3224
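/*
 * Map the 10G (XGBE) subsystem, switch-module and SerDes register regions,
 * allocate the statistics buffers and fill in the register offset tables
 * for this subsystem.
 */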
3225 static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
3226                                  struct device_node *node)
3227 {
3228         struct resource res;
3229         void __iomem *regs;
3230         int ret, i;
3231
3232         ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
3233         if (ret) {
3234                 dev_err(gbe_dev->dev,
3235                         "Can't xlate xgbe of node(%pOFn) ss address at %d\n",
3236                         node, XGBE_SS_REG_INDEX);
3237                 return ret;
3238         }
3239
3240         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3241         if (IS_ERR(regs)) {
3242                 dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
3243                 return PTR_ERR(regs);
3244         }
3245         gbe_dev->ss_regs = regs;
3246
3247         ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
3248         if (ret) {
3249                 dev_err(gbe_dev->dev,
3250                         "Can't xlate xgbe of node(%pOFn) sm address at %d\n",
3251                         node, XGBE_SM_REG_INDEX);
3252                 return ret;
3253         }
3254
3255         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3256         if (IS_ERR(regs)) {
3257                 dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
3258                 return PTR_ERR(regs);
3259         }
3260         gbe_dev->switch_regs = regs;
3261
3262         ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
3263         if (ret) {
3264                 dev_err(gbe_dev->dev,
3265                         "Can't xlate xgbe serdes of node(%pOFn) address at %d\n",
3266                         node, XGBE_SERDES_REG_INDEX);
3267                 return ret;
3268         }
3269
3270         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3271         if (IS_ERR(regs)) {
3272                 dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
3273                 return PTR_ERR(regs);
3274         }
3275         gbe_dev->xgbe_serdes_regs = regs;
3276
3277         gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3278         gbe_dev->et_stats = xgbe10_et_stats;
3279         gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
3280
3281         gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3282                                          gbe_dev->num_et_stats, sizeof(u64),
3283                                          GFP_KERNEL);
3284         if (!gbe_dev->hw_stats) {
3285                 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3286                 return -ENOMEM;
3287         }
3288
3289         gbe_dev->hw_stats_prev =
3290                 devm_kcalloc(gbe_dev->dev,
3291                              gbe_dev->num_et_stats, sizeof(u32),
3292                              GFP_KERNEL);
3293         if (!gbe_dev->hw_stats_prev) {
3294                 dev_err(gbe_dev->dev,
3295                         "hw_stats_prev memory allocation failed\n");
3296                 return -ENOMEM;
3297         }
3298
3299         gbe_dev->ss_version = XGBE_SS_VERSION_10;
3300         gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
3301                                         XGBE10_SGMII_MODULE_OFFSET;
3302         gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
3303
3304         for (i = 0; i < gbe_dev->max_num_ports; i++)
3305                 gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3306                         XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
3307
3308         gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
3309         gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
3310         gbe_dev->ale_ports = gbe_dev->max_num_ports;
3311         gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
3312         gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
3313         gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3314
3315         /* Subsystem registers */
3316         XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3317         XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
3318
3319         /* Switch module registers */
3320         XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3321         XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3322         XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3323         XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3324         XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3325
3326         /* Host port registers */
3327         XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3328         XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3329         XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3330         return 0;
3331 }
3332
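/*
 * Map the GBE subsystem register region and read its ID/version register,
 * which tells the version 1.4 (K2HK) and NU/2U flavours of the subsystem
 * apart.
 */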
3333 static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
3334                                     struct device_node *node)
3335 {
3336         struct resource res;
3337         void __iomem *regs;
3338         int ret;
3339
3340         ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
3341         if (ret) {
3342                 dev_err(gbe_dev->dev,
3343                         "Can't translate of node(%pOFn) of gbe ss address at %d\n",
3344                         node, GBE_SS_REG_INDEX);
3345                 return ret;
3346         }
3347
3348         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3349         if (IS_ERR(regs)) {
3350                 dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
3351                 return PTR_ERR(regs);
3352         }
3353         gbe_dev->ss_regs = regs;
3354         gbe_dev->ss_version = readl(gbe_dev->ss_regs);
3355         return 0;
3356 }
3357
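/*
 * Resource setup for the version 1.4 (GBE13) subsystem: map the SGMII
 * port 3/4 and switch-module regions, allocate the statistics buffers and
 * fill in the register offsets. The subsystem (SS) region was already
 * mapped by get_gbe_resource_version().
 */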
3358 static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
3359                                 struct device_node *node)
3360 {
3361         struct resource res;
3362         void __iomem *regs;
3363         int i, ret;
3364
3365         ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
3366         if (ret) {
3367                 dev_err(gbe_dev->dev,
3368                         "Can't translate of gbe node(%pOFn) address at index %d\n",
3369                         node, GBE_SGMII34_REG_INDEX);
3370                 return ret;
3371         }
3372
3373         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3374         if (IS_ERR(regs)) {
3375                 dev_err(gbe_dev->dev,
3376                         "Failed to map gbe sgmii port34 register base\n");
3377                 return PTR_ERR(regs);
3378         }
3379         gbe_dev->sgmii_port34_regs = regs;
3380
3381         ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
3382         if (ret) {
3383                 dev_err(gbe_dev->dev,
3384                         "Can't translate of gbe node(%pOFn) address at index %d\n",
3385                         node, GBE_SM_REG_INDEX);
3386                 return ret;
3387         }
3388
3389         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3390         if (IS_ERR(regs)) {
3391                 dev_err(gbe_dev->dev,
3392                         "Failed to map gbe switch module register base\n");
3393                 return PTR_ERR(regs);
3394         }
3395         gbe_dev->switch_regs = regs;
3396
3397         gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
3398         gbe_dev->et_stats = gbe13_et_stats;
3399         gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
3400
3401         gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3402                                          gbe_dev->num_et_stats, sizeof(u64),
3403                                          GFP_KERNEL);
3404         if (!gbe_dev->hw_stats) {
3405                 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3406                 return -ENOMEM;
3407         }
3408
3409         gbe_dev->hw_stats_prev =
3410                 devm_kcalloc(gbe_dev->dev,
3411                              gbe_dev->num_et_stats, sizeof(u32),
3412                              GFP_KERNEL);
3413         if (!gbe_dev->hw_stats_prev) {
3414                 dev_err(gbe_dev->dev,
3415                         "hw_stats_prev memory allocation failed\n");
3416                 return -ENOMEM;
3417         }
3418
3419         gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
3420         gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
3421
3422         /* K2HK has only 2 hw stats modules visible at a time, so
3423          * modules 0 & 2 point to one base and
3424          * modules 1 & 3 point to the other base
3425          */
3426         for (i = 0; i < gbe_dev->max_num_slaves; i++) {
3427                 gbe_dev->hw_stats_regs[i] =
3428                         gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
3429                         (GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
3430         }
3431
3432         gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
3433         gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
3434         gbe_dev->ale_ports = gbe_dev->max_num_ports;
3435         gbe_dev->host_port = GBE13_HOST_PORT_NUM;
3436         gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
3437         gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
3438
3439         /* Subsystem registers */
3440         GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3441
3442         /* Switch module registers */
3443         GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3444         GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3445         GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
3446         GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3447         GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3448         GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3449
3450         /* Host port registers */
3451         GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3452         GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3453         return 0;
3454 }
3455
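/*
 * Resource setup for the NU/2U (GBENU) subsystem: allocate the statistics
 * buffers, map the switch-module region and fill in the register offsets.
 * The subsystem (SS) region was already mapped by get_gbe_resource_version().
 */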
3456 static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
3457                                 struct device_node *node)
3458 {
3459         struct resource res;
3460         void __iomem *regs;
3461         int i, ret;
3462
3463         gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3464         gbe_dev->et_stats = gbenu_et_stats;
3465
3466         if (IS_SS_ID_MU(gbe_dev))
3467                 gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3468                         (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
3469         else
3470                 gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3471                                         GBENU_ET_STATS_PORT_SIZE;
3472
3473         gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3474                                          gbe_dev->num_et_stats, sizeof(u64),
3475                                          GFP_KERNEL);
3476         if (!gbe_dev->hw_stats) {
3477                 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3478                 return -ENOMEM;
3479         }
3480
3481         gbe_dev->hw_stats_prev =
3482                 devm_kcalloc(gbe_dev->dev,
3483                              gbe_dev->num_et_stats, sizeof(u32),
3484                              GFP_KERNEL);
3485         if (!gbe_dev->hw_stats_prev) {
3486                 dev_err(gbe_dev->dev,
3487                         "hw_stats_prev memory allocation failed\n");
3488                 return -ENOMEM;
3489         }
3490
3491         ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
3492         if (ret) {
3493                 dev_err(gbe_dev->dev,
3494                         "Can't translate of gbenu node(%pOFn) addr at index %d\n",
3495                         node, GBENU_SM_REG_INDEX);
3496                 return ret;
3497         }
3498
3499         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3500         if (IS_ERR(regs)) {
3501                 dev_err(gbe_dev->dev,
3502                         "Failed to map gbenu switch module register base\n");
3503                 return PTR_ERR(regs);
3504         }
3505         gbe_dev->switch_regs = regs;
3506
3507         if (!IS_SS_ID_2U(gbe_dev))
3508                 gbe_dev->sgmii_port_regs =
3509                        gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
3510
3511         /* Although the SGMII modules are memory mapped as one contiguous
3512          * region on GBENU devices, setting sgmii_port34_regs keeps the
3513          * code consistent when calling the SGMII API.
3514          */
3515         gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
3516                                      (2 * GBENU_SGMII_MODULE_SIZE);
3517
3518         gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
3519
3520         for (i = 0; i < (gbe_dev->max_num_ports); i++)
3521                 gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3522                         GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
3523
3524         gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
3525         gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
3526         gbe_dev->ale_ports = gbe_dev->max_num_ports;
3527         gbe_dev->host_port = GBENU_HOST_PORT_NUM;
3528         gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3529
3530         /* Subsystem registers */
3531         GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3532         /* ok to set for all MU (NU/2U) devices, but only 2U uses it */
3533         GBENU_SET_REG_OFS(gbe_dev, ss_regs, rgmii_status);
3534
3535         /* Switch module registers */
3536         GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3537         GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
3538         GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3539         GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3540
3541         /* Host port registers */
3542         GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3543         GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3544
3545         /* For NU only; 2U does not need tx_pri_map.
3546          * The NU CPPI port 0 TX packet streaming interface has (n-1)*8
3547          * egress threads, while 2U has only one such thread.
3548          */
3549         GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3550         return 0;
3551 }
3552
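/*
 * netcp module probe: identify the subsystem from the compatible string, map
 * its register regions, open the TX pipe, create the ALE and CPTS instances,
 * set up the primary and secondary slave ports and start the link/statistics
 * timer.
 */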
3553 static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
3554                      struct device_node *node, void **inst_priv)
3555 {
3556         struct device_node *interfaces, *interface, *cpts_node;
3557         struct device_node *secondary_ports;
3558         struct cpsw_ale_params ale_params;
3559         struct gbe_priv *gbe_dev;
3560         u32 slave_num;
3561         int i, ret = 0;
3562
3563         if (!node) {
3564                 dev_err(dev, "device tree info unavailable\n");
3565                 return -ENODEV;
3566         }
3567
3568         gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
3569         if (!gbe_dev)
3570                 return -ENOMEM;
3571
3572         if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
3573             of_device_is_compatible(node, "ti,netcp-gbe")) {
3574                 gbe_dev->max_num_slaves = 4;
3575         } else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
3576                 gbe_dev->max_num_slaves = 8;
3577         } else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
3578                 gbe_dev->max_num_slaves = 1;
3579                 gbe_module.set_rx_mode = gbe_set_rx_mode;
3580         } else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
3581                 gbe_dev->max_num_slaves = 2;
3582         } else {
3583                 dev_err(dev, "device tree node for unknown device\n");
3584                 return -EINVAL;
3585         }
3586         gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
3587
3588         gbe_dev->dev = dev;
3589         gbe_dev->netcp_device = netcp_device;
3590         gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
3591
3592         /* init the hw stats lock */
3593         spin_lock_init(&gbe_dev->hw_stats_lock);
3594
3595         if (of_find_property(node, "enable-ale", NULL)) {
3596                 gbe_dev->enable_ale = true;
3597                 dev_info(dev, "ALE enabled\n");
3598         } else {
3599                 gbe_dev->enable_ale = false;
3600                 dev_dbg(dev, "ALE bypass enabled\n");
3601         }
3602
3603         ret = of_property_read_u32(node, "tx-queue",
3604                                    &gbe_dev->tx_queue_id);
3605         if (ret < 0) {
3606                 dev_err(dev, "missing \"tx-queue\" parameter\n");
3607                 gbe_dev->tx_queue_id = GBE_TX_QUEUE;
3608         }
3609
3610         ret = of_property_read_string(node, "tx-channel",
3611                                       &gbe_dev->dma_chan_name);
3612         if (ret < 0) {
3613                 dev_err(dev, "missing \"tx-channel\" parameter\n");
3614                 return -EINVAL;
3615         }
3616
3617         if (of_node_name_eq(node, "gbe")) {
3618                 ret = get_gbe_resource_version(gbe_dev, node);
3619                 if (ret)
3620                         return ret;
3621
3622                 dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
3623
3624                 if (IS_SS_ID_VER_14(gbe_dev))
3625                         ret = set_gbe_ethss14_priv(gbe_dev, node);
3626                 else if (IS_SS_ID_MU(gbe_dev))
3627                         ret = set_gbenu_ethss_priv(gbe_dev, node);
3628                 else
3629                         ret = -ENODEV;
3630
3631         } else if (of_node_name_eq(node, "xgbe")) {
3632                 ret = set_xgbe_ethss10_priv(gbe_dev, node);
3633                 if (ret)
3634                         return ret;
3635                 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
3636                                              gbe_dev->ss_regs);
3637         } else {
3638                 dev_err(dev, "unknown GBE node(%pOFn)\n", node);
3639                 ret = -ENODEV;
3640         }
3641
3642         if (ret)
3643                 return ret;
3644
3645         interfaces = of_get_child_by_name(node, "interfaces");
3646         if (!interfaces)
3647                 dev_err(dev, "could not find interfaces\n");
3648
3649         ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
3650                                 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
3651         if (ret) {
3652                 of_node_put(interfaces);
3653                 return ret;
3654         }
3655
3656         ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
3657         if (ret) {
3658                 of_node_put(interfaces);
3659                 return ret;
3660         }
3661
3662         /* Create network interfaces */
3663         INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
3664         for_each_child_of_node(interfaces, interface) {
3665                 ret = of_property_read_u32(interface, "slave-port", &slave_num);
3666                 if (ret) {
3667                         dev_err(dev, "missing slave-port parameter, skipping interface configuration for %pOFn\n",
3668                                 interface);
3669                         continue;
3670                 }
3671                 gbe_dev->num_slaves++;
3672                 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3673                         of_node_put(interface);
3674                         break;
3675                 }
3676         }
3677         of_node_put(interfaces);
3678
3679         if (!gbe_dev->num_slaves)
3680                 dev_warn(dev, "No network interface configured\n");
3681
3682         /* Initialize Secondary slave ports */
3683         secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
3684         INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
3685         if (secondary_ports && (gbe_dev->num_slaves <  gbe_dev->max_num_slaves))
3686                 init_secondary_ports(gbe_dev, secondary_ports);
3687         of_node_put(secondary_ports);
3688
3689         if (!gbe_dev->num_slaves) {
3690                 dev_err(dev,
3691                         "No network interface or secondary ports configured\n");
3692                 ret = -ENODEV;
3693                 goto free_sec_ports;
3694         }
3695
3696         memset(&ale_params, 0, sizeof(ale_params));
3697         ale_params.dev          = gbe_dev->dev;
3698         ale_params.ale_regs     = gbe_dev->ale_reg;
3699         ale_params.ale_ageout   = GBE_DEFAULT_ALE_AGEOUT;
3700         ale_params.ale_entries  = gbe_dev->ale_entries;
3701         ale_params.ale_ports    = gbe_dev->ale_ports;
3702         if (IS_SS_ID_MU(gbe_dev)) {
3703                 ale_params.major_ver_mask = 0x7;
3704                 ale_params.nu_switch_ale = true;
3705         }
3706         gbe_dev->ale = cpsw_ale_create(&ale_params);
3707         if (IS_ERR(gbe_dev->ale)) {
3708                 dev_err(gbe_dev->dev, "error initializing ale engine\n");
3709                 ret = PTR_ERR(gbe_dev->ale);
3710                 goto free_sec_ports;
3711         } else {
3712                 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
3713         }
3714
3715         cpts_node = of_get_child_by_name(node, "cpts");
3716         if (!cpts_node)
3717                 cpts_node = of_node_get(node);
3718
3719         gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg,
3720                                     cpts_node, 0);
3721         of_node_put(cpts_node);
3722         if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
3723                 ret = PTR_ERR(gbe_dev->cpts);
3724                 goto free_sec_ports;
3725         }
3726
3727         /* initialize host port */
3728         gbe_init_host_port(gbe_dev);
3729
3730         spin_lock_bh(&gbe_dev->hw_stats_lock);
3731         for (i = 0; i < gbe_dev->num_stats_mods; i++) {
3732                 if (IS_SS_ID_VER_14(gbe_dev))
3733                         gbe_reset_mod_stats_ver14(gbe_dev, i);
3734                 else
3735                         gbe_reset_mod_stats(gbe_dev, i);
3736         }
3737         spin_unlock_bh(&gbe_dev->hw_stats_lock);
3738
3739         timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
3740         gbe_dev->timer.expires   = jiffies + GBE_TIMER_INTERVAL;
3741         add_timer(&gbe_dev->timer);
3742         *inst_priv = gbe_dev;
3743         return 0;
3744
3745 free_sec_ports:
3746         free_secondary_ports(gbe_dev);
3747         return ret;
3748 }
3749
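/*
 * Attach a network interface to one slave port: allocate the per-interface
 * state, parse the slave description from the interface DT node and hook up
 * the keystone ethtool ops.
 */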
3750 static int gbe_attach(void *inst_priv, struct net_device *ndev,
3751                       struct device_node *node, void **intf_priv)
3752 {
3753         struct gbe_priv *gbe_dev = inst_priv;
3754         struct gbe_intf *gbe_intf;
3755         int ret;
3756
3757         if (!node) {
3758                 dev_err(gbe_dev->dev, "interface node not available\n");
3759                 return -ENODEV;
3760         }
3761
3762         gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
3763         if (!gbe_intf)
3764                 return -ENOMEM;
3765
3766         gbe_intf->ndev = ndev;
3767         gbe_intf->dev = gbe_dev->dev;
3768         gbe_intf->gbe_dev = gbe_dev;
3769
3770         gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
3771                                         sizeof(*gbe_intf->slave),
3772                                         GFP_KERNEL);
3773         if (!gbe_intf->slave) {
3774                 ret = -ENOMEM;
3775                 goto fail;
3776         }
3777
3778         if (init_slave(gbe_dev, gbe_intf->slave, node)) {
3779                 ret = -ENODEV;
3780                 goto fail;
3781         }
3782
3783         gbe_intf->tx_pipe = gbe_dev->tx_pipe;
3784         ndev->ethtool_ops = &keystone_ethtool_ops;
3785         list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
3786         *intf_priv = gbe_intf;
3787         return 0;
3788
3789 fail:
3790         if (gbe_intf->slave)
3791                 devm_kfree(gbe_dev->dev, gbe_intf->slave);
3792         if (gbe_intf)
3793                 devm_kfree(gbe_dev->dev, gbe_intf);
3794         return ret;
3795 }
3796
3797 static int gbe_release(void *intf_priv)
3798 {
3799         struct gbe_intf *gbe_intf = intf_priv;
3800
3801         gbe_intf->ndev->ethtool_ops = NULL;
3802         list_del(&gbe_intf->gbe_intf_list);
3803         devm_kfree(gbe_intf->dev, gbe_intf->slave);
3804         devm_kfree(gbe_intf->dev, gbe_intf);
3805         return 0;
3806 }
3807
3808 static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3809 {
3810         struct gbe_priv *gbe_dev = inst_priv;
3811
3812         del_timer_sync(&gbe_dev->timer);
3813         cpts_release(gbe_dev->cpts);
3814         cpsw_ale_stop(gbe_dev->ale);
3815         netcp_txpipe_close(&gbe_dev->tx_pipe);
3816         free_secondary_ports(gbe_dev);
3817
3818         if (!list_empty(&gbe_dev->gbe_intf_head))
3819                 dev_alert(gbe_dev->dev,
3820                           "unreleased ethss interfaces present\n");
3821
3822         return 0;
3823 }
3824
3825 static struct netcp_module gbe_module = {
3826         .name           = GBE_MODULE_NAME,
3827         .owner          = THIS_MODULE,
3828         .primary        = true,
3829         .probe          = gbe_probe,
3830         .open           = gbe_open,
3831         .close          = gbe_close,
3832         .remove         = gbe_remove,
3833         .attach         = gbe_attach,
3834         .release        = gbe_release,
3835         .add_addr       = gbe_add_addr,
3836         .del_addr       = gbe_del_addr,
3837         .add_vid        = gbe_add_vid,
3838         .del_vid        = gbe_del_vid,
3839         .ioctl          = gbe_ioctl,
3840 };
3841
3842 static struct netcp_module xgbe_module = {
3843         .name           = XGBE_MODULE_NAME,
3844         .owner          = THIS_MODULE,
3845         .primary        = true,
3846         .probe          = gbe_probe,
3847         .open           = gbe_open,
3848         .close          = gbe_close,
3849         .remove         = gbe_remove,
3850         .attach         = gbe_attach,
3851         .release        = gbe_release,
3852         .add_addr       = gbe_add_addr,
3853         .del_addr       = gbe_del_addr,
3854         .add_vid        = gbe_add_vid,
3855         .del_vid        = gbe_del_vid,
3856         .ioctl          = gbe_ioctl,
3857 };
3858
3859 static int __init keystone_gbe_init(void)
3860 {
3861         int ret;
3862
3863         ret = netcp_register_module(&gbe_module);
3864         if (ret)
3865                 return ret;
3866
3867         ret = netcp_register_module(&xgbe_module);
3868         if (ret)
3869                 return ret;
3870
3871         return 0;
3872 }
3873 module_init(keystone_gbe_init);
3874
3875 static void __exit keystone_gbe_exit(void)
3876 {
3877         netcp_unregister_module(&gbe_module);
3878         netcp_unregister_module(&xgbe_module);
3879 }
3880 module_exit(keystone_gbe_exit);
3881
3882 MODULE_LICENSE("GPL v2");
3883 MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
3884 MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");