// SPDX-License-Identifier: GPL-2.0
/*
 * Keystone GBE and XGBE subsystem code
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:     Sandeep Nair <sandeep_n@ti.com>
 *              Sandeep Paulraj <s-paulraj@ti.com>
 *              Cyril Chemparathy <cyril@ti.com>
 *              Santosh Shilimkar <santosh.shilimkar@ti.com>
 *              Wingman Kwok <w-kwok2@ti.com>
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/ptp_classify.h>
#include <linux/net_tstamp.h>
#include <linux/ethtool.h>

#include "cpsw.h"
#include "cpsw_ale.h"
#include "netcp.h"
#include "cpts.h"

#define NETCP_DRIVER_NAME               "TI KeyStone Ethernet Driver"
#define NETCP_DRIVER_VERSION            "v1.0"

#define GBE_IDENT(reg)                  ((reg >> 16) & 0xffff)
#define GBE_MAJOR_VERSION(reg)          (reg >> 8 & 0x7)
#define GBE_MINOR_VERSION(reg)          (reg & 0xff)
#define GBE_RTL_VERSION(reg)            ((reg >> 11) & 0x1f)
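/* Bit layout of the subsystem id_ver register as decoded above:
 * bits 31-16 identify the subsystem, bits 15-11 hold the RTL version,
 * bits 10-8 the major version and bits 7-0 the minor version.
 */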

/* 1G Ethernet SS defines */
#define GBE_MODULE_NAME                 "netcp-gbe"
#define GBE_SS_VERSION_14               0x4ed2

#define GBE_SS_REG_INDEX                0
#define GBE_SGMII34_REG_INDEX           1
#define GBE_SM_REG_INDEX                2
/* offset relative to base of GBE_SS_REG_INDEX */
#define GBE13_SGMII_MODULE_OFFSET       0x100
/* offset relative to base of GBE_SM_REG_INDEX */
#define GBE13_HOST_PORT_OFFSET          0x34
#define GBE13_SLAVE_PORT_OFFSET         0x60
#define GBE13_EMAC_OFFSET               0x100
#define GBE13_SLAVE_PORT2_OFFSET        0x200
#define GBE13_HW_STATS_OFFSET           0x300
#define GBE13_CPTS_OFFSET               0x500
#define GBE13_ALE_OFFSET                0x600
#define GBE13_HOST_PORT_NUM             0
#define GBE13_NUM_ALE_ENTRIES           1024

/* 1G Ethernet NU SS defines */
#define GBENU_MODULE_NAME               "netcp-gbenu"
#define GBE_SS_ID_NU                    0x4ee6
#define GBE_SS_ID_2U                    0x4ee8

#define IS_SS_ID_MU(d) \
        ((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
         (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))

#define IS_SS_ID_NU(d) \
        (GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)

#define IS_SS_ID_VER_14(d) \
        (GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14)
#define IS_SS_ID_2U(d) \
        (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)

#define GBENU_SS_REG_INDEX              0
#define GBENU_SM_REG_INDEX              1
#define GBENU_SGMII_MODULE_OFFSET       0x100
#define GBENU_HOST_PORT_OFFSET          0x1000
#define GBENU_SLAVE_PORT_OFFSET         0x2000
#define GBENU_EMAC_OFFSET               0x2330
#define GBENU_HW_STATS_OFFSET           0x1a000
#define GBENU_CPTS_OFFSET               0x1d000
#define GBENU_ALE_OFFSET                0x1e000
#define GBENU_HOST_PORT_NUM             0
#define GBENU_SGMII_MODULE_SIZE         0x100

/* 10G Ethernet SS defines */
#define XGBE_MODULE_NAME                "netcp-xgbe"
#define XGBE_SS_VERSION_10              0x4ee4

#define XGBE_SS_REG_INDEX               0
#define XGBE_SM_REG_INDEX               1
#define XGBE_SERDES_REG_INDEX           2

/* offset relative to base of XGBE_SS_REG_INDEX */
#define XGBE10_SGMII_MODULE_OFFSET      0x100
#define IS_SS_ID_XGBE(d)                ((d)->ss_version == XGBE_SS_VERSION_10)
/* offset relative to base of XGBE_SM_REG_INDEX */
#define XGBE10_HOST_PORT_OFFSET         0x34
#define XGBE10_SLAVE_PORT_OFFSET        0x64
#define XGBE10_EMAC_OFFSET              0x400
#define XGBE10_CPTS_OFFSET              0x600
#define XGBE10_ALE_OFFSET               0x700
#define XGBE10_HW_STATS_OFFSET          0x800
#define XGBE10_HOST_PORT_NUM            0
#define XGBE10_NUM_ALE_ENTRIES          2048

#define GBE_TIMER_INTERVAL                      (HZ / 2)

/* Soft reset register values */
#define SOFT_RESET_MASK                         BIT(0)
#define SOFT_RESET                              BIT(0)
#define DEVICE_EMACSL_RESET_POLL_COUNT          100
#define GMACSL_RET_WARN_RESET_INCOMPLETE        -2

#define MACSL_RX_ENABLE_CSF                     BIT(23)
#define MACSL_ENABLE_EXT_CTL                    BIT(18)
#define MACSL_XGMII_ENABLE                      BIT(13)
#define MACSL_XGIG_MODE                         BIT(8)
#define MACSL_GIG_MODE                          BIT(7)
#define MACSL_GMII_ENABLE                       BIT(5)
#define MACSL_FULLDUPLEX                        BIT(0)

#define GBE_CTL_P0_ENABLE                       BIT(2)
#define ETH_SW_CTL_P0_TX_CRC_REMOVE             BIT(13)
#define GBE13_REG_VAL_STAT_ENABLE_ALL           0xff
#define XGBE_REG_VAL_STAT_ENABLE_ALL            0xf
#define GBE_STATS_CD_SEL                        BIT(28)

#define GBE_PORT_MASK(x)                        (BIT(x) - 1)
#define GBE_MASK_NO_PORTS                       0
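/* GBE_PORT_MASK(x) sets the x lowest port bits; for example,
 * GBE_PORT_MASK(4) evaluates to 0xf, covering ports 0-3.
 */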

#define GBE_DEF_1G_MAC_CONTROL                                  \
                (MACSL_GIG_MODE | MACSL_GMII_ENABLE |           \
                 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)

#define GBE_DEF_10G_MAC_CONTROL                         \
                (MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |         \
                 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)

#define GBE_STATSA_MODULE                       0
#define GBE_STATSB_MODULE                       1
#define GBE_STATSC_MODULE                       2
#define GBE_STATSD_MODULE                       3

#define GBENU_STATS0_MODULE                     0
#define GBENU_STATS1_MODULE                     1
#define GBENU_STATS2_MODULE                     2
#define GBENU_STATS3_MODULE                     3
#define GBENU_STATS4_MODULE                     4
#define GBENU_STATS5_MODULE                     5
#define GBENU_STATS6_MODULE                     6
#define GBENU_STATS7_MODULE                     7
#define GBENU_STATS8_MODULE                     8

#define XGBE_STATS0_MODULE                      0
#define XGBE_STATS1_MODULE                      1
#define XGBE_STATS2_MODULE                      2

/* s: 0-based slave_port */
#define SGMII_BASE(d, s) \
        (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
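/* Slave ports 0 and 1 are reached through sgmii_port_regs; higher
 * numbered slave ports use sgmii_port34_regs, the separately mapped
 * SGMII34 register region (see GBE_SGMII34_REG_INDEX above).
 */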

#define GBE_TX_QUEUE                            648
#define GBE_TXHOOK_ORDER                        0
#define GBE_RXHOOK_ORDER                        0
#define GBE_DEFAULT_ALE_AGEOUT                  30
#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
#define SLAVE_LINK_IS_RGMII(s) \
        (((s)->link_interface >= RGMII_LINK_MAC_PHY) && \
         ((s)->link_interface <= RGMII_LINK_MAC_PHY_NO_MDIO))
#define SLAVE_LINK_IS_SGMII(s) \
        ((s)->link_interface <= SGMII_LINK_MAC_PHY_NO_MDIO)
#define NETCP_LINK_STATE_INVALID                -1

#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
                offsetof(struct gbe##_##rb, rn)
#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
                offsetof(struct gbenu##_##rb, rn)
#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
                offsetof(struct xgbe##_##rb, rn)
#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
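/* For example, at a use site GBE_SET_REG_OFS(slave, emac_regs, soft_reset)
 * expands to
 *
 *      slave->emac_regs_ofs.soft_reset =
 *              offsetof(struct gbe_emac_regs, soft_reset);
 *
 * and GBE_REG_ADDR(slave, emac_regs, soft_reset) then yields
 * (slave->emac_regs + slave->emac_regs_ofs.soft_reset), the mapped
 * address of that register for the version-specific layout.
 */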

#define HOST_TX_PRI_MAP_DEFAULT                 0x00000000

#if IS_ENABLED(CONFIG_TI_CPTS)
/* Px_TS_CTL register fields */
#define TS_RX_ANX_F_EN                          BIT(0)
#define TS_RX_VLAN_LT1_EN                       BIT(1)
#define TS_RX_VLAN_LT2_EN                       BIT(2)
#define TS_RX_ANX_D_EN                          BIT(3)
#define TS_TX_ANX_F_EN                          BIT(4)
#define TS_TX_VLAN_LT1_EN                       BIT(5)
#define TS_TX_VLAN_LT2_EN                       BIT(6)
#define TS_TX_ANX_D_EN                          BIT(7)
#define TS_LT2_EN                               BIT(8)
#define TS_RX_ANX_E_EN                          BIT(9)
#define TS_TX_ANX_E_EN                          BIT(10)
#define TS_MSG_TYPE_EN_SHIFT                    16
#define TS_MSG_TYPE_EN_MASK                     0xffff

/* Px_TS_SEQ_LTYPE register fields */
#define TS_SEQ_ID_OFS_SHIFT                     16
#define TS_SEQ_ID_OFS_MASK                      0x3f

/* Px_TS_CTL_LTYPE2 register fields */
#define TS_107                                  BIT(16)
#define TS_129                                  BIT(17)
#define TS_130                                  BIT(18)
#define TS_131                                  BIT(19)
#define TS_132                                  BIT(20)
#define TS_319                                  BIT(21)
#define TS_320                                  BIT(22)
#define TS_TTL_NONZERO                          BIT(23)
#define TS_UNI_EN                               BIT(24)
#define TS_UNI_EN_SHIFT                         24

#define TS_TX_ANX_ALL_EN         \
        (TS_TX_ANX_D_EN | TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)

#define TS_RX_ANX_ALL_EN         \
        (TS_RX_ANX_D_EN | TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)

#define TS_CTL_DST_PORT                         TS_319
#define TS_CTL_DST_PORT_SHIFT                   21

#define TS_CTL_MADDR_ALL        \
        (TS_107 | TS_129 | TS_130 | TS_131 | TS_132)

#define TS_CTL_MADDR_SHIFT                      16

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
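/* Bit n of EVENT_MSG_BITS corresponds to PTP messageType value n:
 * 0 = Sync, 1 = Delay_Req, 2 = Pdelay_Req, 3 = Pdelay_Resp.
 */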
#endif /* CONFIG_TI_CPTS */

struct xgbe_ss_regs {
        u32     id_ver;
        u32     synce_count;
        u32     synce_mux;
        u32     control;
};

struct xgbe_switch_regs {
        u32     id_ver;
        u32     control;
        u32     emcontrol;
        u32     stat_port_en;
        u32     ptype;
        u32     soft_idle;
        u32     thru_rate;
        u32     gap_thresh;
        u32     tx_start_wds;
        u32     flow_control;
        u32     cppi_thresh;
};

struct xgbe_port_regs {
        u32     blk_cnt;
        u32     port_vlan;
        u32     tx_pri_map;
        u32     sa_lo;
        u32     sa_hi;
        u32     ts_ctl;
        u32     ts_seq_ltype;
        u32     ts_vlan;
        u32     ts_ctl_ltype2;
        u32     ts_ctl2;
        u32     control;
};

struct xgbe_host_port_regs {
        u32     blk_cnt;
        u32     port_vlan;
        u32     tx_pri_map;
        u32     src_id;
        u32     rx_pri_map;
        u32     rx_maxlen;
};

struct xgbe_emac_regs {
        u32     id_ver;
        u32     mac_control;
        u32     mac_status;
        u32     soft_reset;
        u32     rx_maxlen;
        u32     __reserved_0;
        u32     rx_pause;
        u32     tx_pause;
        u32     em_control;
        u32     __reserved_1;
        u32     tx_gap;
        u32     rsvd[4];
};

struct xgbe_host_hw_stats {
        u32     rx_good_frames;
        u32     rx_broadcast_frames;
        u32     rx_multicast_frames;
        u32     __rsvd_0[3];
        u32     rx_oversized_frames;
        u32     __rsvd_1;
        u32     rx_undersized_frames;
        u32     __rsvd_2;
        u32     overrun_type4;
        u32     overrun_type5;
        u32     rx_bytes;
        u32     tx_good_frames;
        u32     tx_broadcast_frames;
        u32     tx_multicast_frames;
        u32     __rsvd_3[9];
        u32     tx_bytes;
        u32     tx_64byte_frames;
        u32     tx_65_to_127byte_frames;
        u32     tx_128_to_255byte_frames;
        u32     tx_256_to_511byte_frames;
        u32     tx_512_to_1023byte_frames;
        u32     tx_1024byte_frames;
        u32     net_bytes;
        u32     rx_sof_overruns;
        u32     rx_mof_overruns;
        u32     rx_dma_overruns;
};

struct xgbe_hw_stats {
        u32     rx_good_frames;
        u32     rx_broadcast_frames;
        u32     rx_multicast_frames;
        u32     rx_pause_frames;
        u32     rx_crc_errors;
        u32     rx_align_code_errors;
        u32     rx_oversized_frames;
        u32     rx_jabber_frames;
        u32     rx_undersized_frames;
        u32     rx_fragments;
        u32     overrun_type4;
        u32     overrun_type5;
        u32     rx_bytes;
        u32     tx_good_frames;
        u32     tx_broadcast_frames;
        u32     tx_multicast_frames;
        u32     tx_pause_frames;
        u32     tx_deferred_frames;
        u32     tx_collision_frames;
        u32     tx_single_coll_frames;
        u32     tx_mult_coll_frames;
        u32     tx_excessive_collisions;
        u32     tx_late_collisions;
        u32     tx_underrun;
        u32     tx_carrier_sense_errors;
        u32     tx_bytes;
        u32     tx_64byte_frames;
        u32     tx_65_to_127byte_frames;
        u32     tx_128_to_255byte_frames;
        u32     tx_256_to_511byte_frames;
        u32     tx_512_to_1023byte_frames;
        u32     tx_1024byte_frames;
        u32     net_bytes;
        u32     rx_sof_overruns;
        u32     rx_mof_overruns;
        u32     rx_dma_overruns;
};

struct gbenu_ss_regs {
        u32     id_ver;
        u32     synce_count;            /* NU */
        u32     synce_mux;              /* NU */
        u32     control;                /* 2U */
        u32     __rsvd_0[2];            /* 2U */
        u32     rgmii_status;           /* 2U */
        u32     ss_status;              /* 2U */
};

struct gbenu_switch_regs {
        u32     id_ver;
        u32     control;
        u32     __rsvd_0[2];
        u32     emcontrol;
        u32     stat_port_en;
        u32     ptype;                  /* NU */
        u32     soft_idle;
        u32     thru_rate;              /* NU */
        u32     gap_thresh;             /* NU */
        u32     tx_start_wds;           /* NU */
        u32     eee_prescale;           /* 2U */
        u32     tx_g_oflow_thresh_set;  /* NU */
        u32     tx_g_oflow_thresh_clr;  /* NU */
        u32     tx_g_buf_thresh_set_l;  /* NU */
        u32     tx_g_buf_thresh_set_h;  /* NU */
        u32     tx_g_buf_thresh_clr_l;  /* NU */
        u32     tx_g_buf_thresh_clr_h;  /* NU */
};

struct gbenu_port_regs {
        u32     __rsvd_0;
        u32     control;
        u32     max_blks;               /* 2U */
        u32     mem_align1;
        u32     blk_cnt;
        u32     port_vlan;
        u32     tx_pri_map;             /* NU */
        u32     pri_ctl;                /* 2U */
        u32     rx_pri_map;
        u32     rx_maxlen;
        u32     tx_blks_pri;            /* NU */
        u32     __rsvd_1;
        u32     idle2lpi;               /* 2U */
        u32     lpi2idle;               /* 2U */
        u32     eee_status;             /* 2U */
        u32     __rsvd_2;
        u32     __rsvd_3[176];          /* NU: more to add */
        u32     __rsvd_4[2];
        u32     sa_lo;
        u32     sa_hi;
        u32     ts_ctl;
        u32     ts_seq_ltype;
        u32     ts_vlan;
        u32     ts_ctl_ltype2;
        u32     ts_ctl2;
};

struct gbenu_host_port_regs {
        u32     __rsvd_0;
        u32     control;
        u32     flow_id_offset;         /* 2U */
        u32     __rsvd_1;
        u32     blk_cnt;
        u32     port_vlan;
        u32     tx_pri_map;             /* NU */
        u32     pri_ctl;
        u32     rx_pri_map;
        u32     rx_maxlen;
        u32     tx_blks_pri;            /* NU */
        u32     __rsvd_2;
        u32     idle2lpi;               /* 2U */
        u32     lpi2wake;               /* 2U */
        u32     eee_status;             /* 2U */
        u32     __rsvd_3;
        u32     __rsvd_4[184];          /* NU */
        u32     host_blks_pri;          /* NU */
};

struct gbenu_emac_regs {
        u32     mac_control;
        u32     mac_status;
        u32     soft_reset;
        u32     boff_test;
        u32     rx_pause;
        u32     __rsvd_0[11];           /* NU */
        u32     tx_pause;
        u32     __rsvd_1[11];           /* NU */
        u32     em_control;
        u32     tx_gap;
};

/* Some hw stat regs are applicable to slave port only.
 * This is handled by gbenu_et_stats struct.  Also some
 * are for SS version NU and some are for 2U.
 */
struct gbenu_hw_stats {
        u32     rx_good_frames;
        u32     rx_broadcast_frames;
        u32     rx_multicast_frames;
        u32     rx_pause_frames;                /* slave */
        u32     rx_crc_errors;
        u32     rx_align_code_errors;           /* slave */
        u32     rx_oversized_frames;
        u32     rx_jabber_frames;               /* slave */
        u32     rx_undersized_frames;
        u32     rx_fragments;                   /* slave */
        u32     ale_drop;
        u32     ale_overrun_drop;
        u32     rx_bytes;
        u32     tx_good_frames;
        u32     tx_broadcast_frames;
        u32     tx_multicast_frames;
        u32     tx_pause_frames;                /* slave */
        u32     tx_deferred_frames;             /* slave */
        u32     tx_collision_frames;            /* slave */
        u32     tx_single_coll_frames;          /* slave */
        u32     tx_mult_coll_frames;            /* slave */
        u32     tx_excessive_collisions;        /* slave */
        u32     tx_late_collisions;             /* slave */
        u32     rx_ipg_error;                   /* slave 10G only */
        u32     tx_carrier_sense_errors;        /* slave */
        u32     tx_bytes;
        u32     tx_64B_frames;
        u32     tx_65_to_127B_frames;
        u32     tx_128_to_255B_frames;
        u32     tx_256_to_511B_frames;
        u32     tx_512_to_1023B_frames;
        u32     tx_1024B_frames;
        u32     net_bytes;
        u32     rx_bottom_fifo_drop;
        u32     rx_port_mask_drop;
        u32     rx_top_fifo_drop;
        u32     ale_rate_limit_drop;
        u32     ale_vid_ingress_drop;
        u32     ale_da_eq_sa_drop;
        u32     __rsvd_0[3];
        u32     ale_unknown_ucast;
        u32     ale_unknown_ucast_bytes;
        u32     ale_unknown_mcast;
        u32     ale_unknown_mcast_bytes;
        u32     ale_unknown_bcast;
        u32     ale_unknown_bcast_bytes;
        u32     ale_pol_match;
        u32     ale_pol_match_red;              /* NU */
        u32     ale_pol_match_yellow;           /* NU */
        u32     __rsvd_1[44];
        u32     tx_mem_protect_err;
        /* following NU only */
        u32     tx_pri0;
        u32     tx_pri1;
        u32     tx_pri2;
        u32     tx_pri3;
        u32     tx_pri4;
        u32     tx_pri5;
        u32     tx_pri6;
        u32     tx_pri7;
        u32     tx_pri0_bcnt;
        u32     tx_pri1_bcnt;
        u32     tx_pri2_bcnt;
        u32     tx_pri3_bcnt;
        u32     tx_pri4_bcnt;
        u32     tx_pri5_bcnt;
        u32     tx_pri6_bcnt;
        u32     tx_pri7_bcnt;
        u32     tx_pri0_drop;
        u32     tx_pri1_drop;
        u32     tx_pri2_drop;
        u32     tx_pri3_drop;
        u32     tx_pri4_drop;
        u32     tx_pri5_drop;
        u32     tx_pri6_drop;
        u32     tx_pri7_drop;
        u32     tx_pri0_drop_bcnt;
        u32     tx_pri1_drop_bcnt;
        u32     tx_pri2_drop_bcnt;
        u32     tx_pri3_drop_bcnt;
        u32     tx_pri4_drop_bcnt;
        u32     tx_pri5_drop_bcnt;
        u32     tx_pri6_drop_bcnt;
        u32     tx_pri7_drop_bcnt;
};

#define GBENU_HW_STATS_REG_MAP_SZ       0x200

struct gbe_ss_regs {
        u32     id_ver;
        u32     synce_count;
        u32     synce_mux;
};

struct gbe_ss_regs_ofs {
        u16     id_ver;
        u16     control;
        u16     rgmii_status; /* 2U */
};

struct gbe_switch_regs {
        u32     id_ver;
        u32     control;
        u32     soft_reset;
        u32     stat_port_en;
        u32     ptype;
        u32     soft_idle;
        u32     thru_rate;
        u32     gap_thresh;
        u32     tx_start_wds;
        u32     flow_control;
};

struct gbe_switch_regs_ofs {
        u16     id_ver;
        u16     control;
        u16     soft_reset;
        u16     emcontrol;
        u16     stat_port_en;
        u16     ptype;
        u16     flow_control;
};

struct gbe_port_regs {
        u32     max_blks;
        u32     blk_cnt;
        u32     port_vlan;
        u32     tx_pri_map;
        u32     sa_lo;
        u32     sa_hi;
        u32     ts_ctl;
        u32     ts_seq_ltype;
        u32     ts_vlan;
        u32     ts_ctl_ltype2;
        u32     ts_ctl2;
};

struct gbe_port_regs_ofs {
        u16     port_vlan;
        u16     tx_pri_map;
        u16     rx_pri_map;
        u16     sa_lo;
        u16     sa_hi;
        u16     ts_ctl;
        u16     ts_seq_ltype;
        u16     ts_vlan;
        u16     ts_ctl_ltype2;
        u16     ts_ctl2;
        u16     rx_maxlen;      /* 2U, NU */
};

struct gbe_host_port_regs {
        u32     src_id;
        u32     port_vlan;
        u32     rx_pri_map;
        u32     rx_maxlen;
};

struct gbe_host_port_regs_ofs {
        u16     port_vlan;
        u16     tx_pri_map;
        u16     rx_maxlen;
};

struct gbe_emac_regs {
        u32     id_ver;
        u32     mac_control;
        u32     mac_status;
        u32     soft_reset;
        u32     rx_maxlen;
        u32     __reserved_0;
        u32     rx_pause;
        u32     tx_pause;
        u32     __reserved_1;
        u32     rx_pri_map;
        u32     rsvd[6];
};

struct gbe_emac_regs_ofs {
        u16     mac_control;
        u16     soft_reset;
        u16     rx_maxlen;
};

struct gbe_hw_stats {
        u32     rx_good_frames;
        u32     rx_broadcast_frames;
        u32     rx_multicast_frames;
        u32     rx_pause_frames;
        u32     rx_crc_errors;
        u32     rx_align_code_errors;
        u32     rx_oversized_frames;
        u32     rx_jabber_frames;
        u32     rx_undersized_frames;
        u32     rx_fragments;
        u32     __pad_0[2];
        u32     rx_bytes;
        u32     tx_good_frames;
        u32     tx_broadcast_frames;
        u32     tx_multicast_frames;
        u32     tx_pause_frames;
        u32     tx_deferred_frames;
        u32     tx_collision_frames;
        u32     tx_single_coll_frames;
        u32     tx_mult_coll_frames;
        u32     tx_excessive_collisions;
        u32     tx_late_collisions;
        u32     tx_underrun;
        u32     tx_carrier_sense_errors;
        u32     tx_bytes;
        u32     tx_64byte_frames;
        u32     tx_65_to_127byte_frames;
        u32     tx_128_to_255byte_frames;
        u32     tx_256_to_511byte_frames;
        u32     tx_512_to_1023byte_frames;
        u32     tx_1024byte_frames;
        u32     net_bytes;
        u32     rx_sof_overruns;
        u32     rx_mof_overruns;
        u32     rx_dma_overruns;
};

#define GBE_MAX_HW_STAT_MODS                    9
#define GBE_HW_STATS_REG_MAP_SZ                 0x100
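/* hw_stats_regs[] in struct gbe_priv below is sized by
 * GBE_MAX_HW_STAT_MODS; the per-module stat register blocks are
 * expected to sit GBE_HW_STATS_REG_MAP_SZ bytes apart.
 */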

struct ts_ctl {
        int     uni;
        u8      dst_port_map;
        u8      maddr_map;
        u8      ts_mcast_type;
};

struct gbe_slave {
        void __iomem                    *port_regs;
        void __iomem                    *emac_regs;
        struct gbe_port_regs_ofs        port_regs_ofs;
        struct gbe_emac_regs_ofs        emac_regs_ofs;
        int                             slave_num; /* 0 based logical number */
        int                             port_num;  /* actual port number */
        atomic_t                        link_state;
        bool                            open;
        struct phy_device               *phy;
        u32                             link_interface;
        u32                             mac_control;
        u8                              phy_port_t;
        struct device_node              *node;
        struct device_node              *phy_node;
        struct ts_ctl                   ts_ctl;
        struct list_head                slave_list;
};

struct gbe_priv {
        struct device                   *dev;
        struct netcp_device             *netcp_device;
        struct timer_list               timer;
        u32                             num_slaves;
        u32                             ale_entries;
        u32                             ale_ports;
        bool                            enable_ale;
        u8                              max_num_slaves;
        u8                              max_num_ports; /* max_num_slaves + 1 */
        u8                              num_stats_mods;
        struct netcp_tx_pipe            tx_pipe;

        int                             host_port;
        u32                             rx_packet_max;
        u32                             ss_version;
        u32                             stats_en_mask;

        void __iomem                    *ss_regs;
        void __iomem                    *switch_regs;
        void __iomem                    *host_port_regs;
        void __iomem                    *ale_reg;
        void __iomem                    *cpts_reg;
        void __iomem                    *sgmii_port_regs;
        void __iomem                    *sgmii_port34_regs;
        void __iomem                    *xgbe_serdes_regs;
        void __iomem                    *hw_stats_regs[GBE_MAX_HW_STAT_MODS];

        struct gbe_ss_regs_ofs          ss_regs_ofs;
        struct gbe_switch_regs_ofs      switch_regs_ofs;
        struct gbe_host_port_regs_ofs   host_port_regs_ofs;

        struct cpsw_ale                 *ale;
        unsigned int                    tx_queue_id;
        const char                      *dma_chan_name;

        struct list_head                gbe_intf_head;
        struct list_head                secondary_slaves;
        struct net_device               *dummy_ndev;

        u64                             *hw_stats;
        u32                             *hw_stats_prev;
        const struct netcp_ethtool_stat *et_stats;
        int                             num_et_stats;
        /*  Lock for updating the hwstats */
        spinlock_t                      hw_stats_lock;

        int                             cpts_registered;
        struct cpts                     *cpts;
        int                             rx_ts_enabled;
        int                             tx_ts_enabled;
};

struct gbe_intf {
        struct net_device       *ndev;
        struct device           *dev;
        struct gbe_priv         *gbe_dev;
        struct netcp_tx_pipe    tx_pipe;
        struct gbe_slave        *slave;
        struct list_head        gbe_intf_list;
        unsigned long           active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};

static struct netcp_module gbe_module;
static struct netcp_module xgbe_module;

/* Statistic management */
struct netcp_ethtool_stat {
        char desc[ETH_GSTRING_LEN];
        int type;
        u32 size;
        int offset;
};

#define GBE_STATSA_INFO(field)                                          \
{                                                                       \
        "GBE_A:"#field, GBE_STATSA_MODULE,                              \
        FIELD_SIZEOF(struct gbe_hw_stats, field),                       \
        offsetof(struct gbe_hw_stats, field)                            \
}

#define GBE_STATSB_INFO(field)                                          \
{                                                                       \
        "GBE_B:"#field, GBE_STATSB_MODULE,                              \
        FIELD_SIZEOF(struct gbe_hw_stats, field),                       \
        offsetof(struct gbe_hw_stats, field)                            \
}

#define GBE_STATSC_INFO(field)                                          \
{                                                                       \
        "GBE_C:"#field, GBE_STATSC_MODULE,                              \
        FIELD_SIZEOF(struct gbe_hw_stats, field),                       \
        offsetof(struct gbe_hw_stats, field)                            \
}

#define GBE_STATSD_INFO(field)                                          \
{                                                                       \
        "GBE_D:"#field, GBE_STATSD_MODULE,                              \
        FIELD_SIZEOF(struct gbe_hw_stats, field),                       \
        offsetof(struct gbe_hw_stats, field)                            \
}

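/* As an illustration, GBE_STATSA_INFO(rx_bytes) produces the entry
 *
 *      { "GBE_A:rx_bytes", GBE_STATSA_MODULE,
 *        FIELD_SIZEOF(struct gbe_hw_stats, rx_bytes),
 *        offsetof(struct gbe_hw_stats, rx_bytes) }
 *
 * i.e. an ethtool string plus the stat module index, counter width and
 * offset used to locate the counter in that module's gbe_hw_stats block.
 */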
static const struct netcp_ethtool_stat gbe13_et_stats[] = {
        /* GBE module A */
        GBE_STATSA_INFO(rx_good_frames),
        GBE_STATSA_INFO(rx_broadcast_frames),
        GBE_STATSA_INFO(rx_multicast_frames),
        GBE_STATSA_INFO(rx_pause_frames),
        GBE_STATSA_INFO(rx_crc_errors),
        GBE_STATSA_INFO(rx_align_code_errors),
        GBE_STATSA_INFO(rx_oversized_frames),
        GBE_STATSA_INFO(rx_jabber_frames),
        GBE_STATSA_INFO(rx_undersized_frames),
        GBE_STATSA_INFO(rx_fragments),
        GBE_STATSA_INFO(rx_bytes),
        GBE_STATSA_INFO(tx_good_frames),
        GBE_STATSA_INFO(tx_broadcast_frames),
        GBE_STATSA_INFO(tx_multicast_frames),
        GBE_STATSA_INFO(tx_pause_frames),
        GBE_STATSA_INFO(tx_deferred_frames),
        GBE_STATSA_INFO(tx_collision_frames),
        GBE_STATSA_INFO(tx_single_coll_frames),
        GBE_STATSA_INFO(tx_mult_coll_frames),
        GBE_STATSA_INFO(tx_excessive_collisions),
        GBE_STATSA_INFO(tx_late_collisions),
        GBE_STATSA_INFO(tx_underrun),
        GBE_STATSA_INFO(tx_carrier_sense_errors),
        GBE_STATSA_INFO(tx_bytes),
        GBE_STATSA_INFO(tx_64byte_frames),
        GBE_STATSA_INFO(tx_65_to_127byte_frames),
        GBE_STATSA_INFO(tx_128_to_255byte_frames),
        GBE_STATSA_INFO(tx_256_to_511byte_frames),
        GBE_STATSA_INFO(tx_512_to_1023byte_frames),
        GBE_STATSA_INFO(tx_1024byte_frames),
        GBE_STATSA_INFO(net_bytes),
        GBE_STATSA_INFO(rx_sof_overruns),
        GBE_STATSA_INFO(rx_mof_overruns),
        GBE_STATSA_INFO(rx_dma_overruns),
        /* GBE module B */
        GBE_STATSB_INFO(rx_good_frames),
        GBE_STATSB_INFO(rx_broadcast_frames),
        GBE_STATSB_INFO(rx_multicast_frames),
        GBE_STATSB_INFO(rx_pause_frames),
        GBE_STATSB_INFO(rx_crc_errors),
        GBE_STATSB_INFO(rx_align_code_errors),
        GBE_STATSB_INFO(rx_oversized_frames),
        GBE_STATSB_INFO(rx_jabber_frames),
        GBE_STATSB_INFO(rx_undersized_frames),
        GBE_STATSB_INFO(rx_fragments),
        GBE_STATSB_INFO(rx_bytes),
        GBE_STATSB_INFO(tx_good_frames),
        GBE_STATSB_INFO(tx_broadcast_frames),
        GBE_STATSB_INFO(tx_multicast_frames),
        GBE_STATSB_INFO(tx_pause_frames),
        GBE_STATSB_INFO(tx_deferred_frames),
        GBE_STATSB_INFO(tx_collision_frames),
        GBE_STATSB_INFO(tx_single_coll_frames),
        GBE_STATSB_INFO(tx_mult_coll_frames),
        GBE_STATSB_INFO(tx_excessive_collisions),
        GBE_STATSB_INFO(tx_late_collisions),
        GBE_STATSB_INFO(tx_underrun),
        GBE_STATSB_INFO(tx_carrier_sense_errors),
        GBE_STATSB_INFO(tx_bytes),
        GBE_STATSB_INFO(tx_64byte_frames),
        GBE_STATSB_INFO(tx_65_to_127byte_frames),
        GBE_STATSB_INFO(tx_128_to_255byte_frames),
        GBE_STATSB_INFO(tx_256_to_511byte_frames),
        GBE_STATSB_INFO(tx_512_to_1023byte_frames),
        GBE_STATSB_INFO(tx_1024byte_frames),
        GBE_STATSB_INFO(net_bytes),
        GBE_STATSB_INFO(rx_sof_overruns),
        GBE_STATSB_INFO(rx_mof_overruns),
        GBE_STATSB_INFO(rx_dma_overruns),
        /* GBE module C */
        GBE_STATSC_INFO(rx_good_frames),
        GBE_STATSC_INFO(rx_broadcast_frames),
        GBE_STATSC_INFO(rx_multicast_frames),
        GBE_STATSC_INFO(rx_pause_frames),
        GBE_STATSC_INFO(rx_crc_errors),
        GBE_STATSC_INFO(rx_align_code_errors),
        GBE_STATSC_INFO(rx_oversized_frames),
        GBE_STATSC_INFO(rx_jabber_frames),
        GBE_STATSC_INFO(rx_undersized_frames),
        GBE_STATSC_INFO(rx_fragments),
        GBE_STATSC_INFO(rx_bytes),
        GBE_STATSC_INFO(tx_good_frames),
        GBE_STATSC_INFO(tx_broadcast_frames),
        GBE_STATSC_INFO(tx_multicast_frames),
        GBE_STATSC_INFO(tx_pause_frames),
        GBE_STATSC_INFO(tx_deferred_frames),
        GBE_STATSC_INFO(tx_collision_frames),
        GBE_STATSC_INFO(tx_single_coll_frames),
        GBE_STATSC_INFO(tx_mult_coll_frames),
        GBE_STATSC_INFO(tx_excessive_collisions),
        GBE_STATSC_INFO(tx_late_collisions),
        GBE_STATSC_INFO(tx_underrun),
        GBE_STATSC_INFO(tx_carrier_sense_errors),
        GBE_STATSC_INFO(tx_bytes),
        GBE_STATSC_INFO(tx_64byte_frames),
        GBE_STATSC_INFO(tx_65_to_127byte_frames),
        GBE_STATSC_INFO(tx_128_to_255byte_frames),
        GBE_STATSC_INFO(tx_256_to_511byte_frames),
        GBE_STATSC_INFO(tx_512_to_1023byte_frames),
        GBE_STATSC_INFO(tx_1024byte_frames),
        GBE_STATSC_INFO(net_bytes),
        GBE_STATSC_INFO(rx_sof_overruns),
        GBE_STATSC_INFO(rx_mof_overruns),
        GBE_STATSC_INFO(rx_dma_overruns),
        /* GBE module D */
        GBE_STATSD_INFO(rx_good_frames),
        GBE_STATSD_INFO(rx_broadcast_frames),
        GBE_STATSD_INFO(rx_multicast_frames),
        GBE_STATSD_INFO(rx_pause_frames),
        GBE_STATSD_INFO(rx_crc_errors),
        GBE_STATSD_INFO(rx_align_code_errors),
        GBE_STATSD_INFO(rx_oversized_frames),
        GBE_STATSD_INFO(rx_jabber_frames),
        GBE_STATSD_INFO(rx_undersized_frames),
        GBE_STATSD_INFO(rx_fragments),
        GBE_STATSD_INFO(rx_bytes),
        GBE_STATSD_INFO(tx_good_frames),
        GBE_STATSD_INFO(tx_broadcast_frames),
        GBE_STATSD_INFO(tx_multicast_frames),
        GBE_STATSD_INFO(tx_pause_frames),
        GBE_STATSD_INFO(tx_deferred_frames),
        GBE_STATSD_INFO(tx_collision_frames),
        GBE_STATSD_INFO(tx_single_coll_frames),
        GBE_STATSD_INFO(tx_mult_coll_frames),
        GBE_STATSD_INFO(tx_excessive_collisions),
        GBE_STATSD_INFO(tx_late_collisions),
        GBE_STATSD_INFO(tx_underrun),
        GBE_STATSD_INFO(tx_carrier_sense_errors),
        GBE_STATSD_INFO(tx_bytes),
        GBE_STATSD_INFO(tx_64byte_frames),
        GBE_STATSD_INFO(tx_65_to_127byte_frames),
        GBE_STATSD_INFO(tx_128_to_255byte_frames),
        GBE_STATSD_INFO(tx_256_to_511byte_frames),
        GBE_STATSD_INFO(tx_512_to_1023byte_frames),
        GBE_STATSD_INFO(tx_1024byte_frames),
        GBE_STATSD_INFO(net_bytes),
        GBE_STATSD_INFO(rx_sof_overruns),
        GBE_STATSD_INFO(rx_mof_overruns),
        GBE_STATSD_INFO(rx_dma_overruns),
};

/* This is the size of entries in GBENU_STATS_HOST */
#define GBENU_ET_STATS_HOST_SIZE        52

#define GBENU_STATS_HOST(field)                                 \
{                                                               \
        "GBE_HOST:"#field, GBENU_STATS0_MODULE,                 \
        FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
}

/* This is the size of entries in GBENU_STATS_PORT */
#define GBENU_ET_STATS_PORT_SIZE        65

#define GBENU_STATS_P1(field)                                   \
{                                                               \
        "GBE_P1:"#field, GBENU_STATS1_MODULE,                   \
        FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
}

#define GBENU_STATS_P2(field)                                   \
{                                                               \
        "GBE_P2:"#field, GBENU_STATS2_MODULE,                   \
        FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
}

#define GBENU_STATS_P3(field)                                   \
{                                                               \
        "GBE_P3:"#field, GBENU_STATS3_MODULE,                   \
        FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
}

#define GBENU_STATS_P4(field)                                   \
{                                                               \
        "GBE_P4:"#field, GBENU_STATS4_MODULE,                   \
        FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
}

#define GBENU_STATS_P5(field)                                   \
{                                                               \
        "GBE_P5:"#field, GBENU_STATS5_MODULE,                   \
        FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
}

#define GBENU_STATS_P6(field)                                   \
{                                                               \
        "GBE_P6:"#field, GBENU_STATS6_MODULE,                   \
        FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
}

#define GBENU_STATS_P7(field)                                   \
{                                                               \
        "GBE_P7:"#field, GBENU_STATS7_MODULE,                   \
        FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
}

#define GBENU_STATS_P8(field)                                   \
{                                                               \
        "GBE_P8:"#field, GBENU_STATS8_MODULE,                   \
        FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
}

static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        /* GBENU Host Module */
        GBENU_STATS_HOST(rx_good_frames),
        GBENU_STATS_HOST(rx_broadcast_frames),
        GBENU_STATS_HOST(rx_multicast_frames),
        GBENU_STATS_HOST(rx_crc_errors),
        GBENU_STATS_HOST(rx_oversized_frames),
        GBENU_STATS_HOST(rx_undersized_frames),
        GBENU_STATS_HOST(ale_drop),
        GBENU_STATS_HOST(ale_overrun_drop),
        GBENU_STATS_HOST(rx_bytes),
        GBENU_STATS_HOST(tx_good_frames),
        GBENU_STATS_HOST(tx_broadcast_frames),
        GBENU_STATS_HOST(tx_multicast_frames),
        GBENU_STATS_HOST(tx_bytes),
        GBENU_STATS_HOST(tx_64B_frames),
        GBENU_STATS_HOST(tx_65_to_127B_frames),
        GBENU_STATS_HOST(tx_128_to_255B_frames),
        GBENU_STATS_HOST(tx_256_to_511B_frames),
        GBENU_STATS_HOST(tx_512_to_1023B_frames),
        GBENU_STATS_HOST(tx_1024B_frames),
        GBENU_STATS_HOST(net_bytes),
        GBENU_STATS_HOST(rx_bottom_fifo_drop),
        GBENU_STATS_HOST(rx_port_mask_drop),
        GBENU_STATS_HOST(rx_top_fifo_drop),
        GBENU_STATS_HOST(ale_rate_limit_drop),
        GBENU_STATS_HOST(ale_vid_ingress_drop),
        GBENU_STATS_HOST(ale_da_eq_sa_drop),
        GBENU_STATS_HOST(ale_unknown_ucast),
        GBENU_STATS_HOST(ale_unknown_ucast_bytes),
        GBENU_STATS_HOST(ale_unknown_mcast),
        GBENU_STATS_HOST(ale_unknown_mcast_bytes),
        GBENU_STATS_HOST(ale_unknown_bcast),
        GBENU_STATS_HOST(ale_unknown_bcast_bytes),
        GBENU_STATS_HOST(ale_pol_match),
        GBENU_STATS_HOST(ale_pol_match_red),
        GBENU_STATS_HOST(ale_pol_match_yellow),
        GBENU_STATS_HOST(tx_mem_protect_err),
        GBENU_STATS_HOST(tx_pri0_drop),
        GBENU_STATS_HOST(tx_pri1_drop),
        GBENU_STATS_HOST(tx_pri2_drop),
        GBENU_STATS_HOST(tx_pri3_drop),
        GBENU_STATS_HOST(tx_pri4_drop),
        GBENU_STATS_HOST(tx_pri5_drop),
        GBENU_STATS_HOST(tx_pri6_drop),
        GBENU_STATS_HOST(tx_pri7_drop),
        GBENU_STATS_HOST(tx_pri0_drop_bcnt),
        GBENU_STATS_HOST(tx_pri1_drop_bcnt),
        GBENU_STATS_HOST(tx_pri2_drop_bcnt),
        GBENU_STATS_HOST(tx_pri3_drop_bcnt),
        GBENU_STATS_HOST(tx_pri4_drop_bcnt),
        GBENU_STATS_HOST(tx_pri5_drop_bcnt),
        GBENU_STATS_HOST(tx_pri6_drop_bcnt),
        GBENU_STATS_HOST(tx_pri7_drop_bcnt),
        /* GBENU Module 1 */
        GBENU_STATS_P1(rx_good_frames),
        GBENU_STATS_P1(rx_broadcast_frames),
        GBENU_STATS_P1(rx_multicast_frames),
        GBENU_STATS_P1(rx_pause_frames),
        GBENU_STATS_P1(rx_crc_errors),
        GBENU_STATS_P1(rx_align_code_errors),
        GBENU_STATS_P1(rx_oversized_frames),
        GBENU_STATS_P1(rx_jabber_frames),
        GBENU_STATS_P1(rx_undersized_frames),
        GBENU_STATS_P1(rx_fragments),
        GBENU_STATS_P1(ale_drop),
        GBENU_STATS_P1(ale_overrun_drop),
        GBENU_STATS_P1(rx_bytes),
        GBENU_STATS_P1(tx_good_frames),
        GBENU_STATS_P1(tx_broadcast_frames),
        GBENU_STATS_P1(tx_multicast_frames),
        GBENU_STATS_P1(tx_pause_frames),
        GBENU_STATS_P1(tx_deferred_frames),
        GBENU_STATS_P1(tx_collision_frames),
        GBENU_STATS_P1(tx_single_coll_frames),
        GBENU_STATS_P1(tx_mult_coll_frames),
        GBENU_STATS_P1(tx_excessive_collisions),
        GBENU_STATS_P1(tx_late_collisions),
        GBENU_STATS_P1(rx_ipg_error),
        GBENU_STATS_P1(tx_carrier_sense_errors),
        GBENU_STATS_P1(tx_bytes),
        GBENU_STATS_P1(tx_64B_frames),
        GBENU_STATS_P1(tx_65_to_127B_frames),
        GBENU_STATS_P1(tx_128_to_255B_frames),
        GBENU_STATS_P1(tx_256_to_511B_frames),
        GBENU_STATS_P1(tx_512_to_1023B_frames),
        GBENU_STATS_P1(tx_1024B_frames),
        GBENU_STATS_P1(net_bytes),
        GBENU_STATS_P1(rx_bottom_fifo_drop),
        GBENU_STATS_P1(rx_port_mask_drop),
        GBENU_STATS_P1(rx_top_fifo_drop),
        GBENU_STATS_P1(ale_rate_limit_drop),
        GBENU_STATS_P1(ale_vid_ingress_drop),
        GBENU_STATS_P1(ale_da_eq_sa_drop),
        GBENU_STATS_P1(ale_unknown_ucast),
        GBENU_STATS_P1(ale_unknown_ucast_bytes),
        GBENU_STATS_P1(ale_unknown_mcast),
        GBENU_STATS_P1(ale_unknown_mcast_bytes),
        GBENU_STATS_P1(ale_unknown_bcast),
        GBENU_STATS_P1(ale_unknown_bcast_bytes),
        GBENU_STATS_P1(ale_pol_match),
        GBENU_STATS_P1(ale_pol_match_red),
        GBENU_STATS_P1(ale_pol_match_yellow),
        GBENU_STATS_P1(tx_mem_protect_err),
        GBENU_STATS_P1(tx_pri0_drop),
        GBENU_STATS_P1(tx_pri1_drop),
        GBENU_STATS_P1(tx_pri2_drop),
        GBENU_STATS_P1(tx_pri3_drop),
        GBENU_STATS_P1(tx_pri4_drop),
        GBENU_STATS_P1(tx_pri5_drop),
        GBENU_STATS_P1(tx_pri6_drop),
        GBENU_STATS_P1(tx_pri7_drop),
        GBENU_STATS_P1(tx_pri0_drop_bcnt),
        GBENU_STATS_P1(tx_pri1_drop_bcnt),
        GBENU_STATS_P1(tx_pri2_drop_bcnt),
        GBENU_STATS_P1(tx_pri3_drop_bcnt),
        GBENU_STATS_P1(tx_pri4_drop_bcnt),
        GBENU_STATS_P1(tx_pri5_drop_bcnt),
        GBENU_STATS_P1(tx_pri6_drop_bcnt),
        GBENU_STATS_P1(tx_pri7_drop_bcnt),
        /* GBENU Module 2 */
        GBENU_STATS_P2(rx_good_frames),
        GBENU_STATS_P2(rx_broadcast_frames),
        GBENU_STATS_P2(rx_multicast_frames),
        GBENU_STATS_P2(rx_pause_frames),
        GBENU_STATS_P2(rx_crc_errors),
        GBENU_STATS_P2(rx_align_code_errors),
        GBENU_STATS_P2(rx_oversized_frames),
        GBENU_STATS_P2(rx_jabber_frames),
        GBENU_STATS_P2(rx_undersized_frames),
        GBENU_STATS_P2(rx_fragments),
        GBENU_STATS_P2(ale_drop),
        GBENU_STATS_P2(ale_overrun_drop),
        GBENU_STATS_P2(rx_bytes),
        GBENU_STATS_P2(tx_good_frames),
        GBENU_STATS_P2(tx_broadcast_frames),
        GBENU_STATS_P2(tx_multicast_frames),
        GBENU_STATS_P2(tx_pause_frames),
        GBENU_STATS_P2(tx_deferred_frames),
        GBENU_STATS_P2(tx_collision_frames),
        GBENU_STATS_P2(tx_single_coll_frames),
        GBENU_STATS_P2(tx_mult_coll_frames),
        GBENU_STATS_P2(tx_excessive_collisions),
        GBENU_STATS_P2(tx_late_collisions),
        GBENU_STATS_P2(rx_ipg_error),
        GBENU_STATS_P2(tx_carrier_sense_errors),
        GBENU_STATS_P2(tx_bytes),
        GBENU_STATS_P2(tx_64B_frames),
        GBENU_STATS_P2(tx_65_to_127B_frames),
        GBENU_STATS_P2(tx_128_to_255B_frames),
        GBENU_STATS_P2(tx_256_to_511B_frames),
        GBENU_STATS_P2(tx_512_to_1023B_frames),
        GBENU_STATS_P2(tx_1024B_frames),
        GBENU_STATS_P2(net_bytes),
        GBENU_STATS_P2(rx_bottom_fifo_drop),
        GBENU_STATS_P2(rx_port_mask_drop),
        GBENU_STATS_P2(rx_top_fifo_drop),
        GBENU_STATS_P2(ale_rate_limit_drop),
        GBENU_STATS_P2(ale_vid_ingress_drop),
        GBENU_STATS_P2(ale_da_eq_sa_drop),
        GBENU_STATS_P2(ale_unknown_ucast),
        GBENU_STATS_P2(ale_unknown_ucast_bytes),
        GBENU_STATS_P2(ale_unknown_mcast),
        GBENU_STATS_P2(ale_unknown_mcast_bytes),
        GBENU_STATS_P2(ale_unknown_bcast),
        GBENU_STATS_P2(ale_unknown_bcast_bytes),
        GBENU_STATS_P2(ale_pol_match),
        GBENU_STATS_P2(ale_pol_match_red),
        GBENU_STATS_P2(ale_pol_match_yellow),
        GBENU_STATS_P2(tx_mem_protect_err),
        GBENU_STATS_P2(tx_pri0_drop),
        GBENU_STATS_P2(tx_pri1_drop),
        GBENU_STATS_P2(tx_pri2_drop),
        GBENU_STATS_P2(tx_pri3_drop),
        GBENU_STATS_P2(tx_pri4_drop),
        GBENU_STATS_P2(tx_pri5_drop),
        GBENU_STATS_P2(tx_pri6_drop),
        GBENU_STATS_P2(tx_pri7_drop),
        GBENU_STATS_P2(tx_pri0_drop_bcnt),
        GBENU_STATS_P2(tx_pri1_drop_bcnt),
        GBENU_STATS_P2(tx_pri2_drop_bcnt),
        GBENU_STATS_P2(tx_pri3_drop_bcnt),
        GBENU_STATS_P2(tx_pri4_drop_bcnt),
        GBENU_STATS_P2(tx_pri5_drop_bcnt),
        GBENU_STATS_P2(tx_pri6_drop_bcnt),
        GBENU_STATS_P2(tx_pri7_drop_bcnt),
        /* GBENU Module 3 */
        GBENU_STATS_P3(rx_good_frames),
        GBENU_STATS_P3(rx_broadcast_frames),
        GBENU_STATS_P3(rx_multicast_frames),
        GBENU_STATS_P3(rx_pause_frames),
        GBENU_STATS_P3(rx_crc_errors),
        GBENU_STATS_P3(rx_align_code_errors),
        GBENU_STATS_P3(rx_oversized_frames),
        GBENU_STATS_P3(rx_jabber_frames),
        GBENU_STATS_P3(rx_undersized_frames),
        GBENU_STATS_P3(rx_fragments),
        GBENU_STATS_P3(ale_drop),
        GBENU_STATS_P3(ale_overrun_drop),
        GBENU_STATS_P3(rx_bytes),
        GBENU_STATS_P3(tx_good_frames),
        GBENU_STATS_P3(tx_broadcast_frames),
        GBENU_STATS_P3(tx_multicast_frames),
        GBENU_STATS_P3(tx_pause_frames),
        GBENU_STATS_P3(tx_deferred_frames),
        GBENU_STATS_P3(tx_collision_frames),
        GBENU_STATS_P3(tx_single_coll_frames),
        GBENU_STATS_P3(tx_mult_coll_frames),
        GBENU_STATS_P3(tx_excessive_collisions),
        GBENU_STATS_P3(tx_late_collisions),
        GBENU_STATS_P3(rx_ipg_error),
        GBENU_STATS_P3(tx_carrier_sense_errors),
        GBENU_STATS_P3(tx_bytes),
        GBENU_STATS_P3(tx_64B_frames),
        GBENU_STATS_P3(tx_65_to_127B_frames),
        GBENU_STATS_P3(tx_128_to_255B_frames),
        GBENU_STATS_P3(tx_256_to_511B_frames),
        GBENU_STATS_P3(tx_512_to_1023B_frames),
        GBENU_STATS_P3(tx_1024B_frames),
        GBENU_STATS_P3(net_bytes),
        GBENU_STATS_P3(rx_bottom_fifo_drop),
        GBENU_STATS_P3(rx_port_mask_drop),
        GBENU_STATS_P3(rx_top_fifo_drop),
        GBENU_STATS_P3(ale_rate_limit_drop),
        GBENU_STATS_P3(ale_vid_ingress_drop),
        GBENU_STATS_P3(ale_da_eq_sa_drop),
        GBENU_STATS_P3(ale_unknown_ucast),
        GBENU_STATS_P3(ale_unknown_ucast_bytes),
        GBENU_STATS_P3(ale_unknown_mcast),
        GBENU_STATS_P3(ale_unknown_mcast_bytes),
        GBENU_STATS_P3(ale_unknown_bcast),
        GBENU_STATS_P3(ale_unknown_bcast_bytes),
        GBENU_STATS_P3(ale_pol_match),
        GBENU_STATS_P3(ale_pol_match_red),
        GBENU_STATS_P3(ale_pol_match_yellow),
        GBENU_STATS_P3(tx_mem_protect_err),
        GBENU_STATS_P3(tx_pri0_drop),
        GBENU_STATS_P3(tx_pri1_drop),
        GBENU_STATS_P3(tx_pri2_drop),
        GBENU_STATS_P3(tx_pri3_drop),
        GBENU_STATS_P3(tx_pri4_drop),
        GBENU_STATS_P3(tx_pri5_drop),
        GBENU_STATS_P3(tx_pri6_drop),
        GBENU_STATS_P3(tx_pri7_drop),
        GBENU_STATS_P3(tx_pri0_drop_bcnt),
        GBENU_STATS_P3(tx_pri1_drop_bcnt),
        GBENU_STATS_P3(tx_pri2_drop_bcnt),
        GBENU_STATS_P3(tx_pri3_drop_bcnt),
        GBENU_STATS_P3(tx_pri4_drop_bcnt),
        GBENU_STATS_P3(tx_pri5_drop_bcnt),
        GBENU_STATS_P3(tx_pri6_drop_bcnt),
        GBENU_STATS_P3(tx_pri7_drop_bcnt),
        /* GBENU Module 4 */
        GBENU_STATS_P4(rx_good_frames),
        GBENU_STATS_P4(rx_broadcast_frames),
        GBENU_STATS_P4(rx_multicast_frames),
        GBENU_STATS_P4(rx_pause_frames),
        GBENU_STATS_P4(rx_crc_errors),
        GBENU_STATS_P4(rx_align_code_errors),
        GBENU_STATS_P4(rx_oversized_frames),
        GBENU_STATS_P4(rx_jabber_frames),
        GBENU_STATS_P4(rx_undersized_frames),
        GBENU_STATS_P4(rx_fragments),
        GBENU_STATS_P4(ale_drop),
        GBENU_STATS_P4(ale_overrun_drop),
        GBENU_STATS_P4(rx_bytes),
        GBENU_STATS_P4(tx_good_frames),
        GBENU_STATS_P4(tx_broadcast_frames),
        GBENU_STATS_P4(tx_multicast_frames),
        GBENU_STATS_P4(tx_pause_frames),
        GBENU_STATS_P4(tx_deferred_frames),
        GBENU_STATS_P4(tx_collision_frames),
        GBENU_STATS_P4(tx_single_coll_frames),
        GBENU_STATS_P4(tx_mult_coll_frames),
        GBENU_STATS_P4(tx_excessive_collisions),
        GBENU_STATS_P4(tx_late_collisions),
        GBENU_STATS_P4(rx_ipg_error),
        GBENU_STATS_P4(tx_carrier_sense_errors),
        GBENU_STATS_P4(tx_bytes),
        GBENU_STATS_P4(tx_64B_frames),
        GBENU_STATS_P4(tx_65_to_127B_frames),
        GBENU_STATS_P4(tx_128_to_255B_frames),
        GBENU_STATS_P4(tx_256_to_511B_frames),
        GBENU_STATS_P4(tx_512_to_1023B_frames),
        GBENU_STATS_P4(tx_1024B_frames),
        GBENU_STATS_P4(net_bytes),
        GBENU_STATS_P4(rx_bottom_fifo_drop),
        GBENU_STATS_P4(rx_port_mask_drop),
        GBENU_STATS_P4(rx_top_fifo_drop),
        GBENU_STATS_P4(ale_rate_limit_drop),
        GBENU_STATS_P4(ale_vid_ingress_drop),
        GBENU_STATS_P4(ale_da_eq_sa_drop),
        GBENU_STATS_P4(ale_unknown_ucast),
        GBENU_STATS_P4(ale_unknown_ucast_bytes),
        GBENU_STATS_P4(ale_unknown_mcast),
        GBENU_STATS_P4(ale_unknown_mcast_bytes),
        GBENU_STATS_P4(ale_unknown_bcast),
        GBENU_STATS_P4(ale_unknown_bcast_bytes),
        GBENU_STATS_P4(ale_pol_match),
        GBENU_STATS_P4(ale_pol_match_red),
        GBENU_STATS_P4(ale_pol_match_yellow),
        GBENU_STATS_P4(tx_mem_protect_err),
        GBENU_STATS_P4(tx_pri0_drop),
        GBENU_STATS_P4(tx_pri1_drop),
        GBENU_STATS_P4(tx_pri2_drop),
        GBENU_STATS_P4(tx_pri3_drop),
        GBENU_STATS_P4(tx_pri4_drop),
        GBENU_STATS_P4(tx_pri5_drop),
        GBENU_STATS_P4(tx_pri6_drop),
        GBENU_STATS_P4(tx_pri7_drop),
        GBENU_STATS_P4(tx_pri0_drop_bcnt),
        GBENU_STATS_P4(tx_pri1_drop_bcnt),
        GBENU_STATS_P4(tx_pri2_drop_bcnt),
        GBENU_STATS_P4(tx_pri3_drop_bcnt),
        GBENU_STATS_P4(tx_pri4_drop_bcnt),
        GBENU_STATS_P4(tx_pri5_drop_bcnt),
        GBENU_STATS_P4(tx_pri6_drop_bcnt),
        GBENU_STATS_P4(tx_pri7_drop_bcnt),
        /* GBENU Module 5 */
        GBENU_STATS_P5(rx_good_frames),
        GBENU_STATS_P5(rx_broadcast_frames),
        GBENU_STATS_P5(rx_multicast_frames),
        GBENU_STATS_P5(rx_pause_frames),
        GBENU_STATS_P5(rx_crc_errors),
        GBENU_STATS_P5(rx_align_code_errors),
        GBENU_STATS_P5(rx_oversized_frames),
        GBENU_STATS_P5(rx_jabber_frames),
        GBENU_STATS_P5(rx_undersized_frames),
        GBENU_STATS_P5(rx_fragments),
        GBENU_STATS_P5(ale_drop),
        GBENU_STATS_P5(ale_overrun_drop),
        GBENU_STATS_P5(rx_bytes),
        GBENU_STATS_P5(tx_good_frames),
        GBENU_STATS_P5(tx_broadcast_frames),
        GBENU_STATS_P5(tx_multicast_frames),
        GBENU_STATS_P5(tx_pause_frames),
        GBENU_STATS_P5(tx_deferred_frames),
        GBENU_STATS_P5(tx_collision_frames),
        GBENU_STATS_P5(tx_single_coll_frames),
        GBENU_STATS_P5(tx_mult_coll_frames),
        GBENU_STATS_P5(tx_excessive_collisions),
        GBENU_STATS_P5(tx_late_collisions),
        GBENU_STATS_P5(rx_ipg_error),
        GBENU_STATS_P5(tx_carrier_sense_errors),
        GBENU_STATS_P5(tx_bytes),
        GBENU_STATS_P5(tx_64B_frames),
        GBENU_STATS_P5(tx_65_to_127B_frames),
        GBENU_STATS_P5(tx_128_to_255B_frames),
        GBENU_STATS_P5(tx_256_to_511B_frames),
        GBENU_STATS_P5(tx_512_to_1023B_frames),
        GBENU_STATS_P5(tx_1024B_frames),
        GBENU_STATS_P5(net_bytes),
        GBENU_STATS_P5(rx_bottom_fifo_drop),
        GBENU_STATS_P5(rx_port_mask_drop),
        GBENU_STATS_P5(rx_top_fifo_drop),
        GBENU_STATS_P5(ale_rate_limit_drop),
        GBENU_STATS_P5(ale_vid_ingress_drop),
        GBENU_STATS_P5(ale_da_eq_sa_drop),
        GBENU_STATS_P5(ale_unknown_ucast),
        GBENU_STATS_P5(ale_unknown_ucast_bytes),
        GBENU_STATS_P5(ale_unknown_mcast),
        GBENU_STATS_P5(ale_unknown_mcast_bytes),
        GBENU_STATS_P5(ale_unknown_bcast),
        GBENU_STATS_P5(ale_unknown_bcast_bytes),
        GBENU_STATS_P5(ale_pol_match),
        GBENU_STATS_P5(ale_pol_match_red),
        GBENU_STATS_P5(ale_pol_match_yellow),
        GBENU_STATS_P5(tx_mem_protect_err),
        GBENU_STATS_P5(tx_pri0_drop),
        GBENU_STATS_P5(tx_pri1_drop),
        GBENU_STATS_P5(tx_pri2_drop),
        GBENU_STATS_P5(tx_pri3_drop),
        GBENU_STATS_P5(tx_pri4_drop),
        GBENU_STATS_P5(tx_pri5_drop),
        GBENU_STATS_P5(tx_pri6_drop),
        GBENU_STATS_P5(tx_pri7_drop),
        GBENU_STATS_P5(tx_pri0_drop_bcnt),
        GBENU_STATS_P5(tx_pri1_drop_bcnt),
        GBENU_STATS_P5(tx_pri2_drop_bcnt),
        GBENU_STATS_P5(tx_pri3_drop_bcnt),
        GBENU_STATS_P5(tx_pri4_drop_bcnt),
        GBENU_STATS_P5(tx_pri5_drop_bcnt),
        GBENU_STATS_P5(tx_pri6_drop_bcnt),
        GBENU_STATS_P5(tx_pri7_drop_bcnt),
        /* GBENU Module 6 */
        GBENU_STATS_P6(rx_good_frames),
        GBENU_STATS_P6(rx_broadcast_frames),
        GBENU_STATS_P6(rx_multicast_frames),
        GBENU_STATS_P6(rx_pause_frames),
        GBENU_STATS_P6(rx_crc_errors),
        GBENU_STATS_P6(rx_align_code_errors),
        GBENU_STATS_P6(rx_oversized_frames),
        GBENU_STATS_P6(rx_jabber_frames),
        GBENU_STATS_P6(rx_undersized_frames),
        GBENU_STATS_P6(rx_fragments),
        GBENU_STATS_P6(ale_drop),
        GBENU_STATS_P6(ale_overrun_drop),
        GBENU_STATS_P6(rx_bytes),
1421         GBENU_STATS_P6(tx_good_frames),
1422         GBENU_STATS_P6(tx_broadcast_frames),
1423         GBENU_STATS_P6(tx_multicast_frames),
1424         GBENU_STATS_P6(tx_pause_frames),
1425         GBENU_STATS_P6(tx_deferred_frames),
1426         GBENU_STATS_P6(tx_collision_frames),
1427         GBENU_STATS_P6(tx_single_coll_frames),
1428         GBENU_STATS_P6(tx_mult_coll_frames),
1429         GBENU_STATS_P6(tx_excessive_collisions),
1430         GBENU_STATS_P6(tx_late_collisions),
1431         GBENU_STATS_P6(rx_ipg_error),
1432         GBENU_STATS_P6(tx_carrier_sense_errors),
1433         GBENU_STATS_P6(tx_bytes),
1434         GBENU_STATS_P6(tx_64B_frames),
1435         GBENU_STATS_P6(tx_65_to_127B_frames),
1436         GBENU_STATS_P6(tx_128_to_255B_frames),
1437         GBENU_STATS_P6(tx_256_to_511B_frames),
1438         GBENU_STATS_P6(tx_512_to_1023B_frames),
1439         GBENU_STATS_P6(tx_1024B_frames),
1440         GBENU_STATS_P6(net_bytes),
1441         GBENU_STATS_P6(rx_bottom_fifo_drop),
1442         GBENU_STATS_P6(rx_port_mask_drop),
1443         GBENU_STATS_P6(rx_top_fifo_drop),
1444         GBENU_STATS_P6(ale_rate_limit_drop),
1445         GBENU_STATS_P6(ale_vid_ingress_drop),
1446         GBENU_STATS_P6(ale_da_eq_sa_drop),
1447         GBENU_STATS_P6(ale_unknown_ucast),
1448         GBENU_STATS_P6(ale_unknown_ucast_bytes),
1449         GBENU_STATS_P6(ale_unknown_mcast),
1450         GBENU_STATS_P6(ale_unknown_mcast_bytes),
1451         GBENU_STATS_P6(ale_unknown_bcast),
1452         GBENU_STATS_P6(ale_unknown_bcast_bytes),
1453         GBENU_STATS_P6(ale_pol_match),
1454         GBENU_STATS_P6(ale_pol_match_red),
1455         GBENU_STATS_P6(ale_pol_match_yellow),
1456         GBENU_STATS_P6(tx_mem_protect_err),
1457         GBENU_STATS_P6(tx_pri0_drop),
1458         GBENU_STATS_P6(tx_pri1_drop),
1459         GBENU_STATS_P6(tx_pri2_drop),
1460         GBENU_STATS_P6(tx_pri3_drop),
1461         GBENU_STATS_P6(tx_pri4_drop),
1462         GBENU_STATS_P6(tx_pri5_drop),
1463         GBENU_STATS_P6(tx_pri6_drop),
1464         GBENU_STATS_P6(tx_pri7_drop),
1465         GBENU_STATS_P6(tx_pri0_drop_bcnt),
1466         GBENU_STATS_P6(tx_pri1_drop_bcnt),
1467         GBENU_STATS_P6(tx_pri2_drop_bcnt),
1468         GBENU_STATS_P6(tx_pri3_drop_bcnt),
1469         GBENU_STATS_P6(tx_pri4_drop_bcnt),
1470         GBENU_STATS_P6(tx_pri5_drop_bcnt),
1471         GBENU_STATS_P6(tx_pri6_drop_bcnt),
1472         GBENU_STATS_P6(tx_pri7_drop_bcnt),
1473         /* GBENU Module 7 */
1474         GBENU_STATS_P7(rx_good_frames),
1475         GBENU_STATS_P7(rx_broadcast_frames),
1476         GBENU_STATS_P7(rx_multicast_frames),
1477         GBENU_STATS_P7(rx_pause_frames),
1478         GBENU_STATS_P7(rx_crc_errors),
1479         GBENU_STATS_P7(rx_align_code_errors),
1480         GBENU_STATS_P7(rx_oversized_frames),
1481         GBENU_STATS_P7(rx_jabber_frames),
1482         GBENU_STATS_P7(rx_undersized_frames),
1483         GBENU_STATS_P7(rx_fragments),
1484         GBENU_STATS_P7(ale_drop),
1485         GBENU_STATS_P7(ale_overrun_drop),
1486         GBENU_STATS_P7(rx_bytes),
1487         GBENU_STATS_P7(tx_good_frames),
1488         GBENU_STATS_P7(tx_broadcast_frames),
1489         GBENU_STATS_P7(tx_multicast_frames),
1490         GBENU_STATS_P7(tx_pause_frames),
1491         GBENU_STATS_P7(tx_deferred_frames),
1492         GBENU_STATS_P7(tx_collision_frames),
1493         GBENU_STATS_P7(tx_single_coll_frames),
1494         GBENU_STATS_P7(tx_mult_coll_frames),
1495         GBENU_STATS_P7(tx_excessive_collisions),
1496         GBENU_STATS_P7(tx_late_collisions),
1497         GBENU_STATS_P7(rx_ipg_error),
1498         GBENU_STATS_P7(tx_carrier_sense_errors),
1499         GBENU_STATS_P7(tx_bytes),
1500         GBENU_STATS_P7(tx_64B_frames),
1501         GBENU_STATS_P7(tx_65_to_127B_frames),
1502         GBENU_STATS_P7(tx_128_to_255B_frames),
1503         GBENU_STATS_P7(tx_256_to_511B_frames),
1504         GBENU_STATS_P7(tx_512_to_1023B_frames),
1505         GBENU_STATS_P7(tx_1024B_frames),
1506         GBENU_STATS_P7(net_bytes),
1507         GBENU_STATS_P7(rx_bottom_fifo_drop),
1508         GBENU_STATS_P7(rx_port_mask_drop),
1509         GBENU_STATS_P7(rx_top_fifo_drop),
1510         GBENU_STATS_P7(ale_rate_limit_drop),
1511         GBENU_STATS_P7(ale_vid_ingress_drop),
1512         GBENU_STATS_P7(ale_da_eq_sa_drop),
1513         GBENU_STATS_P7(ale_unknown_ucast),
1514         GBENU_STATS_P7(ale_unknown_ucast_bytes),
1515         GBENU_STATS_P7(ale_unknown_mcast),
1516         GBENU_STATS_P7(ale_unknown_mcast_bytes),
1517         GBENU_STATS_P7(ale_unknown_bcast),
1518         GBENU_STATS_P7(ale_unknown_bcast_bytes),
1519         GBENU_STATS_P7(ale_pol_match),
1520         GBENU_STATS_P7(ale_pol_match_red),
1521         GBENU_STATS_P7(ale_pol_match_yellow),
1522         GBENU_STATS_P7(tx_mem_protect_err),
1523         GBENU_STATS_P7(tx_pri0_drop),
1524         GBENU_STATS_P7(tx_pri1_drop),
1525         GBENU_STATS_P7(tx_pri2_drop),
1526         GBENU_STATS_P7(tx_pri3_drop),
1527         GBENU_STATS_P7(tx_pri4_drop),
1528         GBENU_STATS_P7(tx_pri5_drop),
1529         GBENU_STATS_P7(tx_pri6_drop),
1530         GBENU_STATS_P7(tx_pri7_drop),
1531         GBENU_STATS_P7(tx_pri0_drop_bcnt),
1532         GBENU_STATS_P7(tx_pri1_drop_bcnt),
1533         GBENU_STATS_P7(tx_pri2_drop_bcnt),
1534         GBENU_STATS_P7(tx_pri3_drop_bcnt),
1535         GBENU_STATS_P7(tx_pri4_drop_bcnt),
1536         GBENU_STATS_P7(tx_pri5_drop_bcnt),
1537         GBENU_STATS_P7(tx_pri6_drop_bcnt),
1538         GBENU_STATS_P7(tx_pri7_drop_bcnt),
1539         /* GBENU Module 8 */
1540         GBENU_STATS_P8(rx_good_frames),
1541         GBENU_STATS_P8(rx_broadcast_frames),
1542         GBENU_STATS_P8(rx_multicast_frames),
1543         GBENU_STATS_P8(rx_pause_frames),
1544         GBENU_STATS_P8(rx_crc_errors),
1545         GBENU_STATS_P8(rx_align_code_errors),
1546         GBENU_STATS_P8(rx_oversized_frames),
1547         GBENU_STATS_P8(rx_jabber_frames),
1548         GBENU_STATS_P8(rx_undersized_frames),
1549         GBENU_STATS_P8(rx_fragments),
1550         GBENU_STATS_P8(ale_drop),
1551         GBENU_STATS_P8(ale_overrun_drop),
1552         GBENU_STATS_P8(rx_bytes),
1553         GBENU_STATS_P8(tx_good_frames),
1554         GBENU_STATS_P8(tx_broadcast_frames),
1555         GBENU_STATS_P8(tx_multicast_frames),
1556         GBENU_STATS_P8(tx_pause_frames),
1557         GBENU_STATS_P8(tx_deferred_frames),
1558         GBENU_STATS_P8(tx_collision_frames),
1559         GBENU_STATS_P8(tx_single_coll_frames),
1560         GBENU_STATS_P8(tx_mult_coll_frames),
1561         GBENU_STATS_P8(tx_excessive_collisions),
1562         GBENU_STATS_P8(tx_late_collisions),
1563         GBENU_STATS_P8(rx_ipg_error),
1564         GBENU_STATS_P8(tx_carrier_sense_errors),
1565         GBENU_STATS_P8(tx_bytes),
1566         GBENU_STATS_P8(tx_64B_frames),
1567         GBENU_STATS_P8(tx_65_to_127B_frames),
1568         GBENU_STATS_P8(tx_128_to_255B_frames),
1569         GBENU_STATS_P8(tx_256_to_511B_frames),
1570         GBENU_STATS_P8(tx_512_to_1023B_frames),
1571         GBENU_STATS_P8(tx_1024B_frames),
1572         GBENU_STATS_P8(net_bytes),
1573         GBENU_STATS_P8(rx_bottom_fifo_drop),
1574         GBENU_STATS_P8(rx_port_mask_drop),
1575         GBENU_STATS_P8(rx_top_fifo_drop),
1576         GBENU_STATS_P8(ale_rate_limit_drop),
1577         GBENU_STATS_P8(ale_vid_ingress_drop),
1578         GBENU_STATS_P8(ale_da_eq_sa_drop),
1579         GBENU_STATS_P8(ale_unknown_ucast),
1580         GBENU_STATS_P8(ale_unknown_ucast_bytes),
1581         GBENU_STATS_P8(ale_unknown_mcast),
1582         GBENU_STATS_P8(ale_unknown_mcast_bytes),
1583         GBENU_STATS_P8(ale_unknown_bcast),
1584         GBENU_STATS_P8(ale_unknown_bcast_bytes),
1585         GBENU_STATS_P8(ale_pol_match),
1586         GBENU_STATS_P8(ale_pol_match_red),
1587         GBENU_STATS_P8(ale_pol_match_yellow),
1588         GBENU_STATS_P8(tx_mem_protect_err),
1589         GBENU_STATS_P8(tx_pri0_drop),
1590         GBENU_STATS_P8(tx_pri1_drop),
1591         GBENU_STATS_P8(tx_pri2_drop),
1592         GBENU_STATS_P8(tx_pri3_drop),
1593         GBENU_STATS_P8(tx_pri4_drop),
1594         GBENU_STATS_P8(tx_pri5_drop),
1595         GBENU_STATS_P8(tx_pri6_drop),
1596         GBENU_STATS_P8(tx_pri7_drop),
1597         GBENU_STATS_P8(tx_pri0_drop_bcnt),
1598         GBENU_STATS_P8(tx_pri1_drop_bcnt),
1599         GBENU_STATS_P8(tx_pri2_drop_bcnt),
1600         GBENU_STATS_P8(tx_pri3_drop_bcnt),
1601         GBENU_STATS_P8(tx_pri4_drop_bcnt),
1602         GBENU_STATS_P8(tx_pri5_drop_bcnt),
1603         GBENU_STATS_P8(tx_pri6_drop_bcnt),
1604         GBENU_STATS_P8(tx_pri7_drop_bcnt),
1605 };
1606
1607 #define XGBE_STATS0_INFO(field)                         \
1608 {                                                       \
1609         "GBE_0:"#field, XGBE_STATS0_MODULE,             \
1610         FIELD_SIZEOF(struct xgbe_hw_stats, field),      \
1611         offsetof(struct xgbe_hw_stats, field)           \
1612 }
1613
1614 #define XGBE_STATS1_INFO(field)                         \
1615 {                                                       \
1616         "GBE_1:"#field, XGBE_STATS1_MODULE,             \
1617         FIELD_SIZEOF(struct xgbe_hw_stats, field),      \
1618         offsetof(struct xgbe_hw_stats, field)           \
1619 }
1620
1621 #define XGBE_STATS2_INFO(field)                         \
1622 {                                                       \
1623         "GBE_2:"#field, XGBE_STATS2_MODULE,             \
1624         FIELD_SIZEOF(struct xgbe_hw_stats, field),      \
1625         offsetof(struct xgbe_hw_stats, field)           \
1626 }
1627
1628 static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
1629         /* GBE module 0 */
1630         XGBE_STATS0_INFO(rx_good_frames),
1631         XGBE_STATS0_INFO(rx_broadcast_frames),
1632         XGBE_STATS0_INFO(rx_multicast_frames),
1633         XGBE_STATS0_INFO(rx_oversized_frames),
1634         XGBE_STATS0_INFO(rx_undersized_frames),
1635         XGBE_STATS0_INFO(overrun_type4),
1636         XGBE_STATS0_INFO(overrun_type5),
1637         XGBE_STATS0_INFO(rx_bytes),
1638         XGBE_STATS0_INFO(tx_good_frames),
1639         XGBE_STATS0_INFO(tx_broadcast_frames),
1640         XGBE_STATS0_INFO(tx_multicast_frames),
1641         XGBE_STATS0_INFO(tx_bytes),
1642         XGBE_STATS0_INFO(tx_64byte_frames),
1643         XGBE_STATS0_INFO(tx_65_to_127byte_frames),
1644         XGBE_STATS0_INFO(tx_128_to_255byte_frames),
1645         XGBE_STATS0_INFO(tx_256_to_511byte_frames),
1646         XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
1647         XGBE_STATS0_INFO(tx_1024byte_frames),
1648         XGBE_STATS0_INFO(net_bytes),
1649         XGBE_STATS0_INFO(rx_sof_overruns),
1650         XGBE_STATS0_INFO(rx_mof_overruns),
1651         XGBE_STATS0_INFO(rx_dma_overruns),
1652         /* XGBE module 1 */
1653         XGBE_STATS1_INFO(rx_good_frames),
1654         XGBE_STATS1_INFO(rx_broadcast_frames),
1655         XGBE_STATS1_INFO(rx_multicast_frames),
1656         XGBE_STATS1_INFO(rx_pause_frames),
1657         XGBE_STATS1_INFO(rx_crc_errors),
1658         XGBE_STATS1_INFO(rx_align_code_errors),
1659         XGBE_STATS1_INFO(rx_oversized_frames),
1660         XGBE_STATS1_INFO(rx_jabber_frames),
1661         XGBE_STATS1_INFO(rx_undersized_frames),
1662         XGBE_STATS1_INFO(rx_fragments),
1663         XGBE_STATS1_INFO(overrun_type4),
1664         XGBE_STATS1_INFO(overrun_type5),
1665         XGBE_STATS1_INFO(rx_bytes),
1666         XGBE_STATS1_INFO(tx_good_frames),
1667         XGBE_STATS1_INFO(tx_broadcast_frames),
1668         XGBE_STATS1_INFO(tx_multicast_frames),
1669         XGBE_STATS1_INFO(tx_pause_frames),
1670         XGBE_STATS1_INFO(tx_deferred_frames),
1671         XGBE_STATS1_INFO(tx_collision_frames),
1672         XGBE_STATS1_INFO(tx_single_coll_frames),
1673         XGBE_STATS1_INFO(tx_mult_coll_frames),
1674         XGBE_STATS1_INFO(tx_excessive_collisions),
1675         XGBE_STATS1_INFO(tx_late_collisions),
1676         XGBE_STATS1_INFO(tx_underrun),
1677         XGBE_STATS1_INFO(tx_carrier_sense_errors),
1678         XGBE_STATS1_INFO(tx_bytes),
1679         XGBE_STATS1_INFO(tx_64byte_frames),
1680         XGBE_STATS1_INFO(tx_65_to_127byte_frames),
1681         XGBE_STATS1_INFO(tx_128_to_255byte_frames),
1682         XGBE_STATS1_INFO(tx_256_to_511byte_frames),
1683         XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
1684         XGBE_STATS1_INFO(tx_1024byte_frames),
1685         XGBE_STATS1_INFO(net_bytes),
1686         XGBE_STATS1_INFO(rx_sof_overruns),
1687         XGBE_STATS1_INFO(rx_mof_overruns),
1688         XGBE_STATS1_INFO(rx_dma_overruns),
1689         /* XGBE module 2 */
1690         XGBE_STATS2_INFO(rx_good_frames),
1691         XGBE_STATS2_INFO(rx_broadcast_frames),
1692         XGBE_STATS2_INFO(rx_multicast_frames),
1693         XGBE_STATS2_INFO(rx_pause_frames),
1694         XGBE_STATS2_INFO(rx_crc_errors),
1695         XGBE_STATS2_INFO(rx_align_code_errors),
1696         XGBE_STATS2_INFO(rx_oversized_frames),
1697         XGBE_STATS2_INFO(rx_jabber_frames),
1698         XGBE_STATS2_INFO(rx_undersized_frames),
1699         XGBE_STATS2_INFO(rx_fragments),
1700         XGBE_STATS2_INFO(overrun_type4),
1701         XGBE_STATS2_INFO(overrun_type5),
1702         XGBE_STATS2_INFO(rx_bytes),
1703         XGBE_STATS2_INFO(tx_good_frames),
1704         XGBE_STATS2_INFO(tx_broadcast_frames),
1705         XGBE_STATS2_INFO(tx_multicast_frames),
1706         XGBE_STATS2_INFO(tx_pause_frames),
1707         XGBE_STATS2_INFO(tx_deferred_frames),
1708         XGBE_STATS2_INFO(tx_collision_frames),
1709         XGBE_STATS2_INFO(tx_single_coll_frames),
1710         XGBE_STATS2_INFO(tx_mult_coll_frames),
1711         XGBE_STATS2_INFO(tx_excessive_collisions),
1712         XGBE_STATS2_INFO(tx_late_collisions),
1713         XGBE_STATS2_INFO(tx_underrun),
1714         XGBE_STATS2_INFO(tx_carrier_sense_errors),
1715         XGBE_STATS2_INFO(tx_bytes),
1716         XGBE_STATS2_INFO(tx_64byte_frames),
1717         XGBE_STATS2_INFO(tx_65_to_127byte_frames),
1718         XGBE_STATS2_INFO(tx_128_to_255byte_frames),
1719         XGBE_STATS2_INFO(tx_256_to_511byte_frames),
1720         XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
1721         XGBE_STATS2_INFO(tx_1024byte_frames),
1722         XGBE_STATS2_INFO(net_bytes),
1723         XGBE_STATS2_INFO(rx_sof_overruns),
1724         XGBE_STATS2_INFO(rx_mof_overruns),
1725         XGBE_STATS2_INFO(rx_dma_overruns),
1726 };
1727
1728 #define for_each_intf(i, priv) \
1729         list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
1730
1731 #define for_each_sec_slave(slave, priv) \
1732         list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
1733
1734 #define first_sec_slave(priv)                                   \
1735         list_first_entry(&priv->secondary_slaves, \
1736                         struct gbe_slave, slave_list)
1737
1738 static void keystone_get_drvinfo(struct net_device *ndev,
1739                                  struct ethtool_drvinfo *info)
1740 {
1741         strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
1742         strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
1743 }
1744
1745 static u32 keystone_get_msglevel(struct net_device *ndev)
1746 {
1747         struct netcp_intf *netcp = netdev_priv(ndev);
1748
1749         return netcp->msg_enable;
1750 }
1751
1752 static void keystone_set_msglevel(struct net_device *ndev, u32 value)
1753 {
1754         struct netcp_intf *netcp = netdev_priv(ndev);
1755
1756         netcp->msg_enable = value;
1757 }
1758
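/* An interface may belong to either the gbe or the xgbe module; look up
 * its private data from whichever module claimed it.
 */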
1759 static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
1760 {
1761         struct gbe_intf *gbe_intf;
1762
1763         gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1764         if (!gbe_intf)
1765                 gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1766
1767         return gbe_intf;
1768 }
1769
1770 static void keystone_get_stat_strings(struct net_device *ndev,
1771                                       uint32_t stringset, uint8_t *data)
1772 {
1773         struct netcp_intf *netcp = netdev_priv(ndev);
1774         struct gbe_intf *gbe_intf;
1775         struct gbe_priv *gbe_dev;
1776         int i;
1777
1778         gbe_intf = keystone_get_intf_data(netcp);
1779         if (!gbe_intf)
1780                 return;
1781         gbe_dev = gbe_intf->gbe_dev;
1782
1783         switch (stringset) {
1784         case ETH_SS_STATS:
1785                 for (i = 0; i < gbe_dev->num_et_stats; i++) {
1786                         memcpy(data, gbe_dev->et_stats[i].desc,
1787                                ETH_GSTRING_LEN);
1788                         data += ETH_GSTRING_LEN;
1789                 }
1790                 break;
1791         case ETH_SS_TEST:
1792                 break;
1793         }
1794 }
1795
1796 static int keystone_get_sset_count(struct net_device *ndev, int stringset)
1797 {
1798         struct netcp_intf *netcp = netdev_priv(ndev);
1799         struct gbe_intf *gbe_intf;
1800         struct gbe_priv *gbe_dev;
1801
1802         gbe_intf = keystone_get_intf_data(netcp);
1803         if (!gbe_intf)
1804                 return -EINVAL;
1805         gbe_dev = gbe_intf->gbe_dev;
1806
1807         switch (stringset) {
1808         case ETH_SS_TEST:
1809                 return 0;
1810         case ETH_SS_STATS:
1811                 return gbe_dev->num_et_stats;
1812         default:
1813                 return -EINVAL;
1814         }
1815 }
1816
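/* Reset the ethtool stats for one hardware stats module: zero the
 * accumulated counters and re-snapshot the current hardware values so
 * the next update starts counting from here.
 */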
1817 static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
1818 {
1819         void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
1820         u32  __iomem *p_stats_entry;
1821         int i;
1822
1823         for (i = 0; i < gbe_dev->num_et_stats; i++) {
1824                 if (gbe_dev->et_stats[i].type == stats_mod) {
1825                         p_stats_entry = base + gbe_dev->et_stats[i].offset;
1826                         gbe_dev->hw_stats[i] = 0;
1827                         gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
1828                 }
1829         }
1830 }
1831
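/* Fold one hardware counter into its accumulated software counter as a
 * delta since the previous read; the unsigned 32-bit subtraction keeps
 * the total correct even if the hardware counter wraps between reads.
 */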
1832 static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
1833                                              int et_stats_entry)
1834 {
1835         void __iomem *base = NULL;
1836         u32  __iomem *p_stats_entry;
1837         u32 curr, delta;
1838
1839         /* The hw_stats_regs pointers are already
1840          * properly set to point to the right base:
1841          */
1842         base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
1843         p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
1844         curr = readl(p_stats_entry);
1845         delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
1846         gbe_dev->hw_stats_prev[et_stats_entry] = curr;
1847         gbe_dev->hw_stats[et_stats_entry] += delta;
1848 }
1849
1850 static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
1851 {
1852         int i;
1853
1854         for (i = 0; i < gbe_dev->num_et_stats; i++) {
1855                 gbe_update_hw_stats_entry(gbe_dev, i);
1856
1857                 if (data)
1858                         data[i] = gbe_dev->hw_stats[i];
1859         }
1860 }
1861
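/* Version 1.4 hardware exposes its stats modules in two pairs; the
 * GBE_STATS_CD_SEL bit in stat_port_en selects whether the A/B or the
 * C/D pair is currently visible.
 */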
1862 static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
1863                                                int stats_mod)
1864 {
1865         u32 val;
1866
1867         val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1868
1869         switch (stats_mod) {
1870         case GBE_STATSA_MODULE:
1871         case GBE_STATSB_MODULE:
1872                 val &= ~GBE_STATS_CD_SEL;
1873                 break;
1874         case GBE_STATSC_MODULE:
1875         case GBE_STATSD_MODULE:
1876                 val |= GBE_STATS_CD_SEL;
1877                 break;
1878         default:
1879                 return;
1880         }
1881
1882         /* make the stat module visible */
1883         writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1884 }
1885
1886 static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
1887 {
1888         gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
1889         gbe_reset_mod_stats(gbe_dev, stats_mod);
1890 }
1891
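/* Version 1.4 variant of gbe_update_stats(): the stats table is split
 * into two halves, so make the A/B pair visible for the first half and
 * the C/D pair for the second half before reading each entry.
 */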
1892 static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1893 {
1894         u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
1895         int et_entry, j, pair;
1896
1897         for (pair = 0; pair < 2; pair++) {
1898                 gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
1899                                                       GBE_STATSC_MODULE :
1900                                                       GBE_STATSA_MODULE));
1901
1902                 for (j = 0; j < half_num_et_stats; j++) {
1903                         et_entry = pair * half_num_et_stats + j;
1904                         gbe_update_hw_stats_entry(gbe_dev, et_entry);
1905
1906                         if (data)
1907                                 data[et_entry] = gbe_dev->hw_stats[et_entry];
1908                 }
1909         }
1910 }
1911
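/* ethtool -S entry point: refresh every counter under the stats lock,
 * using the version 1.4 specific walk where required.
 */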
1912 static void keystone_get_ethtool_stats(struct net_device *ndev,
1913                                        struct ethtool_stats *stats,
1914                                        uint64_t *data)
1915 {
1916         struct netcp_intf *netcp = netdev_priv(ndev);
1917         struct gbe_intf *gbe_intf;
1918         struct gbe_priv *gbe_dev;
1919
1920         gbe_intf = keystone_get_intf_data(netcp);
1921         if (!gbe_intf)
1922                 return;
1923
1924         gbe_dev = gbe_intf->gbe_dev;
1925         spin_lock_bh(&gbe_dev->hw_stats_lock);
1926         if (IS_SS_ID_VER_14(gbe_dev))
1927                 gbe_update_stats_ver14(gbe_dev, data);
1928         else
1929                 gbe_update_stats(gbe_dev, data);
1930         spin_unlock_bh(&gbe_dev->hw_stats_lock);
1931 }
1932
1933 static int keystone_get_link_ksettings(struct net_device *ndev,
1934                                        struct ethtool_link_ksettings *cmd)
1935 {
1936         struct netcp_intf *netcp = netdev_priv(ndev);
1937         struct phy_device *phy = ndev->phydev;
1938         struct gbe_intf *gbe_intf;
1939
1940         if (!phy)
1941                 return -EINVAL;
1942
1943         gbe_intf = keystone_get_intf_data(netcp);
1944         if (!gbe_intf)
1945                 return -EINVAL;
1946
1947         if (!gbe_intf->slave)
1948                 return -EINVAL;
1949
1950         phy_ethtool_ksettings_get(phy, cmd);
1951         cmd->base.port = gbe_intf->slave->phy_port_t;
1952
1953         return 0;
1954 }
1955
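/* Allow the port type to be changed only if the requested type is both
 * supported and advertised by the PHY, then hand the remaining settings
 * to phylib.
 */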
1956 static int keystone_set_link_ksettings(struct net_device *ndev,
1957                                        const struct ethtool_link_ksettings *cmd)
1958 {
1959         struct netcp_intf *netcp = netdev_priv(ndev);
1960         struct phy_device *phy = ndev->phydev;
1961         struct gbe_intf *gbe_intf;
1962         u8 port = cmd->base.port;
1963         u32 advertising, supported;
1964         u32 features;
1965
1966         ethtool_convert_link_mode_to_legacy_u32(&advertising,
1967                                                 cmd->link_modes.advertising);
1968         ethtool_convert_link_mode_to_legacy_u32(&supported,
1969                                                 cmd->link_modes.supported);
1970         features = advertising & supported;
1971
1972         if (!phy)
1973                 return -EINVAL;
1974
1975         gbe_intf = keystone_get_intf_data(netcp);
1976         if (!gbe_intf)
1977                 return -EINVAL;
1978
1979         if (!gbe_intf->slave)
1980                 return -EINVAL;
1981
1982         if (port != gbe_intf->slave->phy_port_t) {
1983                 if ((port == PORT_TP) && !(features & ADVERTISED_TP))
1984                         return -EINVAL;
1985
1986                 if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
1987                         return -EINVAL;
1988
1989                 if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
1990                         return -EINVAL;
1991
1992                 if ((port == PORT_MII) && !(features & ADVERTISED_MII))
1993                         return -EINVAL;
1994
1995                 if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
1996                         return -EINVAL;
1997         }
1998
1999         gbe_intf->slave->phy_port_t = port;
2000         return phy_ethtool_ksettings_set(phy, cmd);
2001 }
2002
2003 #if IS_ENABLED(CONFIG_TI_CPTS)
2004 static int keystone_get_ts_info(struct net_device *ndev,
2005                                 struct ethtool_ts_info *info)
2006 {
2007         struct netcp_intf *netcp = netdev_priv(ndev);
2008         struct gbe_intf *gbe_intf;
2009
2010         gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2011         if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
2012                 return -EINVAL;
2013
2014         info->so_timestamping =
2015                 SOF_TIMESTAMPING_TX_HARDWARE |
2016                 SOF_TIMESTAMPING_TX_SOFTWARE |
2017                 SOF_TIMESTAMPING_RX_HARDWARE |
2018                 SOF_TIMESTAMPING_RX_SOFTWARE |
2019                 SOF_TIMESTAMPING_SOFTWARE |
2020                 SOF_TIMESTAMPING_RAW_HARDWARE;
2021         info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
2022         info->tx_types =
2023                 (1 << HWTSTAMP_TX_OFF) |
2024                 (1 << HWTSTAMP_TX_ON);
2025         info->rx_filters =
2026                 (1 << HWTSTAMP_FILTER_NONE) |
2027                 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2028                 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2029         return 0;
2030 }
2031 #else
2032 static int keystone_get_ts_info(struct net_device *ndev,
2033                                 struct ethtool_ts_info *info)
2034 {
2035         info->so_timestamping =
2036                 SOF_TIMESTAMPING_TX_SOFTWARE |
2037                 SOF_TIMESTAMPING_RX_SOFTWARE |
2038                 SOF_TIMESTAMPING_SOFTWARE;
2039         info->phc_index = -1;
2040         info->tx_types = 0;
2041         info->rx_filters = 0;
2042         return 0;
2043 }
2044 #endif /* CONFIG_TI_CPTS */
2045
2046 static const struct ethtool_ops keystone_ethtool_ops = {
2047         .get_drvinfo            = keystone_get_drvinfo,
2048         .get_link               = ethtool_op_get_link,
2049         .get_msglevel           = keystone_get_msglevel,
2050         .set_msglevel           = keystone_set_msglevel,
2051         .get_strings            = keystone_get_stat_strings,
2052         .get_sset_count         = keystone_get_sset_count,
2053         .get_ethtool_stats      = keystone_get_ethtool_stats,
2054         .get_link_ksettings     = keystone_get_link_ksettings,
2055         .set_link_ksettings     = keystone_set_link_ksettings,
2056         .get_ts_info            = keystone_get_ts_info,
2057 };
2058
2059 static void gbe_set_slave_mac(struct gbe_slave *slave,
2060                               struct gbe_intf *gbe_intf)
2061 {
2062         struct net_device *ndev = gbe_intf->ndev;
2063
2064         writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
2065         writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
2066 }
2067
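/* Map a slave index to its switch port number: with the host on port 0,
 * slave N sits on port N + 1.
 */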
2068 static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
2069 {
2070         if (priv->host_port == 0)
2071                 return slave_num + 1;
2072
2073         return slave_num;
2074 }
2075
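/* Apply a link state change to a slave port: on link up, program
 * mac_control for the negotiated speed and let the ALE forward; on link
 * down, clear mac_control and disable the port.  The carrier is toggled
 * here only for link types that do not have a MAC-to-PHY connection.
 */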
2076 static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
2077                                           struct net_device *ndev,
2078                                           struct gbe_slave *slave,
2079                                           int up)
2080 {
2081         struct phy_device *phy = slave->phy;
2082         u32 mac_control = 0;
2083
2084         if (up) {
2085                 mac_control = slave->mac_control;
2086                 if (phy && (phy->speed == SPEED_1000)) {
2087                         mac_control |= MACSL_GIG_MODE;
2088                         mac_control &= ~MACSL_XGIG_MODE;
2089                 } else if (phy && (phy->speed == SPEED_10000)) {
2090                         mac_control |= MACSL_XGIG_MODE;
2091                         mac_control &= ~MACSL_GIG_MODE;
2092                 }
2093
2094                 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2095                                                  mac_control));
2096
2097                 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2098                                      ALE_PORT_STATE,
2099                                      ALE_PORT_STATE_FORWARD);
2100
2101                 if (ndev && slave->open &&
2102                     ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2103                     (slave->link_interface != RGMII_LINK_MAC_PHY) &&
2104                     (slave->link_interface != XGMII_LINK_MAC_PHY)))
2105                         netif_carrier_on(ndev);
2106         } else {
2107                 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2108                                                  mac_control));
2109                 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2110                                      ALE_PORT_STATE,
2111                                      ALE_PORT_STATE_DISABLE);
2112                 if (ndev &&
2113                     ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2114                     (slave->link_interface != RGMII_LINK_MAC_PHY) &&
2115                     (slave->link_interface != XGMII_LINK_MAC_PHY)))
2116                         netif_carrier_off(ndev);
2117         }
2118
2119         if (phy)
2120                 phy_print_status(phy);
2121 }
2122
2123 static bool gbe_phy_link_status(struct gbe_slave *slave)
2124 {
2125         return !slave->phy || slave->phy->link;
2126 }
2127
2128 #define RGMII_REG_STATUS_LINK   BIT(0)
2129
2130 static void netcp_2u_rgmii_get_port_link(struct gbe_priv *gbe_dev, bool *status)
2131 {
2132         u32 val = 0;
2133
2134         val = readl(GBE_REG_ADDR(gbe_dev, ss_regs, rgmii_status));
2135         *status = !!(val & RGMII_REG_STATUS_LINK);
2136 }
2137
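/* Combine the link status reported by the SGMII/RGMII hardware with the
 * PHY link status and act only when the combined state differs from the
 * one seen last time.
 */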
2138 static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
2139                                           struct gbe_slave *slave,
2140                                           struct net_device *ndev)
2141 {
2142         bool sw_link_state = true, phy_link_state;
2143         int sp = slave->slave_num, link_state;
2144
2145         if (!slave->open)
2146                 return;
2147
2148         if (SLAVE_LINK_IS_RGMII(slave))
2149                 netcp_2u_rgmii_get_port_link(gbe_dev,
2150                                              &sw_link_state);
2151         if (SLAVE_LINK_IS_SGMII(slave))
2152                 sw_link_state =
2153                 netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
2154
2155         phy_link_state = gbe_phy_link_status(slave);
2156         link_state = phy_link_state & sw_link_state;
2157
2158         if (atomic_xchg(&slave->link_state, link_state) != link_state)
2159                 netcp_ethss_link_state_action(gbe_dev, ndev, slave,
2160                                               link_state);
2161 }
2162
2163 static void xgbe_adjust_link(struct net_device *ndev)
2164 {
2165         struct netcp_intf *netcp = netdev_priv(ndev);
2166         struct gbe_intf *gbe_intf;
2167
2168         gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
2169         if (!gbe_intf)
2170                 return;
2171
2172         netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2173                                       ndev);
2174 }
2175
2176 static void gbe_adjust_link(struct net_device *ndev)
2177 {
2178         struct netcp_intf *netcp = netdev_priv(ndev);
2179         struct gbe_intf *gbe_intf;
2180
2181         gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2182         if (!gbe_intf)
2183                 return;
2184
2185         netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2186                                       ndev);
2187 }
2188
2189 static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
2190 {
2191         struct gbe_priv *gbe_dev = netdev_priv(ndev);
2192         struct gbe_slave *slave;
2193
2194         for_each_sec_slave(slave, gbe_dev)
2195                 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2196 }
2197
2198 /* Reset EMAC
2199  * Soft reset is set and polled until clear, or until a timeout occurs
2200  */
2201 static int gbe_port_reset(struct gbe_slave *slave)
2202 {
2203         u32 i, v;
2204
2205         /* Set the soft reset bit */
2206         writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
2207
2208         /* Wait for the bit to clear */
2209         for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
2210                 v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
2211                 if ((v & SOFT_RESET_MASK) != SOFT_RESET)
2212                         return 0;
2213         }
2214
2215         /* Timeout on the reset */
2216         return GMACSL_RET_WARN_RESET_INCOMPLETE;
2217 }
2218
2219 /* Configure EMAC */
2220 static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2221                             int max_rx_len)
2222 {
2223         void __iomem *rx_maxlen_reg;
2224         u32 xgmii_mode;
2225
2226         if (max_rx_len > NETCP_MAX_FRAME_SIZE)
2227                 max_rx_len = NETCP_MAX_FRAME_SIZE;
2228
2229         /* Enable correct MII mode at SS level */
2230         if (IS_SS_ID_XGBE(gbe_dev) &&
2231             (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
2232                 xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
2233                 xgmii_mode |= (1 << slave->slave_num);
2234                 writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
2235         }
2236
2237         if (IS_SS_ID_MU(gbe_dev))
2238                 rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
2239         else
2240                 rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
2241
2242         writel(max_rx_len, rx_maxlen_reg);
2243         writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
2244 }
2245
2246 static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2247                               struct gbe_slave *slave, bool set)
2248 {
2249         if (SLAVE_LINK_IS_XGMII(slave))
2250                 return;
2251
2252         netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2253                             slave->slave_num, set);
2254 }
2255
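/* Quiesce a slave port: hold it in reset, stop ALE forwarding, remove
 * the broadcast ALE entry and disconnect the PHY if one is attached.
 */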
2256 static void gbe_slave_stop(struct gbe_intf *intf)
2257 {
2258         struct gbe_priv *gbe_dev = intf->gbe_dev;
2259         struct gbe_slave *slave = intf->slave;
2260
2261         if (!IS_SS_ID_2U(gbe_dev))
2262                 gbe_sgmii_rtreset(gbe_dev, slave, true);
2263         gbe_port_reset(slave);
2264         /* Disable forwarding */
2265         cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2266                              ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2267         cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
2268                            1 << slave->port_num, 0, 0);
2269
2270         if (!slave->phy)
2271                 return;
2272
2273         phy_stop(slave->phy);
2274         phy_disconnect(slave->phy);
2275         slave->phy = NULL;
2276 }
2277
2278 static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2279 {
2280         if (SLAVE_LINK_IS_XGMII(slave))
2281                 return;
2282
2283         netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2284         netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2285                            slave->link_interface);
2286 }
2287
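/* Bring up a slave port: configure the SGMII link where applicable,
 * reset and configure the EMAC, program the port MAC address, enable
 * ALE forwarding and, for MAC-to-PHY link types, connect and start the
 * PHY.
 */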
2288 static int gbe_slave_open(struct gbe_intf *gbe_intf)
2289 {
2290         struct gbe_priv *priv = gbe_intf->gbe_dev;
2291         struct gbe_slave *slave = gbe_intf->slave;
2292         phy_interface_t phy_mode;
2293         bool has_phy = false;
2294
2295         void (*hndlr)(struct net_device *) = gbe_adjust_link;
2296
2297         if (!IS_SS_ID_2U(priv))
2298                 gbe_sgmii_config(priv, slave);
2299         gbe_port_reset(slave);
2300         if (!IS_SS_ID_2U(priv))
2301                 gbe_sgmii_rtreset(priv, slave, false);
2302         gbe_port_config(priv, slave, priv->rx_packet_max);
2303         gbe_set_slave_mac(slave, gbe_intf);
2304         /* For NU & 2U switch, map the vlan priorities to zero
2305          * as we only configure to use priority 0
2306          */
2307         if (IS_SS_ID_MU(priv))
2308                 writel(HOST_TX_PRI_MAP_DEFAULT,
2309                        GBE_REG_ADDR(slave, port_regs, rx_pri_map));
2310
2311         /* enable forwarding */
2312         cpsw_ale_control_set(priv->ale, slave->port_num,
2313                              ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2314         cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
2315                            1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
2316
2317         if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2318                 has_phy = true;
2319                 phy_mode = PHY_INTERFACE_MODE_SGMII;
2320                 slave->phy_port_t = PORT_MII;
2321         } else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
2322                 has_phy = true;
2323                 phy_mode = of_get_phy_mode(slave->node);
2324                 /* if phy-mode is not present, default to
2325                  * PHY_INTERFACE_MODE_RGMII
2326                  */
2327                 if (phy_mode < 0)
2328                         phy_mode = PHY_INTERFACE_MODE_RGMII;
2329
2330                 if (!phy_interface_mode_is_rgmii(phy_mode)) {
2331                         dev_err(priv->dev,
2332                                 "Unsupported phy mode %d\n", phy_mode);
2333                         return -EINVAL;
2334                 }
2335                 slave->phy_port_t = PORT_MII;
2336         } else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
2337                 has_phy = true;
2338                 phy_mode = PHY_INTERFACE_MODE_NA;
2339                 slave->phy_port_t = PORT_FIBRE;
2340         }
2341
2342         if (has_phy) {
2343                 if (IS_SS_ID_XGBE(priv))
2344                         hndlr = xgbe_adjust_link;
2345
2346                 slave->phy = of_phy_connect(gbe_intf->ndev,
2347                                             slave->phy_node,
2348                                             hndlr, 0,
2349                                             phy_mode);
2350                 if (!slave->phy) {
2351                         dev_err(priv->dev, "phy not found on slave %d\n",
2352                                 slave->slave_num);
2353                         return -ENODEV;
2354                 }
2355                 dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
2356                         phydev_name(slave->phy));
2357                 phy_start(slave->phy);
2358         }
2359         return 0;
2360 }
2361
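/* One-time host port setup: program the default TX priority map and RX
 * maximum length, start the ALE and set its bypass, flooding and port
 * mask defaults.
 */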
2362 static void gbe_init_host_port(struct gbe_priv *priv)
2363 {
2364         int bypass_en = 1;
2365
2366         /* Host Tx Pri */
2367         if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
2368                 writel(HOST_TX_PRI_MAP_DEFAULT,
2369                        GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
2370
2371         /* Max length register */
2372         writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
2373                                                   rx_maxlen));
2374
2375         cpsw_ale_start(priv->ale);
2376
2377         if (priv->enable_ale)
2378                 bypass_en = 0;
2379
2380         cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
2381
2382         cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
2383
2384         cpsw_ale_control_set(priv->ale, priv->host_port,
2385                              ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2386
2387         cpsw_ale_control_set(priv->ale, 0,
2388                              ALE_PORT_UNKNOWN_VLAN_MEMBER,
2389                              GBE_PORT_MASK(priv->ale_ports));
2390
2391         cpsw_ale_control_set(priv->ale, 0,
2392                              ALE_PORT_UNKNOWN_MCAST_FLOOD,
2393                              GBE_PORT_MASK(priv->ale_ports - 1));
2394
2395         cpsw_ale_control_set(priv->ale, 0,
2396                              ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
2397                              GBE_PORT_MASK(priv->ale_ports));
2398
2399         cpsw_ale_control_set(priv->ale, 0,
2400                              ALE_PORT_UNTAGGED_EGRESS,
2401                              GBE_PORT_MASK(priv->ale_ports));
2402 }
2403
2404 static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2405 {
2406         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2407         u16 vlan_id;
2408
2409         cpsw_ale_add_mcast(gbe_dev->ale, addr,
2410                            GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2411                            ALE_MCAST_FWD_2);
2412         for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2413                 cpsw_ale_add_mcast(gbe_dev->ale, addr,
2414                                    GBE_PORT_MASK(gbe_dev->ale_ports),
2415                                    ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2416         }
2417 }
2418
2419 static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2420 {
2421         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2422         u16 vlan_id;
2423
2424         cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2425
2426         for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2427                 cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2428                                    ALE_VLAN, vlan_id);
2429 }
2430
2431 static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2432 {
2433         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2434         u16 vlan_id;
2435
2436         cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2437
2438         for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2439                 cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2440         }
2441 }
2442
2443 static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2444 {
2445         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2446         u16 vlan_id;
2447
2448         cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2449
2450         for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2451                 cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2452                                    ALE_VLAN, vlan_id);
2453         }
2454 }
2455
2456 static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2457 {
2458         struct gbe_intf *gbe_intf = intf_priv;
2459         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2460
2461         dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2462                 naddr->addr, naddr->type);
2463
2464         switch (naddr->type) {
2465         case ADDR_MCAST:
2466         case ADDR_BCAST:
2467                 gbe_add_mcast_addr(gbe_intf, naddr->addr);
2468                 break;
2469         case ADDR_UCAST:
2470         case ADDR_DEV:
2471                 gbe_add_ucast_addr(gbe_intf, naddr->addr);
2472                 break;
2473         case ADDR_ANY:
2474                 /* nothing to do for promiscuous */
2475         default:
2476                 break;
2477         }
2478
2479         return 0;
2480 }
2481
2482 static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2483 {
2484         struct gbe_intf *gbe_intf = intf_priv;
2485         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2486
2487         dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2488                 naddr->addr, naddr->type);
2489
2490         switch (naddr->type) {
2491         case ADDR_MCAST:
2492         case ADDR_BCAST:
2493                 gbe_del_mcast_addr(gbe_intf, naddr->addr);
2494                 break;
2495         case ADDR_UCAST:
2496         case ADDR_DEV:
2497                 gbe_del_ucast_addr(gbe_intf, naddr->addr);
2498                 break;
2499         case ADDR_ANY:
2500                 /* nothing to do for promiscuous */
2501         default:
2502                 break;
2503         }
2504
2505         return 0;
2506 }
2507
2508 static int gbe_add_vid(void *intf_priv, int vid)
2509 {
2510         struct gbe_intf *gbe_intf = intf_priv;
2511         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2512
2513         set_bit(vid, gbe_intf->active_vlans);
2514
2515         cpsw_ale_add_vlan(gbe_dev->ale, vid,
2516                           GBE_PORT_MASK(gbe_dev->ale_ports),
2517                           GBE_MASK_NO_PORTS,
2518                           GBE_PORT_MASK(gbe_dev->ale_ports),
2519                           GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2520
2521         return 0;
2522 }
2523
2524 static int gbe_del_vid(void *intf_priv, int vid)
2525 {
2526         struct gbe_intf *gbe_intf = intf_priv;
2527         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2528
2529         cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2530         clear_bit(vid, gbe_intf->active_vlans);
2531         return 0;
2532 }
2533
2534 #if IS_ENABLED(CONFIG_TI_CPTS)
2535 #define HAS_PHY_TXTSTAMP(p) ((p)->drv && (p)->drv->txtstamp)
2536 #define HAS_PHY_RXTSTAMP(p) ((p)->drv && (p)->drv->rxtstamp)
2537
2538 static void gbe_txtstamp(void *context, struct sk_buff *skb)
2539 {
2540         struct gbe_intf *gbe_intf = context;
2541         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2542
2543         cpts_tx_timestamp(gbe_dev->cpts, skb);
2544 }
2545
2546 static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
2547                               const struct netcp_packet *p_info)
2548 {
2549         struct sk_buff *skb = p_info->skb;
2550
2551         return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
2552 }
2553
2554 static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2555                                  struct netcp_packet *p_info)
2556 {
2557         struct phy_device *phydev = p_info->skb->dev->phydev;
2558         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2559
2560         if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
2561             !gbe_dev->tx_ts_enabled)
2562                 return 0;
2563
2564         /* If phy has the txtstamp api, assume it will do it.
2565          * We mark it here because skb_tx_timestamp() is called
2566          * after all the txhooks are called.
2567          */
2568         if (phydev && HAS_PHY_TXTSTAMP(phydev)) {
2569                 skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2570                 return 0;
2571         }
2572
2573         if (gbe_need_txtstamp(gbe_intf, p_info)) {
2574                 p_info->txtstamp = gbe_txtstamp;
2575                 p_info->ts_context = (void *)gbe_intf;
2576                 skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2577         }
2578
2579         return 0;
2580 }
2581
2582 static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
2583 {
2584         struct phy_device *phydev = p_info->skb->dev->phydev;
2585         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2586
2587         if (p_info->rxtstamp_complete)
2588                 return 0;
2589
2590         if (phydev && HAS_PHY_RXTSTAMP(phydev)) {
2591                 p_info->rxtstamp_complete = true;
2592                 return 0;
2593         }
2594
2595         if (gbe_dev->rx_ts_enabled)
2596                 cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
2597
2598         p_info->rxtstamp_complete = true;
2599
2600         return 0;
2601 }
2602
2603 static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2604 {
2605         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2606         struct cpts *cpts = gbe_dev->cpts;
2607         struct hwtstamp_config cfg;
2608
2609         if (!cpts)
2610                 return -EOPNOTSUPP;
2611
2612         cfg.flags = 0;
2613         cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2614         cfg.rx_filter = gbe_dev->rx_ts_enabled;
2615
2616         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2617 }
2618
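/* Program the per-port timestamping registers from the slave's ts_ctl
 * settings, or clear ts_ctl when neither RX nor TX timestamping is
 * enabled.
 */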
2619 static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
2620 {
2621         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2622         struct gbe_slave *slave = gbe_intf->slave;
2623         u32 ts_en, seq_id, ctl;
2624
2625         if (!gbe_dev->rx_ts_enabled &&
2626             !gbe_dev->tx_ts_enabled) {
2627                 writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
2628                 return;
2629         }
2630
2631         seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
2632         ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
2633         ctl = ETH_P_1588 | TS_TTL_NONZERO |
2634                 (slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
2635                 (slave->ts_ctl.uni ?  TS_UNI_EN :
2636                         slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
2637
2638         if (gbe_dev->tx_ts_enabled)
2639                 ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
2640
2641         if (gbe_dev->rx_ts_enabled)
2642                 ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
2643
2644         writel(ts_en,  GBE_REG_ADDR(slave, port_regs, ts_ctl));
2645         writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
2646         writel(ctl,    GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
2647 }
2648
2649 static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2650 {
2651         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2652         struct cpts *cpts = gbe_dev->cpts;
2653         struct hwtstamp_config cfg;
2654
2655         if (!cpts)
2656                 return -EOPNOTSUPP;
2657
2658         if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
2659                 return -EFAULT;
2660
2661         /* reserved for future extensions */
2662         if (cfg.flags)
2663                 return -EINVAL;
2664
2665         switch (cfg.tx_type) {
2666         case HWTSTAMP_TX_OFF:
2667                 gbe_dev->tx_ts_enabled = 0;
2668                 break;
2669         case HWTSTAMP_TX_ON:
2670                 gbe_dev->tx_ts_enabled = 1;
2671                 break;
2672         default:
2673                 return -ERANGE;
2674         }
2675
2676         switch (cfg.rx_filter) {
2677         case HWTSTAMP_FILTER_NONE:
2678                 gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
2679                 break;
2680         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2681         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2682         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2683                 gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2684                 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2685                 break;
2686         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2687         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2688         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2689         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2690         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2691         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2692         case HWTSTAMP_FILTER_PTP_V2_EVENT:
2693         case HWTSTAMP_FILTER_PTP_V2_SYNC:
2694         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2695                 gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
2696                 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2697                 break;
2698         default:
2699                 return -ERANGE;
2700         }
2701
2702         gbe_hwtstamp(gbe_intf);
2703
2704         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2705 }
2706
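/* CPTS registration is reference counted: the clock is registered for
 * the first user and unregistered again when the last user goes away.
 */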
2707 static void gbe_register_cpts(struct gbe_priv *gbe_dev)
2708 {
2709         if (!gbe_dev->cpts)
2710                 return;
2711
2712         if (gbe_dev->cpts_registered > 0)
2713                 goto done;
2714
2715         if (cpts_register(gbe_dev->cpts)) {
2716                 dev_err(gbe_dev->dev, "error registering cpts device\n");
2717                 return;
2718         }
2719
2720 done:
2721         ++gbe_dev->cpts_registered;
2722 }
2723
2724 static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2725 {
2726         if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
2727                 return;
2728
2729         if (--gbe_dev->cpts_registered)
2730                 return;
2731
2732         cpts_unregister(gbe_dev->cpts);
2733 }
2734 #else
2735 static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2736                                         struct netcp_packet *p_info)
2737 {
2738         return 0;
2739 }
2740
2741 static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
2742                                struct netcp_packet *p_info)
2743 {
2744         return 0;
2745 }
2746
2747 static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
2748                                struct ifreq *ifr, int cmd)
2749 {
2750         return -EOPNOTSUPP;
2751 }
2752
2753 static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
2754 {
2755 }
2756
2757 static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2758 {
2759 }
2760
2761 static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
2762 {
2763         return -EOPNOTSUPP;
2764 }
2765
2766 static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
2767 {
2768         return -EOPNOTSUPP;
2769 }
2770 #endif /* CONFIG_TI_CPTS */
2771
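/* Switch the ALE between normal and promiscuous operation.  Entering promisc
 * mode disables learning/SA-update on all ports, ages out learned entries
 * (polling the AGEOUT bit for up to a second), flushes multicast entries and
 * floods unknown unicast to the host port; leaving it re-enables learning and
 * stops the unicast flood.
 */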
2772 static int gbe_set_rx_mode(void *intf_priv, bool promisc)
2773 {
2774         struct gbe_intf *gbe_intf = intf_priv;
2775         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2776         struct cpsw_ale *ale = gbe_dev->ale;
2777         unsigned long timeout;
2778         int i, ret = -ETIMEDOUT;
2779
2780         /* Disable(1)/Enable(0) Learn for all ports (host is port 0 and
2781          * slaves are port 1 and up)
2782          */
2783         for (i = 0; i <= gbe_dev->num_slaves; i++) {
2784                 cpsw_ale_control_set(ale, i,
2785                                      ALE_PORT_NOLEARN, !!promisc);
2786                 cpsw_ale_control_set(ale, i,
2787                                      ALE_PORT_NO_SA_UPDATE, !!promisc);
2788         }
2789
2790         if (!promisc) {
2791                 /* Don't Flood All Unicast Packets to Host port */
2792                 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
2793                 dev_vdbg(gbe_dev->dev, "promiscuous mode disabled\n");
2794                 return 0;
2795         }
2796
2797         timeout = jiffies + HZ;
2798
2799         /* Clear All Untouched entries */
2800         cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
2801         do {
2802                 cpu_relax();
2803                 if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) {
2804                         ret = 0;
2805                         break;
2806                 }
2807
2808         } while (time_after(timeout, jiffies));
2809
2810         /* Make sure it is not a false timeout */
2811         if (ret && !cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
2812                 return ret;
2813
2814         cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
2815
2816         /* Clear all mcast from ALE */
2817         cpsw_ale_flush_multicast(ale,
2818                                  GBE_PORT_MASK(gbe_dev->ale_ports),
2819                                  -1);
2820
2821         /* Flood All Unicast Packets to Host port */
2822         cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
2823         dev_vdbg(gbe_dev->dev, "promiscuous mode enabled\n");
2824         return ret;
2825 }
2826
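/* Interface ioctl hook: hardware timestamping requests are handled by the
 * CPTS code unless the attached PHY provides its own hwtstamp support, in
 * which case (and for all other commands) the ioctl is passed to the PHY.
 */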
2827 static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2828 {
2829         struct gbe_intf *gbe_intf = intf_priv;
2830         struct phy_device *phy = gbe_intf->slave->phy;
2831
2832         if (!phy || !phy->drv->hwtstamp) {
2833                 switch (cmd) {
2834                 case SIOCGHWTSTAMP:
2835                         return gbe_hwtstamp_get(gbe_intf, req);
2836                 case SIOCSHWTSTAMP:
2837                         return gbe_hwtstamp_set(gbe_intf, req);
2838                 }
2839         }
2840
2841         if (phy)
2842                 return phy_mii_ioctl(phy, req, cmd);
2843
2844         return -EOPNOTSUPP;
2845 }
2846
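/* Periodic housekeeping timer: refresh the SGMII link state of every open
 * interface and of the secondary slave ports, snapshot the hardware
 * statistics under hw_stats_lock, then re-arm itself.
 */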
2847 static void netcp_ethss_timer(struct timer_list *t)
2848 {
2849         struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
2850         struct gbe_intf *gbe_intf;
2851         struct gbe_slave *slave;
2852
2853         /* Check & update SGMII link state of interfaces */
2854         for_each_intf(gbe_intf, gbe_dev) {
2855                 if (!gbe_intf->slave->open)
2856                         continue;
2857                 netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
2858                                               gbe_intf->ndev);
2859         }
2860
2861         /* Check & update SGMII link state of secondary ports */
2862         for_each_sec_slave(slave, gbe_dev) {
2863                 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2864         }
2865
2866         /* A timer callback runs in BH context, no need to block BHs here */
2867         spin_lock(&gbe_dev->hw_stats_lock);
2868
2869         if (IS_SS_ID_VER_14(gbe_dev))
2870                 gbe_update_stats_ver14(gbe_dev, NULL);
2871         else
2872                 gbe_update_stats(gbe_dev, NULL);
2873
2874         spin_unlock(&gbe_dev->hw_stats_lock);
2875
2876         gbe_dev->timer.expires  = jiffies + GBE_TIMER_INTERVAL;
2877         add_timer(&gbe_dev->timer);
2878 }
2879
2880 static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
2881 {
2882         struct gbe_intf *gbe_intf = data;
2883
2884         p_info->tx_pipe = &gbe_intf->tx_pipe;
2885
2886         return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
2887 }
2888
2889 static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
2890 {
2891         struct gbe_intf *gbe_intf = data;
2892
2893         return gbe_rxtstamp(gbe_intf, p_info);
2894 }
2895
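/* Per-interface open: configure the TX pipe (directed-to-port on XGBE/NU),
 * stop the slave port before reconfiguring, program the switch control,
 * ptype and statistics-enable registers, bring the slave up, install the
 * TX/RX hooks and register CPTS.
 */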
2896 static int gbe_open(void *intf_priv, struct net_device *ndev)
2897 {
2898         struct gbe_intf *gbe_intf = intf_priv;
2899         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2900         struct netcp_intf *netcp = netdev_priv(ndev);
2901         struct gbe_slave *slave = gbe_intf->slave;
2902         int port_num = slave->port_num;
2903         u32 reg, val;
2904         int ret;
2905
2906         reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
2907         dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
2908                 GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
2909                 GBE_RTL_VERSION(reg), GBE_IDENT(reg));
2910
2911         /* For 10G and NetCP 1.5 (NU), direct packets to the port via TX tag info */
2912         if (IS_SS_ID_XGBE(gbe_dev) || IS_SS_ID_MU(gbe_dev))
2913                 gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
2914
2915         if (gbe_dev->enable_ale)
2916                 gbe_intf->tx_pipe.switch_to_port = 0;
2917         else
2918                 gbe_intf->tx_pipe.switch_to_port = port_num;
2919
2920         dev_dbg(gbe_dev->dev,
2921                 "opened TX channel %s: %p to port %d, flags %d\n",
2922                 gbe_intf->tx_pipe.dma_chan_name,
2923                 gbe_intf->tx_pipe.dma_channel,
2924                 gbe_intf->tx_pipe.switch_to_port,
2925                 gbe_intf->tx_pipe.flags);
2926
2927         gbe_slave_stop(gbe_intf);
2928
2929         /* disable priority elevation and enable statistics on all ports */
2930         writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
2931
2932         /* Control register */
2933         val = GBE_CTL_P0_ENABLE;
2934         if (IS_SS_ID_MU(gbe_dev)) {
2935                 val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
2936                 netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
2937         }
2938         writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
2939
2940         /* All statistics enabled and STAT AB visible by default */
2941         writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
2942                                                     stat_port_en));
2943
2944         ret = gbe_slave_open(gbe_intf);
2945         if (ret)
2946                 goto fail;
2947
2948         netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2949         netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2950
2951         slave->open = true;
2952         netcp_ethss_update_link_state(gbe_dev, slave, ndev);
2953
2954         gbe_register_cpts(gbe_dev);
2955
2956         return 0;
2957
2958 fail:
2959         gbe_slave_stop(gbe_intf);
2960         return ret;
2961 }
2962
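/* Per-interface close, the inverse of gbe_open(): unregister CPTS, stop the
 * slave port, remove the TX/RX hooks and invalidate the link state.
 */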
2963 static int gbe_close(void *intf_priv, struct net_device *ndev)
2964 {
2965         struct gbe_intf *gbe_intf = intf_priv;
2966         struct netcp_intf *netcp = netdev_priv(ndev);
2967         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2968
2969         gbe_unregister_cpts(gbe_dev);
2970
2971         gbe_slave_stop(gbe_intf);
2972
2973         netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2974         netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2975
2976         gbe_intf->slave->open = false;
2977         atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
2978         return 0;
2979 }
2980
2981 #if IS_ENABLED(CONFIG_TI_CPTS)
2982 static void init_slave_ts_ctl(struct gbe_slave *slave)
2983 {
2984         slave->ts_ctl.uni = 1;
2985         slave->ts_ctl.dst_port_map =
2986                 (TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
2987         slave->ts_ctl.maddr_map =
2988                 (TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
2989 }
2990
2991 #else
2992 static void init_slave_ts_ctl(struct gbe_slave *slave)
2993 {
2994 }
2995 #endif /* CONFIG_TI_CPTS */
2996
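/* Parse one slave port node: read the "slave-port" and "link-interface"
 * properties, pick the default MAC control value, derive the per-slave port
 * and EMAC register bases for the detected subsystem variant, and set up the
 * register offset tables and timestamp control defaults.
 */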
2997 static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2998                       struct device_node *node)
2999 {
3000         int port_reg_num;
3001         u32 port_reg_ofs, emac_reg_ofs;
3002         u32 port_reg_blk_sz, emac_reg_blk_sz;
3003
3004         if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
3005                 dev_err(gbe_dev->dev, "missing slave-port parameter\n");
3006                 return -EINVAL;
3007         }
3008
3009         if (of_property_read_u32(node, "link-interface",
3010                                  &slave->link_interface)) {
3011                 dev_warn(gbe_dev->dev,
3012                          "missing link-interface value defaulting to 1G mac-phy link\n");
3013                 slave->link_interface = SGMII_LINK_MAC_PHY;
3014         }
3015
3016         slave->node = node;
3017         slave->open = false;
3018         if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3019             (slave->link_interface == RGMII_LINK_MAC_PHY) ||
3020             (slave->link_interface == XGMII_LINK_MAC_PHY))
3021                 slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
3022         slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
3023
3024         if (slave->link_interface >= XGMII_LINK_MAC_PHY)
3025                 slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
3026         else
3027                 slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
3028
3029         /* EMAC register blocks are mapped contiguously but port register blocks are not */
3030         port_reg_num = slave->slave_num;
3031         if (IS_SS_ID_VER_14(gbe_dev)) {
3032                 if (slave->slave_num > 1) {
3033                         port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
3034                         port_reg_num -= 2;
3035                 } else {
3036                         port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
3037                 }
3038                 emac_reg_ofs = GBE13_EMAC_OFFSET;
3039                 port_reg_blk_sz = 0x30;
3040                 emac_reg_blk_sz = 0x40;
3041         } else if (IS_SS_ID_MU(gbe_dev)) {
3042                 port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
3043                 emac_reg_ofs = GBENU_EMAC_OFFSET;
3044                 port_reg_blk_sz = 0x1000;
3045                 emac_reg_blk_sz = 0x1000;
3046         } else if (IS_SS_ID_XGBE(gbe_dev)) {
3047                 port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
3048                 emac_reg_ofs = XGBE10_EMAC_OFFSET;
3049                 port_reg_blk_sz = 0x30;
3050                 emac_reg_blk_sz = 0x40;
3051         } else {
3052                 dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
3053                         gbe_dev->ss_version);
3054                 return -EINVAL;
3055         }
3056
3057         slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
3058                                 (port_reg_blk_sz * port_reg_num);
3059         slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
3060                                 (emac_reg_blk_sz * slave->slave_num);
3061
3062         if (IS_SS_ID_VER_14(gbe_dev)) {
3063                 /* Initialize  slave port register offsets */
3064                 GBE_SET_REG_OFS(slave, port_regs, port_vlan);
3065                 GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3066                 GBE_SET_REG_OFS(slave, port_regs, sa_lo);
3067                 GBE_SET_REG_OFS(slave, port_regs, sa_hi);
3068                 GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3069                 GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3070                 GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3071                 GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3072                 GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3073
3074                 /* Initialize EMAC register offsets */
3075                 GBE_SET_REG_OFS(slave, emac_regs, mac_control);
3076                 GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3077                 GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3078
3079         } else if (IS_SS_ID_MU(gbe_dev)) {
3080                 /* Initialize  slave port register offsets */
3081                 GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
3082                 GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
3083                 GBENU_SET_REG_OFS(slave, port_regs, rx_pri_map);
3084                 GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
3085                 GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
3086                 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
3087                 GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3088                 GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
3089                 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3090                 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
3091                 GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
3092
3093                 /* Initialize EMAC register offsets */
3094                 GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
3095                 GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
3096
3097         } else if (IS_SS_ID_XGBE(gbe_dev)) {
3098                 /* Initialize  slave port register offsets */
3099                 XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
3100                 XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3101                 XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
3102                 XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
3103                 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3104                 XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3105                 XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3106                 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3107                 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3108
3109                 /* Initialize EMAC register offsets */
3110                 XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
3111                 XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3112                 XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3113         }
3114
3115         atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
3116
3117         init_slave_ts_ctl(slave);
3118         return 0;
3119 }
3120
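/* Set up slave ports that are not exposed as network interfaces: each child
 * node is parsed with init_slave(), configured and reset, and added to the
 * secondary_slaves list.  MAC-PHY links additionally get a dummy netdev so
 * that of_phy_connect() can be used to attach and start their PHYs.
 */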
3121 static void init_secondary_ports(struct gbe_priv *gbe_dev,
3122                                  struct device_node *node)
3123 {
3124         struct device *dev = gbe_dev->dev;
3125         phy_interface_t phy_mode;
3126         struct gbe_priv **priv;
3127         struct device_node *port;
3128         struct gbe_slave *slave;
3129         bool mac_phy_link = false;
3130
3131         for_each_child_of_node(node, port) {
3132                 slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
3133                 if (!slave) {
3134                         dev_err(dev, "memory alloc failed for secondary port(%pOFn), skipping...\n",
3135                                 port);
3136                         continue;
3137                 }
3138
3139                 if (init_slave(gbe_dev, slave, port)) {
3140                         dev_err(dev,
3141                                 "Failed to initialize secondary port(%pOFn), skipping...\n",
3142                                 port);
3143                         devm_kfree(dev, slave);
3144                         continue;
3145                 }
3146
3147                 if (!IS_SS_ID_2U(gbe_dev))
3148                         gbe_sgmii_config(gbe_dev, slave);
3149                 gbe_port_reset(slave);
3150                 gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
3151                 list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
3152                 gbe_dev->num_slaves++;
3153                 if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3154                     (slave->link_interface == XGMII_LINK_MAC_PHY))
3155                         mac_phy_link = true;
3156
3157                 slave->open = true;
3158                 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3159                         of_node_put(port);
3160                         break;
3161                 }
3162         }
3163
3164         /* of_phy_connect() is needed only for MAC-PHY interface */
3165         if (!mac_phy_link)
3166                 return;
3167
3168         /* Allocate dummy netdev device for attaching to phy device */
3169         gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
3170                                         NET_NAME_UNKNOWN, ether_setup);
3171         if (!gbe_dev->dummy_ndev) {
3172                 dev_err(dev,
3173                         "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
3174                 return;
3175         }
3176         priv = netdev_priv(gbe_dev->dummy_ndev);
3177         *priv = gbe_dev;
3178
3179         if (slave->link_interface == SGMII_LINK_MAC_PHY) {
3180                 phy_mode = PHY_INTERFACE_MODE_SGMII;
3181                 slave->phy_port_t = PORT_MII;
3182         } else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
3183                 phy_mode = PHY_INTERFACE_MODE_RGMII;
3184                 slave->phy_port_t = PORT_MII;
3185         } else {
3186                 phy_mode = PHY_INTERFACE_MODE_NA;
3187                 slave->phy_port_t = PORT_FIBRE;
3188         }
3189
3190         for_each_sec_slave(slave, gbe_dev) {
3191                 if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
3192                     (slave->link_interface != RGMII_LINK_MAC_PHY) &&
3193                     (slave->link_interface != XGMII_LINK_MAC_PHY))
3194                         continue;
3195                 slave->phy =
3196                         of_phy_connect(gbe_dev->dummy_ndev,
3197                                        slave->phy_node,
3198                                        gbe_adjust_link_sec_slaves,
3199                                        0, phy_mode);
3200                 if (!slave->phy) {
3201                         dev_err(dev, "phy not found for slave %d\n",
3202                                 slave->slave_num);
3203                 } else {
3204                         dev_dbg(dev, "phy found: %s\n",
3205                                 phydev_name(slave->phy));
3206                         phy_start(slave->phy);
3207                 }
3208         }
3209 }
3210
3211 static void free_secondary_ports(struct gbe_priv *gbe_dev)
3212 {
3213         struct gbe_slave *slave;
3214
3215         while (!list_empty(&gbe_dev->secondary_slaves)) {
3216                 slave = first_sec_slave(gbe_dev);
3217
3218                 if (slave->phy)
3219                         phy_disconnect(slave->phy);
3220                 list_del(&slave->slave_list);
3221         }
3222         if (gbe_dev->dummy_ndev)
3223                 free_netdev(gbe_dev->dummy_ndev);
3224 }
3225
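/* XGBE (10G) variant setup: map the subsystem, switch-module and SerDes
 * register regions, allocate the hardware statistics buffers and fill in the
 * XGBE-specific register offsets, ALE and host-port parameters.
 */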
3226 static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
3227                                  struct device_node *node)
3228 {
3229         struct resource res;
3230         void __iomem *regs;
3231         int ret, i;
3232
3233         ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
3234         if (ret) {
3235                 dev_err(gbe_dev->dev,
3236                         "Can't xlate xgbe of node(%pOFn) ss address at %d\n",
3237                         node, XGBE_SS_REG_INDEX);
3238                 return ret;
3239         }
3240
3241         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3242         if (IS_ERR(regs)) {
3243                 dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
3244                 return PTR_ERR(regs);
3245         }
3246         gbe_dev->ss_regs = regs;
3247
3248         ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
3249         if (ret) {
3250                 dev_err(gbe_dev->dev,
3251                         "Can't xlate xgbe of node(%pOFn) sm address at %d\n",
3252                         node, XGBE_SM_REG_INDEX);
3253                 return ret;
3254         }
3255
3256         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3257         if (IS_ERR(regs)) {
3258                 dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
3259                 return PTR_ERR(regs);
3260         }
3261         gbe_dev->switch_regs = regs;
3262
3263         ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
3264         if (ret) {
3265                 dev_err(gbe_dev->dev,
3266                         "Can't xlate xgbe serdes of node(%pOFn) address at %d\n",
3267                         node, XGBE_SERDES_REG_INDEX);
3268                 return ret;
3269         }
3270
3271         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3272         if (IS_ERR(regs)) {
3273                 dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
3274                 return PTR_ERR(regs);
3275         }
3276         gbe_dev->xgbe_serdes_regs = regs;
3277
3278         gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3279         gbe_dev->et_stats = xgbe10_et_stats;
3280         gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
3281
3282         gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3283                                          gbe_dev->num_et_stats, sizeof(u64),
3284                                          GFP_KERNEL);
3285         if (!gbe_dev->hw_stats) {
3286                 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3287                 return -ENOMEM;
3288         }
3289
3290         gbe_dev->hw_stats_prev =
3291                 devm_kcalloc(gbe_dev->dev,
3292                              gbe_dev->num_et_stats, sizeof(u32),
3293                              GFP_KERNEL);
3294         if (!gbe_dev->hw_stats_prev) {
3295                 dev_err(gbe_dev->dev,
3296                         "hw_stats_prev memory allocation failed\n");
3297                 return -ENOMEM;
3298         }
3299
3300         gbe_dev->ss_version = XGBE_SS_VERSION_10;
3301         gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
3302                                         XGBE10_SGMII_MODULE_OFFSET;
3303         gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
3304
3305         for (i = 0; i < gbe_dev->max_num_ports; i++)
3306                 gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3307                         XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
3308
3309         gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
3310         gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
3311         gbe_dev->ale_ports = gbe_dev->max_num_ports;
3312         gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
3313         gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
3314         gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3315
3316         /* Subsystem registers */
3317         XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3318         XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
3319
3320         /* Switch module registers */
3321         XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3322         XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3323         XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3324         XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3325         XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3326
3327         /* Host port registers */
3328         XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3329         XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3330         XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3331         return 0;
3332 }
3333
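/* Map the GBE subsystem register block and read its ID/version register so
 * the probe routine can select the matching per-variant setup function.
 */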
3334 static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
3335                                     struct device_node *node)
3336 {
3337         struct resource res;
3338         void __iomem *regs;
3339         int ret;
3340
3341         ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
3342         if (ret) {
3343                 dev_err(gbe_dev->dev,
3344                         "Can't translate of node(%pOFn) of gbe ss address at %d\n",
3345                         node, GBE_SS_REG_INDEX);
3346                 return ret;
3347         }
3348
3349         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3350         if (IS_ERR(regs)) {
3351                 dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
3352                 return PTR_ERR(regs);
3353         }
3354         gbe_dev->ss_regs = regs;
3355         gbe_dev->ss_version = readl(gbe_dev->ss_regs);
3356         return 0;
3357 }
3358
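/* GBE 1.4 (e.g. K2HK) variant setup: map the SGMII port 3/4 and switch-module
 * register regions, allocate the statistics buffers and fill in the GBE13
 * register offsets, ALE and host-port parameters.
 */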
3359 static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
3360                                 struct device_node *node)
3361 {
3362         struct resource res;
3363         void __iomem *regs;
3364         int i, ret;
3365
3366         ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
3367         if (ret) {
3368                 dev_err(gbe_dev->dev,
3369                         "Can't translate of gbe node(%pOFn) address at index %d\n",
3370                         node, GBE_SGMII34_REG_INDEX);
3371                 return ret;
3372         }
3373
3374         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3375         if (IS_ERR(regs)) {
3376                 dev_err(gbe_dev->dev,
3377                         "Failed to map gbe sgmii port34 register base\n");
3378                 return PTR_ERR(regs);
3379         }
3380         gbe_dev->sgmii_port34_regs = regs;
3381
3382         ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
3383         if (ret) {
3384                 dev_err(gbe_dev->dev,
3385                         "Can't translate of gbe node(%pOFn) address at index %d\n",
3386                         node, GBE_SM_REG_INDEX);
3387                 return ret;
3388         }
3389
3390         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3391         if (IS_ERR(regs)) {
3392                 dev_err(gbe_dev->dev,
3393                         "Failed to map gbe switch module register base\n");
3394                 return PTR_ERR(regs);
3395         }
3396         gbe_dev->switch_regs = regs;
3397
3398         gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
3399         gbe_dev->et_stats = gbe13_et_stats;
3400         gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
3401
3402         gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3403                                          gbe_dev->num_et_stats, sizeof(u64),
3404                                          GFP_KERNEL);
3405         if (!gbe_dev->hw_stats) {
3406                 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3407                 return -ENOMEM;
3408         }
3409
3410         gbe_dev->hw_stats_prev =
3411                 devm_kcalloc(gbe_dev->dev,
3412                              gbe_dev->num_et_stats, sizeof(u32),
3413                              GFP_KERNEL);
3414         if (!gbe_dev->hw_stats_prev) {
3415                 dev_err(gbe_dev->dev,
3416                         "hw_stats_prev memory allocation failed\n");
3417                 return -ENOMEM;
3418         }
3419
3420         gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
3421         gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
3422
3423         /* K2HK has only 2 hw stats modules visible at a time, so
3424          * modules 0 & 2 point to one base and
3425          * modules 1 & 3 point to the other base
3426          */
3427         for (i = 0; i < gbe_dev->max_num_slaves; i++) {
3428                 gbe_dev->hw_stats_regs[i] =
3429                         gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
3430                         (GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
3431         }
3432
3433         gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
3434         gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
3435         gbe_dev->ale_ports = gbe_dev->max_num_ports;
3436         gbe_dev->host_port = GBE13_HOST_PORT_NUM;
3437         gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
3438         gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
3439
3440         /* Subsystem registers */
3441         GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3442
3443         /* Switch module registers */
3444         GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3445         GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3446         GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
3447         GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3448         GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3449         GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3450
3451         /* Host port registers */
3452         GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3453         GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3454         return 0;
3455 }
3456
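/* GBENU (NU/2U) variant setup: size the statistics tables for the NU or 2U
 * port count, map the switch-module registers, derive the SGMII, host-port
 * and per-port stats register bases and fill in the GBENU register offsets.
 */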
3457 static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
3458                                 struct device_node *node)
3459 {
3460         struct resource res;
3461         void __iomem *regs;
3462         int i, ret;
3463
3464         gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3465         gbe_dev->et_stats = gbenu_et_stats;
3466
3467         if (IS_SS_ID_MU(gbe_dev))
3468                 gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3469                         (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
3470         else
3471                 gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3472                                         GBENU_ET_STATS_PORT_SIZE;
3473
3474         gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3475                                          gbe_dev->num_et_stats, sizeof(u64),
3476                                          GFP_KERNEL);
3477         if (!gbe_dev->hw_stats) {
3478                 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3479                 return -ENOMEM;
3480         }
3481
3482         gbe_dev->hw_stats_prev =
3483                 devm_kcalloc(gbe_dev->dev,
3484                              gbe_dev->num_et_stats, sizeof(u32),
3485                              GFP_KERNEL);
3486         if (!gbe_dev->hw_stats_prev) {
3487                 dev_err(gbe_dev->dev,
3488                         "hw_stats_prev memory allocation failed\n");
3489                 return -ENOMEM;
3490         }
3491
3492         ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
3493         if (ret) {
3494                 dev_err(gbe_dev->dev,
3495                         "Can't translate of gbenu node(%pOFn) addr at index %d\n",
3496                         node, GBENU_SM_REG_INDEX);
3497                 return ret;
3498         }
3499
3500         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3501         if (IS_ERR(regs)) {
3502                 dev_err(gbe_dev->dev,
3503                         "Failed to map gbenu switch module register base\n");
3504                 return PTR_ERR(regs);
3505         }
3506         gbe_dev->switch_regs = regs;
3507
3508         if (!IS_SS_ID_2U(gbe_dev))
3509                 gbe_dev->sgmii_port_regs =
3510                        gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
3511
3512         /* Although sgmii modules are mem mapped to one contiguous
3513          * region on GBENU devices, setting sgmii_port34_regs allows
3514          * consistent code when accessing sgmii api
3515          */
3516         gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
3517                                      (2 * GBENU_SGMII_MODULE_SIZE);
3518
3519         gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
3520
3521         for (i = 0; i < (gbe_dev->max_num_ports); i++)
3522                 gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3523                         GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
3524
3525         gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
3526         gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
3527         gbe_dev->ale_ports = gbe_dev->max_num_ports;
3528         gbe_dev->host_port = GBENU_HOST_PORT_NUM;
3529         gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3530
3531         /* Subsystem registers */
3532         GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3533         /* ok to set for MU, but used by 2U only */
3534         GBENU_SET_REG_OFS(gbe_dev, ss_regs, rgmii_status);
3535
3536         /* Switch module registers */
3537         GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3538         GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
3539         GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3540         GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3541
3542         /* Host port registers */
3543         GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3544         GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3545
3546         /* For NU only.  2U does not need tx_pri_map.
3547          * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
3548          * while 2U has only 1 such thread
3549          */
3550         GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3551         return 0;
3552 }
3553
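/* Instance probe: parse the ethss device tree node, map the registers for the
 * detected subsystem variant, create the TX pipe, count the configured
 * interfaces, initialize any secondary slave ports, the ALE engine, CPTS and
 * the host port, and start the periodic link/statistics timer.
 */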
3554 static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
3555                      struct device_node *node, void **inst_priv)
3556 {
3557         struct device_node *interfaces, *interface;
3558         struct device_node *secondary_ports;
3559         struct cpsw_ale_params ale_params;
3560         struct gbe_priv *gbe_dev;
3561         u32 slave_num;
3562         int i, ret = 0;
3563
3564         if (!node) {
3565                 dev_err(dev, "device tree info unavailable\n");
3566                 return -ENODEV;
3567         }
3568
3569         gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
3570         if (!gbe_dev)
3571                 return -ENOMEM;
3572
3573         if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
3574             of_device_is_compatible(node, "ti,netcp-gbe")) {
3575                 gbe_dev->max_num_slaves = 4;
3576         } else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
3577                 gbe_dev->max_num_slaves = 8;
3578         } else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
3579                 gbe_dev->max_num_slaves = 1;
3580                 gbe_module.set_rx_mode = gbe_set_rx_mode;
3581         } else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
3582                 gbe_dev->max_num_slaves = 2;
3583         } else {
3584                 dev_err(dev, "device tree node for unknown device\n");
3585                 return -EINVAL;
3586         }
3587         gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
3588
3589         gbe_dev->dev = dev;
3590         gbe_dev->netcp_device = netcp_device;
3591         gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
3592
3593         /* init the hw stats lock */
3594         spin_lock_init(&gbe_dev->hw_stats_lock);
3595
3596         if (of_find_property(node, "enable-ale", NULL)) {
3597                 gbe_dev->enable_ale = true;
3598                 dev_info(dev, "ALE enabled\n");
3599         } else {
3600                 gbe_dev->enable_ale = false;
3601                 dev_dbg(dev, "ALE bypass enabled\n");
3602         }
3603
3604         ret = of_property_read_u32(node, "tx-queue",
3605                                    &gbe_dev->tx_queue_id);
3606         if (ret < 0) {
3607                 dev_err(dev, "missing \"tx-queue\" parameter\n");
3608                 gbe_dev->tx_queue_id = GBE_TX_QUEUE;
3609         }
3610
3611         ret = of_property_read_string(node, "tx-channel",
3612                                       &gbe_dev->dma_chan_name);
3613         if (ret < 0) {
3614                 dev_err(dev, "missing \"tx-channel\" parameter\n");
3615                 return -EINVAL;
3616         }
3617
3618         if (of_node_name_eq(node, "gbe")) {
3619                 ret = get_gbe_resource_version(gbe_dev, node);
3620                 if (ret)
3621                         return ret;
3622
3623                 dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
3624
3625                 if (IS_SS_ID_VER_14(gbe_dev))
3626                         ret = set_gbe_ethss14_priv(gbe_dev, node);
3627                 else if (IS_SS_ID_MU(gbe_dev))
3628                         ret = set_gbenu_ethss_priv(gbe_dev, node);
3629                 else
3630                         ret = -ENODEV;
3631
3632         } else if (of_node_name_eq(node, "xgbe")) {
3633                 ret = set_xgbe_ethss10_priv(gbe_dev, node);
3634                 if (ret)
3635                         return ret;
3636                 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
3637                                              gbe_dev->ss_regs);
3638         } else {
3639                 dev_err(dev, "unknown GBE node(%pOFn)\n", node);
3640                 ret = -ENODEV;
3641         }
3642
3643         if (ret)
3644                 return ret;
3645
3646         interfaces = of_get_child_by_name(node, "interfaces");
3647         if (!interfaces)
3648                 dev_err(dev, "could not find interfaces\n");
3649
3650         ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
3651                                 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
3652         if (ret) {
3653                 of_node_put(interfaces);
3654                 return ret;
3655         }
3656
3657         ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
3658         if (ret) {
3659                 of_node_put(interfaces);
3660                 return ret;
3661         }
3662
3663         /* Create network interfaces */
3664         INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
3665         for_each_child_of_node(interfaces, interface) {
3666                 ret = of_property_read_u32(interface, "slave-port", &slave_num);
3667                 if (ret) {
3668                         dev_err(dev, "missing slave-port parameter, skipping interface configuration for %pOFn\n",
3669                                 interface);
3670                         continue;
3671                 }
3672                 gbe_dev->num_slaves++;
3673                 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3674                         of_node_put(interface);
3675                         break;
3676                 }
3677         }
3678         of_node_put(interfaces);
3679
3680         if (!gbe_dev->num_slaves)
3681                 dev_warn(dev, "No network interface configured\n");
3682
3683         /* Initialize Secondary slave ports */
3684         secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
3685         INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
3686         if (secondary_ports && (gbe_dev->num_slaves <  gbe_dev->max_num_slaves))
3687                 init_secondary_ports(gbe_dev, secondary_ports);
3688         of_node_put(secondary_ports);
3689
3690         if (!gbe_dev->num_slaves) {
3691                 dev_err(dev,
3692                         "No network interface or secondary ports configured\n");
3693                 ret = -ENODEV;
3694                 goto free_sec_ports;
3695         }
3696
3697         memset(&ale_params, 0, sizeof(ale_params));
3698         ale_params.dev          = gbe_dev->dev;
3699         ale_params.ale_regs     = gbe_dev->ale_reg;
3700         ale_params.ale_ageout   = GBE_DEFAULT_ALE_AGEOUT;
3701         ale_params.ale_entries  = gbe_dev->ale_entries;
3702         ale_params.ale_ports    = gbe_dev->ale_ports;
3703         if (IS_SS_ID_MU(gbe_dev)) {
3704                 ale_params.major_ver_mask = 0x7;
3705                 ale_params.nu_switch_ale = true;
3706         }
3707         gbe_dev->ale = cpsw_ale_create(&ale_params);
3708         if (!gbe_dev->ale) {
3709                 dev_err(gbe_dev->dev, "error initializing ale engine\n");
3710                 ret = -ENODEV;
3711                 goto free_sec_ports;
3712         } else {
3713                 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
3714         }
3715
3716         gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg, node);
3717         if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
3718                 ret = PTR_ERR(gbe_dev->cpts);
3719                 goto free_sec_ports;
3720         }
3721
3722         /* initialize host port */
3723         gbe_init_host_port(gbe_dev);
3724
3725         spin_lock_bh(&gbe_dev->hw_stats_lock);
3726         for (i = 0; i < gbe_dev->num_stats_mods; i++) {
3727                 if (IS_SS_ID_VER_14(gbe_dev))
3728                         gbe_reset_mod_stats_ver14(gbe_dev, i);
3729                 else
3730                         gbe_reset_mod_stats(gbe_dev, i);
3731         }
3732         spin_unlock_bh(&gbe_dev->hw_stats_lock);
3733
3734         timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
3735         gbe_dev->timer.expires   = jiffies + GBE_TIMER_INTERVAL;
3736         add_timer(&gbe_dev->timer);
3737         *inst_priv = gbe_dev;
3738         return 0;
3739
3740 free_sec_ports:
3741         free_secondary_ports(gbe_dev);
3742         return ret;
3743 }
3744
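/* Attach a network interface to this ethss instance: allocate gbe_intf and
 * slave state, initialize the slave from its device tree node, inherit the
 * instance TX pipe and hook up the Keystone ethtool ops.
 */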
3745 static int gbe_attach(void *inst_priv, struct net_device *ndev,
3746                       struct device_node *node, void **intf_priv)
3747 {
3748         struct gbe_priv *gbe_dev = inst_priv;
3749         struct gbe_intf *gbe_intf;
3750         int ret;
3751
3752         if (!node) {
3753                 dev_err(gbe_dev->dev, "interface node not available\n");
3754                 return -ENODEV;
3755         }
3756
3757         gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
3758         if (!gbe_intf)
3759                 return -ENOMEM;
3760
3761         gbe_intf->ndev = ndev;
3762         gbe_intf->dev = gbe_dev->dev;
3763         gbe_intf->gbe_dev = gbe_dev;
3764
3765         gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
3766                                         sizeof(*gbe_intf->slave),
3767                                         GFP_KERNEL);
3768         if (!gbe_intf->slave) {
3769                 ret = -ENOMEM;
3770                 goto fail;
3771         }
3772
3773         if (init_slave(gbe_dev, gbe_intf->slave, node)) {
3774                 ret = -ENODEV;
3775                 goto fail;
3776         }
3777
3778         gbe_intf->tx_pipe = gbe_dev->tx_pipe;
3779         ndev->ethtool_ops = &keystone_ethtool_ops;
3780         list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
3781         *intf_priv = gbe_intf;
3782         return 0;
3783
3784 fail:
3785         if (gbe_intf->slave)
3786                 devm_kfree(gbe_dev->dev, gbe_intf->slave);
3787         if (gbe_intf)
3788                 devm_kfree(gbe_dev->dev, gbe_intf);
3789         return ret;
3790 }
3791
3792 static int gbe_release(void *intf_priv)
3793 {
3794         struct gbe_intf *gbe_intf = intf_priv;
3795
3796         gbe_intf->ndev->ethtool_ops = NULL;
3797         list_del(&gbe_intf->gbe_intf_list);
3798         devm_kfree(gbe_intf->dev, gbe_intf->slave);
3799         devm_kfree(gbe_intf->dev, gbe_intf);
3800         return 0;
3801 }
3802
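/* Instance teardown: stop the timer, release CPTS and the ALE engine, close
 * the TX pipe and free the secondary ports; any interfaces still attached at
 * this point are reported.
 */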
3803 static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3804 {
3805         struct gbe_priv *gbe_dev = inst_priv;
3806
3807         del_timer_sync(&gbe_dev->timer);
3808         cpts_release(gbe_dev->cpts);
3809         cpsw_ale_stop(gbe_dev->ale);
3810         netcp_txpipe_close(&gbe_dev->tx_pipe);
3811         free_secondary_ports(gbe_dev);
3812
3813         if (!list_empty(&gbe_dev->gbe_intf_head))
3814                 dev_alert(gbe_dev->dev,
3815                           "unreleased ethss interfaces present\n");
3816
3817         return 0;
3818 }
3819
3820 static struct netcp_module gbe_module = {
3821         .name           = GBE_MODULE_NAME,
3822         .owner          = THIS_MODULE,
3823         .primary        = true,
3824         .probe          = gbe_probe,
3825         .open           = gbe_open,
3826         .close          = gbe_close,
3827         .remove         = gbe_remove,
3828         .attach         = gbe_attach,
3829         .release        = gbe_release,
3830         .add_addr       = gbe_add_addr,
3831         .del_addr       = gbe_del_addr,
3832         .add_vid        = gbe_add_vid,
3833         .del_vid        = gbe_del_vid,
3834         .ioctl          = gbe_ioctl,
3835 };
3836
3837 static struct netcp_module xgbe_module = {
3838         .name           = XGBE_MODULE_NAME,
3839         .owner          = THIS_MODULE,
3840         .primary        = true,
3841         .probe          = gbe_probe,
3842         .open           = gbe_open,
3843         .close          = gbe_close,
3844         .remove         = gbe_remove,
3845         .attach         = gbe_attach,
3846         .release        = gbe_release,
3847         .add_addr       = gbe_add_addr,
3848         .del_addr       = gbe_del_addr,
3849         .add_vid        = gbe_add_vid,
3850         .del_vid        = gbe_del_vid,
3851         .ioctl          = gbe_ioctl,
3852 };
3853
3854 static int __init keystone_gbe_init(void)
3855 {
3856         int ret;
3857
3858         ret = netcp_register_module(&gbe_module);
3859         if (ret)
3860                 return ret;
3861
3862         ret = netcp_register_module(&xgbe_module);
3863         if (ret) {
                     netcp_unregister_module(&gbe_module);
                     return ret;
             }
3865
3866         return 0;
3867 }
3868 module_init(keystone_gbe_init);
3869
3870 static void __exit keystone_gbe_exit(void)
3871 {
3872         netcp_unregister_module(&gbe_module);
3873         netcp_unregister_module(&xgbe_module);
3874 }
3875 module_exit(keystone_gbe_exit);
3876
3877 MODULE_LICENSE("GPL v2");
3878 MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
3879 MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");