dma-mapping: fix 32-bit overflow with CONFIG_ARM_LPAE=n
[platform/kernel/linux-rpi.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/jhash.h>
25 #include <linux/log2.h>
26 #include <net/switchdev.h>
27 #include <net/pkt_cls.h>
28 #include <net/netevent.h>
29 #include <net/addrconf.h>
30
31 #include "spectrum.h"
32 #include "pci.h"
33 #include "core.h"
34 #include "core_env.h"
35 #include "reg.h"
36 #include "port.h"
37 #include "trap.h"
38 #include "txheader.h"
39 #include "spectrum_cnt.h"
40 #include "spectrum_dpipe.h"
41 #include "spectrum_acl_flex_actions.h"
42 #include "spectrum_span.h"
43 #include "spectrum_ptp.h"
44 #include "spectrum_trap.h"
45
/* Minimum firmware revisions the driver works with, one set per Spectrum
 * ASIC generation (SP1/SP2/SP3). The matching .mfa2 firmware image name
 * is constructed from these numbers below.
 */
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
#define MLXSW_SP1_FWREV_SUBMINOR 1310
/* NOTE(review): presumably the oldest minor revision from which the device
 * can be reset into new firmware — confirm against the flashing logic.
 */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Expands to e.g. "mellanox/mlxsw_spectrum-13.2008.1310.mfa2". */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
#define MLXSW_SP2_FWREV_SUBMINOR 1310

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP2_FWREV_MINOR,
	.subminor = MLXSW_SP2_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP2_FWREV_MINOR) \
	"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
#define MLXSW_SP3_FWREV_SUBMINOR 1310

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP3_FWREV_MINOR,
	.subminor = MLXSW_SP3_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP3_FWREV_MINOR) \
	"." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"

/* Driver names per ASIC generation. */
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";

/* Per-generation base MAC masks; usage is outside this chunk —
 * NOTE(review): presumably passed as driver_info masks, confirm below.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
103
/* Tx header field definitions. The Tx header is prepended to every packet
 * sent to the device (see mlxsw_sp_txhdr_construct() below); each
 * MLXSW_ITEM32() defines a field by byte offset, bit shift and width.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
171
172 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
173                               unsigned int counter_index, u64 *packets,
174                               u64 *bytes)
175 {
176         char mgpc_pl[MLXSW_REG_MGPC_LEN];
177         int err;
178
179         mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
180                             MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
181         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
182         if (err)
183                 return err;
184         if (packets)
185                 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
186         if (bytes)
187                 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
188         return 0;
189 }
190
191 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
192                                        unsigned int counter_index)
193 {
194         char mgpc_pl[MLXSW_REG_MGPC_LEN];
195
196         mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
197                             MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
198         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
199 }
200
201 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
202                                 unsigned int *p_counter_index)
203 {
204         int err;
205
206         err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
207                                      p_counter_index);
208         if (err)
209                 return err;
210         err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
211         if (err)
212                 goto err_counter_clear;
213         return 0;
214
215 err_counter_clear:
216         mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
217                               *p_counter_index);
218         return err;
219 }
220
221 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
222                                 unsigned int counter_index)
223 {
224          mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
225                                counter_index);
226 }
227
228 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
229                                      const struct mlxsw_tx_info *tx_info)
230 {
231         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
232
233         memset(txhdr, 0, MLXSW_TXHDR_LEN);
234
235         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
236         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
237         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
238         mlxsw_tx_hdr_swid_set(txhdr, 0);
239         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
240         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
241         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
242 }
243
244 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
245 {
246         switch (state) {
247         case BR_STATE_FORWARDING:
248                 return MLXSW_REG_SPMS_STATE_FORWARDING;
249         case BR_STATE_LEARNING:
250                 return MLXSW_REG_SPMS_STATE_LEARNING;
251         case BR_STATE_LISTENING:
252         case BR_STATE_DISABLED:
253         case BR_STATE_BLOCKING:
254                 return MLXSW_REG_SPMS_STATE_DISCARDING;
255         default:
256                 BUG();
257         }
258 }
259
260 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
261                               u8 state)
262 {
263         enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
264         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
265         char *spms_pl;
266         int err;
267
268         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
269         if (!spms_pl)
270                 return -ENOMEM;
271         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
272         mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
273
274         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
275         kfree(spms_pl);
276         return err;
277 }
278
279 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
280 {
281         char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
282         int err;
283
284         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
285         if (err)
286                 return err;
287         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
288         return 0;
289 }
290
291 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
292                                    bool is_up)
293 {
294         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
295         char paos_pl[MLXSW_REG_PAOS_LEN];
296
297         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
298                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
299                             MLXSW_PORT_ADMIN_STATUS_DOWN);
300         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
301 }
302
303 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
304                                       unsigned char *addr)
305 {
306         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
307         char ppad_pl[MLXSW_REG_PPAD_LEN];
308
309         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
310         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
311         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
312 }
313
314 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
315 {
316         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
317         unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
318
319         ether_addr_copy(addr, mlxsw_sp->base_mac);
320         addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
321         return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
322 }
323
324 static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
325 {
326         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
327         char pmtu_pl[MLXSW_REG_PMTU_LEN];
328         int err;
329
330         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
331         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
332         if (err)
333                 return err;
334
335         *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
336         return 0;
337 }
338
339 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
340 {
341         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
342         char pmtu_pl[MLXSW_REG_PMTU_LEN];
343
344         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
345         if (mtu > mlxsw_sp_port->max_mtu)
346                 return -EINVAL;
347
348         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
349         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
350 }
351
352 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
353 {
354         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
355         char pspa_pl[MLXSW_REG_PSPA_LEN];
356
357         mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
358         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
359 }
360
361 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
362 {
363         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
364         char svpe_pl[MLXSW_REG_SVPE_LEN];
365
366         mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
367         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
368 }
369
370 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
371                                    bool learn_enable)
372 {
373         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
374         char *spvmlr_pl;
375         int err;
376
377         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
378         if (!spvmlr_pl)
379                 return -ENOMEM;
380         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
381                               learn_enable);
382         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
383         kfree(spvmlr_pl);
384         return err;
385 }
386
387 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
388                                     u16 vid)
389 {
390         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
391         char spvid_pl[MLXSW_REG_SPVID_LEN];
392
393         mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
394         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
395 }
396
397 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
398                                             bool allow)
399 {
400         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
401         char spaft_pl[MLXSW_REG_SPAFT_LEN];
402
403         mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
404         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
405 }
406
407 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
408 {
409         int err;
410
411         if (!vid) {
412                 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
413                 if (err)
414                         return err;
415         } else {
416                 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
417                 if (err)
418                         return err;
419                 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
420                 if (err)
421                         goto err_port_allow_untagged_set;
422         }
423
424         mlxsw_sp_port->pvid = vid;
425         return 0;
426
427 err_port_allow_untagged_set:
428         __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
429         return err;
430 }
431
432 static int
433 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
434 {
435         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
436         char sspr_pl[MLXSW_REG_SSPR_LEN];
437
438         mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
439         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
440 }
441
/* Query the PMLP register for @local_port and fill @port_mapping with the
 * port's module, width and first lane. Returns -EINVAL on configurations
 * the driver does not support: non-power-of-2 width, multiple modules on
 * one port, different RX/TX lane numbers, or non-sequential lanes.
 */
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	bool separate_rxtx;
	u8 module;
	u8 width;
	int err;
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	/* The module of lane 0 must be shared by all lanes (checked below). */
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	/* Validate each lane against the supported layout. */
	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
492
493 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
494 {
495         struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
496         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
497         char pmlp_pl[MLXSW_REG_PMLP_LEN];
498         int i;
499
500         mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
501         mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
502         for (i = 0; i < port_mapping->width; i++) {
503                 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
504                 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
505         }
506
507         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
508 }
509
510 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
511 {
512         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
513         char pmlp_pl[MLXSW_REG_PMLP_LEN];
514
515         mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
516         mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
517         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
518 }
519
520 static int mlxsw_sp_port_open(struct net_device *dev)
521 {
522         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
523         int err;
524
525         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
526         if (err)
527                 return err;
528         netif_start_queue(dev);
529         return 0;
530 }
531
532 static int mlxsw_sp_port_stop(struct net_device *dev)
533 {
534         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
535
536         netif_stop_queue(dev);
537         return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
538 }
539
/* ndo_start_xmit handler: prepend the mlxsw Tx header and hand the skb to
 * the core for transmission as a control packet directed at this port.
 * Updates the per-CPU Tx byte/packet counters on success and the dropped
 * counter on failure.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	/* Make headroom for the Tx header; drop the packet if that fails. */
	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	/* Busy check happens before padding/header so a requeued skb is
	 * resubmitted unmodified.
	 */
	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (eth_skb_pad(skb)) {
		/* eth_skb_pad() frees the skb on failure, so only count it. */
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
592
/* ndo_set_rx_mode handler — intentionally empty; nothing is programmed
 * here for promiscuity/multicast-list changes.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
596
597 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
598 {
599         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
600         struct sockaddr *addr = p;
601         int err;
602
603         if (!is_valid_ether_addr(addr->sa_data))
604                 return -EADDRNOTAVAIL;
605
606         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
607         if (err)
608                 return err;
609         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
610         return 0;
611 }
612
/* ndo_change_mtu handler: resize the port headroom buffers for the new
 * MTU first, then program the MTU itself. If the MTU write fails, the
 * original headroom configuration is restored.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	/* Keep a copy for rollback. */
	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	/* Best-effort rollback of the headroom change. */
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}
642
/* Sum the per-CPU software counters (packets handled by the CPU path)
 * into @stats. Per-CPU 64-bit counters are read under the u64_stats
 * seqcount retry loop so torn reads on 32-bit hosts are avoided.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return 0;
}
674
675 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
676 {
677         switch (attr_id) {
678         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
679                 return true;
680         }
681
682         return false;
683 }
684
685 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
686                                            void *sp)
687 {
688         switch (attr_id) {
689         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
690                 return mlxsw_sp_port_get_sw_stats64(dev, sp);
691         }
692
693         return -EINVAL;
694 }
695
696 int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
697                                 int prio, char *ppcnt_pl)
698 {
699         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
700         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
701
702         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
703         return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
704 }
705
706 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
707                                       struct rtnl_link_stats64 *stats)
708 {
709         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
710         int err;
711
712         err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
713                                           0, ppcnt_pl);
714         if (err)
715                 goto out;
716
717         stats->tx_packets =
718                 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
719         stats->rx_packets =
720                 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
721         stats->tx_bytes =
722                 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
723         stats->rx_bytes =
724                 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
725         stats->multicast =
726                 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
727
728         stats->rx_crc_errors =
729                 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
730         stats->rx_frame_errors =
731                 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
732
733         stats->rx_length_errors = (
734                 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
735                 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
736                 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
737
738         stats->rx_errors = (stats->rx_crc_errors +
739                 stats->rx_frame_errors + stats->rx_length_errors);
740
741 out:
742         return err;
743 }
744
/* Fill @xstats from hardware counters: ECN marks, per-TC WRED drops,
 * backlog and tail drops, and per-priority Tx counters. Each register
 * query is best-effort — on error the affected entries are simply left
 * with their previous values.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
786
/* Delayed-work handler that periodically refreshes the cached hardware
 * statistics so mlxsw_sp_port_get_stats64() can serve them from atomic
 * context. Always re-arms itself, even when the carrier is down.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	/* Re-arm for the next periodic refresh. */
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}
808
809 /* Return the stats from a cache that is updated periodically,
810  * as this function might get called in an atomic context.
811  */
812 static void
813 mlxsw_sp_port_get_stats64(struct net_device *dev,
814                           struct rtnl_link_stats64 *stats)
815 {
816         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
817
818         memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
819 }
820
821 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
822                                     u16 vid_begin, u16 vid_end,
823                                     bool is_member, bool untagged)
824 {
825         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
826         char *spvm_pl;
827         int err;
828
829         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
830         if (!spvm_pl)
831                 return -ENOMEM;
832
833         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
834                             vid_end, is_member, untagged);
835         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
836         kfree(spvm_pl);
837         return err;
838 }
839
840 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
841                            u16 vid_end, bool is_member, bool untagged)
842 {
843         u16 vid, vid_e;
844         int err;
845
846         for (vid = vid_begin; vid <= vid_end;
847              vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
848                 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
849                             vid_end);
850
851                 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
852                                                is_member, untagged);
853                 if (err)
854                         return err;
855         }
856
857         return 0;
858 }
859
860 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
861                                      bool flush_default)
862 {
863         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
864
865         list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
866                                  &mlxsw_sp_port->vlans_list, list) {
867                 if (!flush_default &&
868                     mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
869                         continue;
870                 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
871         }
872 }
873
874 static void
875 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
876 {
877         if (mlxsw_sp_port_vlan->bridge_port)
878                 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
879         else if (mlxsw_sp_port_vlan->fid)
880                 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
881 }
882
883 struct mlxsw_sp_port_vlan *
884 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
885 {
886         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
887         bool untagged = vid == MLXSW_SP_DEFAULT_VID;
888         int err;
889
890         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
891         if (mlxsw_sp_port_vlan)
892                 return ERR_PTR(-EEXIST);
893
894         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
895         if (err)
896                 return ERR_PTR(err);
897
898         mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
899         if (!mlxsw_sp_port_vlan) {
900                 err = -ENOMEM;
901                 goto err_port_vlan_alloc;
902         }
903
904         mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
905         mlxsw_sp_port_vlan->vid = vid;
906         list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
907
908         return mlxsw_sp_port_vlan;
909
910 err_port_vlan_alloc:
911         mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
912         return ERR_PTR(err);
913 }
914
/* Tear down a {port, VLAN} entry: detach it from its bridge port or
 * router FID, unlink and free it, and remove the VLAN membership from
 * HW. Note that vid must be read before the entry is freed.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
925
926 static int mlxsw_sp_port_add_vid(struct net_device *dev,
927                                  __be16 __always_unused proto, u16 vid)
928 {
929         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
930
931         /* VLAN 0 is added to HW filter when device goes up, but it is
932          * reserved in our case, so simply return.
933          */
934         if (!vid)
935                 return 0;
936
937         return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
938 }
939
940 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
941                                   __be16 __always_unused proto, u16 vid)
942 {
943         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
944         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
945
946         /* VLAN 0 is removed from HW filter when device goes down, but
947          * it is reserved in our case, so simply return.
948          */
949         if (!vid)
950                 return 0;
951
952         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
953         if (!mlxsw_sp_port_vlan)
954                 return 0;
955         mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
956
957         return 0;
958 }
959
960 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
961                                    struct flow_block_offload *f)
962 {
963         switch (f->binder_type) {
964         case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
965                 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
966         case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
967                 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
968         case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
969                 return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
970         default:
971                 return -EOPNOTSUPP;
972         }
973 }
974
975 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
976                              void *type_data)
977 {
978         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
979
980         switch (type) {
981         case TC_SETUP_BLOCK:
982                 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
983         case TC_SETUP_QDISC_RED:
984                 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
985         case TC_SETUP_QDISC_PRIO:
986                 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
987         case TC_SETUP_QDISC_ETS:
988                 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
989         case TC_SETUP_QDISC_TBF:
990                 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
991         case TC_SETUP_QDISC_FIFO:
992                 return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
993         default:
994                 return -EOPNOTSUPP;
995         }
996 }
997
998 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
999 {
1000         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1001
1002         if (!enable) {
1003                 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
1004                     mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
1005                         netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1006                         return -EINVAL;
1007                 }
1008                 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
1009                 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
1010         } else {
1011                 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
1012                 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
1013         }
1014         return 0;
1015 }
1016
1017 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
1018 {
1019         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1020         char pplr_pl[MLXSW_REG_PPLR_LEN];
1021         int err;
1022
1023         if (netif_running(dev))
1024                 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1025
1026         mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
1027         err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
1028                               pplr_pl);
1029
1030         if (netif_running(dev))
1031                 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1032
1033         return err;
1034 }
1035
/* Handler invoked by mlxsw_sp_handle_feature() to toggle one feature. */
typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1037
1038 static int mlxsw_sp_handle_feature(struct net_device *dev,
1039                                    netdev_features_t wanted_features,
1040                                    netdev_features_t feature,
1041                                    mlxsw_sp_feature_handler feature_handler)
1042 {
1043         netdev_features_t changes = wanted_features ^ dev->features;
1044         bool enable = !!(wanted_features & feature);
1045         int err;
1046
1047         if (!(changes & feature))
1048                 return 0;
1049
1050         err = feature_handler(dev, enable);
1051         if (err) {
1052                 netdev_err(dev, "%s feature %pNF failed, err %d\n",
1053                            enable ? "Enable" : "Disable", &feature, err);
1054                 return err;
1055         }
1056
1057         if (enable)
1058                 dev->features |= feature;
1059         else
1060                 dev->features &= ~feature;
1061
1062         return 0;
1063 }
1064 static int mlxsw_sp_set_features(struct net_device *dev,
1065                                  netdev_features_t features)
1066 {
1067         netdev_features_t oper_features = dev->features;
1068         int err = 0;
1069
1070         err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1071                                        mlxsw_sp_feature_hw_tc);
1072         err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
1073                                        mlxsw_sp_feature_loopback);
1074
1075         if (err) {
1076                 dev->features = oper_features;
1077                 return -EINVAL;
1078         }
1079
1080         return 0;
1081 }
1082
1083 static struct devlink_port *
1084 mlxsw_sp_port_get_devlink_port(struct net_device *dev)
1085 {
1086         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1087         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1088
1089         return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
1090                                                 mlxsw_sp_port->local_port);
1091 }
1092
1093 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1094                                       struct ifreq *ifr)
1095 {
1096         struct hwtstamp_config config;
1097         int err;
1098
1099         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1100                 return -EFAULT;
1101
1102         err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
1103                                                              &config);
1104         if (err)
1105                 return err;
1106
1107         if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1108                 return -EFAULT;
1109
1110         return 0;
1111 }
1112
1113 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1114                                       struct ifreq *ifr)
1115 {
1116         struct hwtstamp_config config;
1117         int err;
1118
1119         err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
1120                                                              &config);
1121         if (err)
1122                 return err;
1123
1124         if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1125                 return -EFAULT;
1126
1127         return 0;
1128 }
1129
1130 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
1131 {
1132         struct hwtstamp_config config = {0};
1133
1134         mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
1135 }
1136
1137 static int
1138 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1139 {
1140         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1141
1142         switch (cmd) {
1143         case SIOCSHWTSTAMP:
1144                 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
1145         case SIOCGHWTSTAMP:
1146                 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
1147         default:
1148                 return -EOPNOTSUPP;
1149         }
1150 }
1151
/* Netdev callbacks for a Spectrum front-panel port netdevice. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};
1169
1170 static int
1171 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
1172 {
1173         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1174         u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
1175         const struct mlxsw_sp_port_type_speed_ops *ops;
1176         char ptys_pl[MLXSW_REG_PTYS_LEN];
1177         int err;
1178
1179         ops = mlxsw_sp->port_type_speed_ops;
1180
1181         /* Set advertised speeds to supported speeds. */
1182         ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
1183                                0, false);
1184         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1185         if (err)
1186                 return err;
1187
1188         ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
1189                                  &eth_proto_admin, &eth_proto_oper);
1190         ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
1191                                eth_proto_cap, mlxsw_sp_port->link.autoneg);
1192         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1193 }
1194
1195 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
1196 {
1197         const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
1198         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1199         char ptys_pl[MLXSW_REG_PTYS_LEN];
1200         u32 eth_proto_oper;
1201         int err;
1202
1203         port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
1204         port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
1205                                                mlxsw_sp_port->local_port, 0,
1206                                                false);
1207         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1208         if (err)
1209                 return err;
1210         port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
1211                                                  &eth_proto_oper);
1212         *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
1213         return 0;
1214 }
1215
1216 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1217                           enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1218                           bool dwrr, u8 dwrr_weight)
1219 {
1220         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1221         char qeec_pl[MLXSW_REG_QEEC_LEN];
1222
1223         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1224                             next_index);
1225         mlxsw_reg_qeec_de_set(qeec_pl, true);
1226         mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1227         mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1228         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1229 }
1230
1231 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1232                                   enum mlxsw_reg_qeec_hr hr, u8 index,
1233                                   u8 next_index, u32 maxrate, u8 burst_size)
1234 {
1235         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1236         char qeec_pl[MLXSW_REG_QEEC_LEN];
1237
1238         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1239                             next_index);
1240         mlxsw_reg_qeec_mase_set(qeec_pl, true);
1241         mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1242         mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
1243         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1244 }
1245
1246 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
1247                                     enum mlxsw_reg_qeec_hr hr, u8 index,
1248                                     u8 next_index, u32 minrate)
1249 {
1250         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1251         char qeec_pl[MLXSW_REG_QEEC_LEN];
1252
1253         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1254                             next_index);
1255         mlxsw_reg_qeec_mise_set(qeec_pl, true);
1256         mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
1257
1258         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1259 }
1260
1261 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1262                               u8 switch_prio, u8 tclass)
1263 {
1264         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1265         char qtct_pl[MLXSW_REG_QTCT_LEN];
1266
1267         mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1268                             tclass);
1269         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1270 }
1271
/* Program the initial ETS configuration of a port: build the scheduling
 * element hierarchy, disable all max shapers, set the min shaper for
 * the multicast TCs and map every priority to TC 0. The order of the
 * register writes below is deliberate (hierarchy before shapers).
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		/* TC i feeds subgroup i. */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TCs 8..15 share subgroup i with TC i and use DWRR.
		 * NOTE(review): the weight of 100 presumably balances
		 * them against the companion TC - confirm against the
		 * QEEC register documentation.
		 */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1357
1358 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
1359                                         bool enable)
1360 {
1361         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1362         char qtctm_pl[MLXSW_REG_QTCTM_LEN];
1363
1364         mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
1365         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
1366 }
1367
1368 static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
1369 {
1370         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1371         u8 module = mlxsw_sp_port->mapping.module;
1372         u64 overheat_counter;
1373         int err;
1374
1375         err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
1376                                                     &overheat_counter);
1377         if (err)
1378                 return err;
1379
1380         mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
1381         return 0;
1382 }
1383
1384 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1385                                 u8 split_base_local_port,
1386                                 struct mlxsw_sp_port_mapping *port_mapping)
1387 {
1388         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1389         bool split = !!split_base_local_port;
1390         struct mlxsw_sp_port *mlxsw_sp_port;
1391         u32 lanes = port_mapping->width;
1392         struct net_device *dev;
1393         bool splittable;
1394         int err;
1395
1396         splittable = lanes > 1 && !split;
1397         err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
1398                                    port_mapping->module + 1, split,
1399                                    port_mapping->lane / lanes,
1400                                    splittable, lanes,
1401                                    mlxsw_sp->base_mac,
1402                                    sizeof(mlxsw_sp->base_mac));
1403         if (err) {
1404                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1405                         local_port);
1406                 return err;
1407         }
1408
1409         dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1410         if (!dev) {
1411                 err = -ENOMEM;
1412                 goto err_alloc_etherdev;
1413         }
1414         SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
1415         dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
1416         mlxsw_sp_port = netdev_priv(dev);
1417         mlxsw_sp_port->dev = dev;
1418         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1419         mlxsw_sp_port->local_port = local_port;
1420         mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
1421         mlxsw_sp_port->split = split;
1422         mlxsw_sp_port->split_base_local_port = split_base_local_port;
1423         mlxsw_sp_port->mapping = *port_mapping;
1424         mlxsw_sp_port->link.autoneg = 1;
1425         INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
1426
1427         mlxsw_sp_port->pcpu_stats =
1428                 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1429         if (!mlxsw_sp_port->pcpu_stats) {
1430                 err = -ENOMEM;
1431                 goto err_alloc_stats;
1432         }
1433
1434         INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1435                           &update_stats_cache);
1436
1437         dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1438         dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1439
1440         err = mlxsw_sp_port_module_map(mlxsw_sp_port);
1441         if (err) {
1442                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
1443                         mlxsw_sp_port->local_port);
1444                 goto err_port_module_map;
1445         }
1446
1447         err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1448         if (err) {
1449                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1450                         mlxsw_sp_port->local_port);
1451                 goto err_port_swid_set;
1452         }
1453
1454         err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1455         if (err) {
1456                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1457                         mlxsw_sp_port->local_port);
1458                 goto err_dev_addr_init;
1459         }
1460
1461         netif_carrier_off(dev);
1462
1463         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1464                          NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
1465         dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
1466
1467         dev->min_mtu = 0;
1468         dev->max_mtu = ETH_MAX_MTU;
1469
1470         /* Each packet needs to have a Tx header (metadata) on top all other
1471          * headers.
1472          */
1473         dev->needed_headroom = MLXSW_TXHDR_LEN;
1474
1475         err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1476         if (err) {
1477                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1478                         mlxsw_sp_port->local_port);
1479                 goto err_port_system_port_mapping_set;
1480         }
1481
1482         err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
1483         if (err) {
1484                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1485                         mlxsw_sp_port->local_port);
1486                 goto err_port_speed_by_width_set;
1487         }
1488
1489         err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
1490                                                             &mlxsw_sp_port->max_speed);
1491         if (err) {
1492                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
1493                         mlxsw_sp_port->local_port);
1494                 goto err_max_speed_get;
1495         }
1496
1497         err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
1498         if (err) {
1499                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
1500                         mlxsw_sp_port->local_port);
1501                 goto err_port_max_mtu_get;
1502         }
1503
1504         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1505         if (err) {
1506                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1507                         mlxsw_sp_port->local_port);
1508                 goto err_port_mtu_set;
1509         }
1510
1511         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1512         if (err)
1513                 goto err_port_admin_status_set;
1514
1515         err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1516         if (err) {
1517                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1518                         mlxsw_sp_port->local_port);
1519                 goto err_port_buffers_init;
1520         }
1521
1522         err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1523         if (err) {
1524                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1525                         mlxsw_sp_port->local_port);
1526                 goto err_port_ets_init;
1527         }
1528
1529         err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
1530         if (err) {
1531                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
1532                         mlxsw_sp_port->local_port);
1533                 goto err_port_tc_mc_mode;
1534         }
1535
1536         /* ETS and buffers must be initialized before DCB. */
1537         err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1538         if (err) {
1539                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1540                         mlxsw_sp_port->local_port);
1541                 goto err_port_dcb_init;
1542         }
1543
1544         err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
1545         if (err) {
1546                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
1547                         mlxsw_sp_port->local_port);
1548                 goto err_port_fids_init;
1549         }
1550
1551         err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
1552         if (err) {
1553                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
1554                         mlxsw_sp_port->local_port);
1555                 goto err_port_qdiscs_init;
1556         }
1557
1558         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
1559                                      false);
1560         if (err) {
1561                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
1562                         mlxsw_sp_port->local_port);
1563                 goto err_port_vlan_clear;
1564         }
1565
1566         err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
1567         if (err) {
1568                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
1569                         mlxsw_sp_port->local_port);
1570                 goto err_port_nve_init;
1571         }
1572
1573         err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
1574         if (err) {
1575                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
1576                         mlxsw_sp_port->local_port);
1577                 goto err_port_pvid_set;
1578         }
1579
1580         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
1581                                                        MLXSW_SP_DEFAULT_VID);
1582         if (IS_ERR(mlxsw_sp_port_vlan)) {
1583                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
1584                         mlxsw_sp_port->local_port);
1585                 err = PTR_ERR(mlxsw_sp_port_vlan);
1586                 goto err_port_vlan_create;
1587         }
1588         mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
1589
1590         INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
1591                           mlxsw_sp->ptp_ops->shaper_work);
1592
1593         mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1594
1595         err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
1596         if (err) {
1597                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
1598                         mlxsw_sp_port->local_port);
1599                 goto err_port_overheat_init_val_set;
1600         }
1601
1602         err = register_netdev(dev);
1603         if (err) {
1604                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1605                         mlxsw_sp_port->local_port);
1606                 goto err_register_netdev;
1607         }
1608
1609         mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
1610                                 mlxsw_sp_port, dev);
1611         mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
1612         return 0;
1613
1614 err_register_netdev:
1615 err_port_overheat_init_val_set:
1616         mlxsw_sp->ports[local_port] = NULL;
1617         mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1618 err_port_vlan_create:
1619 err_port_pvid_set:
1620         mlxsw_sp_port_nve_fini(mlxsw_sp_port);
1621 err_port_nve_init:
1622 err_port_vlan_clear:
1623         mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
1624 err_port_qdiscs_init:
1625         mlxsw_sp_port_fids_fini(mlxsw_sp_port);
1626 err_port_fids_init:
1627         mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1628 err_port_dcb_init:
1629         mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
1630 err_port_tc_mc_mode:
1631 err_port_ets_init:
1632         mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
1633 err_port_buffers_init:
1634 err_port_admin_status_set:
1635 err_port_mtu_set:
1636 err_port_max_mtu_get:
1637 err_max_speed_get:
1638 err_port_speed_by_width_set:
1639 err_port_system_port_mapping_set:
1640 err_dev_addr_init:
1641         mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
1642 err_port_swid_set:
1643         mlxsw_sp_port_module_unmap(mlxsw_sp_port);
1644 err_port_module_map:
1645         free_percpu(mlxsw_sp_port->pcpu_stats);
1646 err_alloc_stats:
1647         free_netdev(dev);
1648 err_alloc_etherdev:
1649         mlxsw_core_port_fini(mlxsw_sp->core, local_port);
1650         return err;
1651 }
1652
/* Tear down a front-panel port. The sequence is the exact reverse of the
 * init sequence in mlxsw_sp_port_create() (see its error-unwind labels),
 * so keep the two in sync when adding steps.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	/* Stop deferred work before the netdev is unregistered. */
	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	/* Hide the port from lookups before dismantling its state. */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* Every VLAN must have been flushed above. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}
1677
1678 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
1679 {
1680         struct mlxsw_sp_port *mlxsw_sp_port;
1681         int err;
1682
1683         mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
1684         if (!mlxsw_sp_port)
1685                 return -ENOMEM;
1686
1687         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1688         mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
1689
1690         err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
1691                                        mlxsw_sp_port,
1692                                        mlxsw_sp->base_mac,
1693                                        sizeof(mlxsw_sp->base_mac));
1694         if (err) {
1695                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
1696                 goto err_core_cpu_port_init;
1697         }
1698
1699         mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
1700         return 0;
1701
1702 err_core_cpu_port_init:
1703         kfree(mlxsw_sp_port);
1704         return err;
1705 }
1706
1707 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1708 {
1709         struct mlxsw_sp_port *mlxsw_sp_port =
1710                                 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1711
1712         mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1713         mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1714         kfree(mlxsw_sp_port);
1715 }
1716
1717 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1718 {
1719         return mlxsw_sp->ports[local_port] != NULL;
1720 }
1721
1722 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1723 {
1724         int i;
1725
1726         for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
1727                 if (mlxsw_sp_port_created(mlxsw_sp, i))
1728                         mlxsw_sp_port_remove(mlxsw_sp, i);
1729         mlxsw_sp_cpu_port_remove(mlxsw_sp);
1730         kfree(mlxsw_sp->ports);
1731         mlxsw_sp->ports = NULL;
1732 }
1733
1734 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1735 {
1736         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1737         struct mlxsw_sp_port_mapping *port_mapping;
1738         size_t alloc_size;
1739         int i;
1740         int err;
1741
1742         alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
1743         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1744         if (!mlxsw_sp->ports)
1745                 return -ENOMEM;
1746
1747         err = mlxsw_sp_cpu_port_create(mlxsw_sp);
1748         if (err)
1749                 goto err_cpu_port_create;
1750
1751         for (i = 1; i < max_ports; i++) {
1752                 port_mapping = mlxsw_sp->port_mapping[i];
1753                 if (!port_mapping)
1754                         continue;
1755                 err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
1756                 if (err)
1757                         goto err_port_create;
1758         }
1759         return 0;
1760
1761 err_port_create:
1762         for (i--; i >= 1; i--)
1763                 if (mlxsw_sp_port_created(mlxsw_sp, i))
1764                         mlxsw_sp_port_remove(mlxsw_sp, i);
1765         mlxsw_sp_cpu_port_remove(mlxsw_sp);
1766 err_cpu_port_create:
1767         kfree(mlxsw_sp->ports);
1768         mlxsw_sp->ports = NULL;
1769         return err;
1770 }
1771
1772 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
1773 {
1774         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1775         struct mlxsw_sp_port_mapping port_mapping;
1776         int i;
1777         int err;
1778
1779         mlxsw_sp->port_mapping = kcalloc(max_ports,
1780                                          sizeof(struct mlxsw_sp_port_mapping *),
1781                                          GFP_KERNEL);
1782         if (!mlxsw_sp->port_mapping)
1783                 return -ENOMEM;
1784
1785         for (i = 1; i < max_ports; i++) {
1786                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
1787                 if (err)
1788                         goto err_port_module_info_get;
1789                 if (!port_mapping.width)
1790                         continue;
1791
1792                 mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
1793                                                     sizeof(port_mapping),
1794                                                     GFP_KERNEL);
1795                 if (!mlxsw_sp->port_mapping[i]) {
1796                         err = -ENOMEM;
1797                         goto err_port_module_info_dup;
1798                 }
1799         }
1800         return 0;
1801
1802 err_port_module_info_get:
1803 err_port_module_info_dup:
1804         for (i--; i >= 1; i--)
1805                 kfree(mlxsw_sp->port_mapping[i]);
1806         kfree(mlxsw_sp->port_mapping);
1807         return err;
1808 }
1809
1810 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
1811 {
1812         int i;
1813
1814         for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
1815                 kfree(mlxsw_sp->port_mapping[i]);
1816         kfree(mlxsw_sp->port_mapping);
1817 }
1818
1819 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
1820 {
1821         u8 offset = (local_port - 1) % max_width;
1822
1823         return local_port - offset;
1824 }
1825
/* Create @count split ports starting at @base_port, spaced @offset local
 * ports apart, dividing the parent's width evenly between them. On
 * failure, the split ports created so far are removed again.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   base_port, &split_port_mapping);
		if (err)
			goto err_port_create;
		/* Next split port starts right after this one's lanes. */
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}
1852
1853 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
1854                                          u8 base_port,
1855                                          unsigned int count, u8 offset)
1856 {
1857         struct mlxsw_sp_port_mapping *port_mapping;
1858         int i;
1859
1860         /* Go over original unsplit ports in the gap and recreate them. */
1861         for (i = 0; i < count * offset; i++) {
1862                 port_mapping = mlxsw_sp->port_mapping[base_port + i];
1863                 if (!port_mapping)
1864                         continue;
1865                 mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
1866         }
1867 }
1868
1869 static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
1870                                        unsigned int count,
1871                                        unsigned int max_width)
1872 {
1873         enum mlxsw_res_id local_ports_in_x_res_id;
1874         int split_width = max_width / count;
1875
1876         if (split_width == 1)
1877                 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
1878         else if (split_width == 2)
1879                 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
1880         else if (split_width == 4)
1881                 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
1882         else
1883                 return -EINVAL;
1884
1885         if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
1886                 return -EINVAL;
1887         return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
1888 }
1889
1890 static struct mlxsw_sp_port *
1891 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1892 {
1893         if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
1894                 return mlxsw_sp->ports[local_port];
1895         return NULL;
1896 }
1897
/* devlink port split handler: split @local_port into @count ports.
 * Validates that the port exists, uses its module's maximal width and
 * that the local ports required for the split are free, then removes
 * the original port(s) and creates the split ports in their place.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int max_width;
	u8 base_port;
	int offset;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Split port with non-max cannot be split. */
	if (mlxsw_sp_port->mapping.width != max_width) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
		return -EINVAL;
	}

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (offset < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	/* Only in case max split is being done, the local port and
	 * base port may differ.
	 */
	base_port = count == max_width ?
		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
		    local_port;

	for (i = 0; i < count * offset; i++) {
		/* Expect base port to exist and also the one in the middle in
		 * case of maximal split count.
		 */
		if (i == 0 || (count == max_width && i == count / 2))
			continue;

		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Copy the mapping before the port it belongs to is removed. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
					 count, offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the original unsplit ports that were removed above. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
	return err;
}
1981
/* devlink port unsplit handler: remove the split ports derived from
 * @local_port and recreate the original unsplit port(s).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	unsigned int count;
	int max_width;
	u8 base_port;
	int offset;
	int i;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Number of ports the original split produced. */
	count = max_width / mlxsw_sp_port->mapping.width;

	/* The port was split, so a valid offset must exist; WARN otherwise. */
	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (WARN_ON(offset < 0)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	base_port = mlxsw_sp_port->split_base_local_port;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);

	return 0;
}
2034
2035 static void
2036 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2037 {
2038         int i;
2039
2040         for (i = 0; i < TC_MAX_QUEUE; i++)
2041                 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2042 }
2043
2044 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2045                                      char *pude_pl, void *priv)
2046 {
2047         struct mlxsw_sp *mlxsw_sp = priv;
2048         struct mlxsw_sp_port *mlxsw_sp_port;
2049         enum mlxsw_reg_pude_oper_status status;
2050         u8 local_port;
2051
2052         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2053         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2054         if (!mlxsw_sp_port)
2055                 return;
2056
2057         status = mlxsw_reg_pude_oper_status_get(pude_pl);
2058         if (status == MLXSW_PORT_OPER_STATUS_UP) {
2059                 netdev_info(mlxsw_sp_port->dev, "link up\n");
2060                 netif_carrier_on(mlxsw_sp_port->dev);
2061                 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
2062         } else {
2063                 netdev_info(mlxsw_sp_port->dev, "link down\n");
2064                 netif_carrier_off(mlxsw_sp_port->dev);
2065                 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
2066         }
2067 }
2068
2069 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
2070                                           char *mtpptr_pl, bool ingress)
2071 {
2072         u8 local_port;
2073         u8 num_rec;
2074         int i;
2075
2076         local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
2077         num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
2078         for (i = 0; i < num_rec; i++) {
2079                 u8 domain_number;
2080                 u8 message_type;
2081                 u16 sequence_id;
2082                 u64 timestamp;
2083
2084                 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
2085                                         &domain_number, &sequence_id,
2086                                         &timestamp);
2087                 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
2088                                             message_type, domain_number,
2089                                             sequence_id, timestamp);
2090         }
2091 }
2092
2093 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2094                                               char *mtpptr_pl, void *priv)
2095 {
2096         struct mlxsw_sp *mlxsw_sp = priv;
2097
2098         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2099 }
2100
2101 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2102                                               char *mtpptr_pl, void *priv)
2103 {
2104         struct mlxsw_sp *mlxsw_sp = priv;
2105
2106         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2107 }
2108
2109 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2110                                        u8 local_port, void *priv)
2111 {
2112         struct mlxsw_sp *mlxsw_sp = priv;
2113         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2114         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2115
2116         if (unlikely(!mlxsw_sp_port)) {
2117                 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2118                                      local_port);
2119                 return;
2120         }
2121
2122         skb->dev = mlxsw_sp_port->dev;
2123
2124         pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2125         u64_stats_update_begin(&pcpu_stats->syncp);
2126         pcpu_stats->rx_packets++;
2127         pcpu_stats->rx_bytes += skb->len;
2128         u64_stats_update_end(&pcpu_stats->syncp);
2129
2130         skb->protocol = eth_type_trans(skb, skb->dev);
2131         netif_receive_skb(skb);
2132 }
2133
2134 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2135                                            void *priv)
2136 {
2137         skb->offload_fwd_mark = 1;
2138         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2139 }
2140
2141 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2142                                               u8 local_port, void *priv)
2143 {
2144         skb->offload_l3_fwd_mark = 1;
2145         skb->offload_fwd_mark = 1;
2146         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2147 }
2148
/* Hand a received PTP packet to the ASIC-generation-specific handler. */
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u8 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
2154
/* Deliver a sampled packet to psample and consume the skb. The skb is
 * always consumed, whether or not sampling is configured on the port.
 */
void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			     u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_sample *sample;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}

	/* sample is RCU-protected; it may be cleared concurrently. */
	rcu_read_lock();
	sample = rcu_dereference(mlxsw_sp_port->sample);
	if (!sample)
		goto out_unlock;
	/* Optionally truncate the reported payload to trunc_size. */
	size = sample->truncate ? sample->trunc_size : skb->len;
	psample_sample_packet(sample->psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0, sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}
2180
/* RX listener delivering trapped packets to the CPU without setting
 * skb->offload_fwd_mark (see mlxsw_sp_rx_listener_no_mark_func()).
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* As above, but sets skb->offload_fwd_mark on received packets. */
#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)     \
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,    \
		_is_ctrl, SP_##_trap_group, DISCARD)

/* As above, but sets both offload_fwd_mark and offload_l3_fwd_mark. */
#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
		_is_ctrl, SP_##_trap_group, DISCARD)

/* Event listener placed in the SP_EVENT trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)                \
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

/* Listeners registered unconditionally (see mlxsw_sp_traps_init()). */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};

/* Extra listeners for Spectrum-1 PTP timestamp FIFO events. */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
2227
2228 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
2229 {
2230         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2231         char qpcr_pl[MLXSW_REG_QPCR_LEN];
2232         enum mlxsw_reg_qpcr_ir_units ir_units;
2233         int max_cpu_policers;
2234         bool is_bytes;
2235         u8 burst_size;
2236         u32 rate;
2237         int i, err;
2238
2239         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
2240                 return -EIO;
2241
2242         max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2243
2244         ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
2245         for (i = 0; i < max_cpu_policers; i++) {
2246                 is_bytes = false;
2247                 switch (i) {
2248                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2249                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
2250                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
2251                         rate = 1024;
2252                         burst_size = 7;
2253                         break;
2254                 default:
2255                         continue;
2256                 }
2257
2258                 __set_bit(i, mlxsw_sp->trap->policers_usage);
2259                 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
2260                                     burst_size);
2261                 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
2262                 if (err)
2263                         return err;
2264         }
2265
2266         return 0;
2267 }
2268
/* Bind each trap group used by the driver to its policer, priority and
 * traffic class via the HTGT register. Returns 0 on success or a
 * negative errno.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		/* By default the policer ID equals the trap group ID. */
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			/* No policer is bound to the event group. */
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
2315
2316 static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
2317                                    const struct mlxsw_listener listeners[],
2318                                    size_t listeners_count)
2319 {
2320         int i;
2321         int err;
2322
2323         for (i = 0; i < listeners_count; i++) {
2324                 err = mlxsw_core_trap_register(mlxsw_sp->core,
2325                                                &listeners[i],
2326                                                mlxsw_sp);
2327                 if (err)
2328                         goto err_listener_register;
2329
2330         }
2331         return 0;
2332
2333 err_listener_register:
2334         for (i--; i >= 0; i--) {
2335                 mlxsw_core_trap_unregister(mlxsw_sp->core,
2336                                            &listeners[i],
2337                                            mlxsw_sp);
2338         }
2339         return err;
2340 }
2341
2342 static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
2343                                       const struct mlxsw_listener listeners[],
2344                                       size_t listeners_count)
2345 {
2346         int i;
2347
2348         for (i = 0; i < listeners_count; i++) {
2349                 mlxsw_core_trap_unregister(mlxsw_sp->core,
2350                                            &listeners[i],
2351                                            mlxsw_sp);
2352         }
2353 }
2354
/* Allocate the trap context (sized by the MAX_CPU_POLICERS resource),
 * configure CPU policers and trap groups, and register both the common
 * and the per-ASIC trap listeners. On failure, everything set up so far
 * is torn down via the error labels below, in reverse order.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	/* The trailing policers_usage flexible array carries one bit per
	 * CPU policer; struct_size() guards the allocation against overflow.
	 */
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	/* Listeners common to all Spectrum generations. */
	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		goto err_traps_register;

	/* Extra per-ASIC listeners assigned by the mlxsw_spX_init() setup
	 * (may be an empty set; see mlxsw_sp2_init()/mlxsw_sp3_init()).
	 */
	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}
2400
2401 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2402 {
2403         mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
2404                                   mlxsw_sp->listeners_count);
2405         mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
2406                                   ARRAY_SIZE(mlxsw_sp_listener));
2407         kfree(mlxsw_sp->trap);
2408 }
2409
2410 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
2411
2412 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2413 {
2414         char slcr_pl[MLXSW_REG_SLCR_LEN];
2415         u32 seed;
2416         int err;
2417
2418         seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
2419                      MLXSW_SP_LAG_SEED_INIT);
2420         mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2421                                      MLXSW_REG_SLCR_LAG_HASH_DMAC |
2422                                      MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2423                                      MLXSW_REG_SLCR_LAG_HASH_VLANID |
2424                                      MLXSW_REG_SLCR_LAG_HASH_SIP |
2425                                      MLXSW_REG_SLCR_LAG_HASH_DIP |
2426                                      MLXSW_REG_SLCR_LAG_HASH_SPORT |
2427                                      MLXSW_REG_SLCR_LAG_HASH_DPORT |
2428                                      MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
2429         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2430         if (err)
2431                 return err;
2432
2433         if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
2434             !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
2435                 return -EIO;
2436
2437         mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
2438                                  sizeof(struct mlxsw_sp_upper),
2439                                  GFP_KERNEL);
2440         if (!mlxsw_sp->lags)
2441                 return -ENOMEM;
2442
2443         return 0;
2444 }
2445
/* Free the per-LAG array allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
2450
2451 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
2452 {
2453         char htgt_pl[MLXSW_REG_HTGT_LEN];
2454         int err;
2455
2456         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
2457                             MLXSW_REG_HTGT_INVALID_POLICER,
2458                             MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2459                             MLXSW_REG_HTGT_DEFAULT_TC);
2460         err =  mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2461         if (err)
2462                 return err;
2463
2464         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
2465                             MLXSW_REG_HTGT_INVALID_POLICER,
2466                             MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2467                             MLXSW_REG_HTGT_DEFAULT_TC);
2468         err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2469         if (err)
2470                 return err;
2471
2472         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
2473                             MLXSW_REG_HTGT_INVALID_POLICER,
2474                             MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2475                             MLXSW_REG_HTGT_DEFAULT_TC);
2476         err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2477         if (err)
2478                 return err;
2479
2480         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
2481                             MLXSW_REG_HTGT_INVALID_POLICER,
2482                             MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2483                             MLXSW_REG_HTGT_DEFAULT_TC);
2484         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2485 }
2486
/* PTP callbacks used on Spectrum-1. Note that clock_init and init may
 * legitimately return NULL (see mlxsw_sp_init(), which only treats
 * IS_ERR() results as failures).
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
};
2502
/* PTP callbacks used on Spectrum-2 and later (Spectrum-3 reuses this
 * table; see mlxsw_sp3_init()).
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
};
2518
2519 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
2520                                     unsigned long event, void *ptr);
2521
/* Common initialization path for all Spectrum generations, invoked after
 * the per-ASIC mlxsw_spX_init() callback has bound the generation-specific
 * ops tables. Sub-systems are brought up in dependency order; on failure
 * they are torn down in reverse via the error labels, mirroring
 * mlxsw_sp_fini().
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	/* Policers must exist before traps, which may bind to them. */
	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	/* Ports come last so that every sub-system they rely on is ready. */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}
2724
/* Bind the Spectrum-1 specific ops tables and extra trap listeners, then
 * run the common initialization path.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	/* Spectrum-1 registers additional listeners on top of the common
	 * set; consumed by mlxsw_sp_traps_init().
	 */
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
2753
/* Bind the Spectrum-2 specific ops tables, then run the common
 * initialization path. No extra listeners are set, so only the common
 * trap listener set is registered.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
2780
/* Bind the Spectrum-3 ops tables, then run the common initialization
 * path. Spectrum-3 mostly reuses the Spectrum-2 tables, overriding only
 * the shared-buffer ops, SPAN ops and lowest shaper burst size.
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
2807
/* Tear down everything brought up by mlxsw_sp_init(), in exact reverse
 * order of initialization (mirrors that function's error-unwind path).
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* PTP state only exists when a clock was created (see
	 * mlxsw_sp_init()); clock may be NULL on non-FRC-capable buses.
	 */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}
2836
2837 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
2838  * 802.1Q FIDs
2839  */
2840 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE   (MLXSW_SP_FID_8021D_MAX + \
2841                                          VLAN_VID_MASK - 1)
2842
/* Device configuration profile applied to Spectrum-1 at init time.
 * NOTE(review): each used_* flag appears to mark the paired value as
 * valid for the firmware — confirm against struct mlxsw_config_profile.
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	/* Spectrum-1 partitions the KVD; the ratio is consumed by
	 * mlxsw_sp1_resources_kvd_register() and mlxsw_sp_kvd_sizes_get().
	 */
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2866
/* Device configuration profile for Spectrum-2 and later. Identical to
 * the Spectrum-1 profile except that no KVD partition sizes are set
 * (the KVD is exposed as a single resource; see
 * mlxsw_sp2_resources_kvd_register()).
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2886
/* Fill the devlink size-params for the KVD resource and its three
 * sub-resources (linear, hash-double, hash-single). The KVD itself is
 * fixed at the device's KVD_SIZE; each partition may grow up to the
 * total minus the minimum sizes of the other two partitions.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	/* Total KVD: not resizable (min == max == kvd_size). */
	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}
2922
2923 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
2924 {
2925         struct devlink *devlink = priv_to_devlink(mlxsw_core);
2926         struct devlink_resource_size_params hash_single_size_params;
2927         struct devlink_resource_size_params hash_double_size_params;
2928         struct devlink_resource_size_params linear_size_params;
2929         struct devlink_resource_size_params kvd_size_params;
2930         u32 kvd_size, single_size, double_size, linear_size;
2931         const struct mlxsw_config_profile *profile;
2932         int err;
2933
2934         profile = &mlxsw_sp1_config_profile;
2935         if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
2936                 return -EIO;
2937
2938         mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
2939                                               &linear_size_params,
2940                                               &hash_double_size_params,
2941                                               &hash_single_size_params);
2942
2943         kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
2944         err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
2945                                         kvd_size, MLXSW_SP_RESOURCE_KVD,
2946                                         DEVLINK_RESOURCE_ID_PARENT_TOP,
2947                                         &kvd_size_params);
2948         if (err)
2949                 return err;
2950
2951         linear_size = profile->kvd_linear_size;
2952         err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
2953                                         linear_size,
2954                                         MLXSW_SP_RESOURCE_KVD_LINEAR,
2955                                         MLXSW_SP_RESOURCE_KVD,
2956                                         &linear_size_params);
2957         if (err)
2958                 return err;
2959
2960         err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
2961         if  (err)
2962                 return err;
2963
2964         double_size = kvd_size - linear_size;
2965         double_size *= profile->kvd_hash_double_parts;
2966         double_size /= profile->kvd_hash_double_parts +
2967                        profile->kvd_hash_single_parts;
2968         double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
2969         err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
2970                                         double_size,
2971                                         MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
2972                                         MLXSW_SP_RESOURCE_KVD,
2973                                         &hash_double_size_params);
2974         if (err)
2975                 return err;
2976
2977         single_size = kvd_size - double_size - linear_size;
2978         err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
2979                                         single_size,
2980                                         MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
2981                                         MLXSW_SP_RESOURCE_KVD,
2982                                         &hash_single_size_params);
2983         if (err)
2984                 return err;
2985
2986         return 0;
2987 }
2988
2989 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
2990 {
2991         struct devlink *devlink = priv_to_devlink(mlxsw_core);
2992         struct devlink_resource_size_params kvd_size_params;
2993         u32 kvd_size;
2994
2995         if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
2996                 return -EIO;
2997
2998         kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
2999         devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3000                                           MLXSW_SP_KVD_GRANULARITY,
3001                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3002
3003         return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3004                                          kvd_size, MLXSW_SP_RESOURCE_KVD,
3005                                          DEVLINK_RESOURCE_ID_PARENT_TOP,
3006                                          &kvd_size_params);
3007 }
3008
3009 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3010 {
3011         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3012         struct devlink_resource_size_params span_size_params;
3013         u32 max_span;
3014
3015         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3016                 return -EIO;
3017
3018         max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3019         devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3020                                           1, DEVLINK_RESOURCE_UNIT_ENTRY);
3021
3022         return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3023                                          max_span, MLXSW_SP_RESOURCE_SPAN,
3024                                          DEVLINK_RESOURCE_ID_PARENT_TOP,
3025                                          &span_size_params);
3026 }
3027
3028 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
3029 {
3030         int err;
3031
3032         err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
3033         if (err)
3034                 return err;
3035
3036         err = mlxsw_sp_resources_span_register(mlxsw_core);
3037         if (err)
3038                 goto err_resources_span_register;
3039
3040         err = mlxsw_sp_counter_resources_register(mlxsw_core);
3041         if (err)
3042                 goto err_resources_counter_register;
3043
3044         err = mlxsw_sp_policer_resources_register(mlxsw_core);
3045         if (err)
3046                 goto err_resources_counter_register;
3047
3048         return 0;
3049
3050 err_resources_counter_register:
3051 err_resources_span_register:
3052         devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3053         return err;
3054 }
3055
3056 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
3057 {
3058         int err;
3059
3060         err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
3061         if (err)
3062                 return err;
3063
3064         err = mlxsw_sp_resources_span_register(mlxsw_core);
3065         if (err)
3066                 goto err_resources_span_register;
3067
3068         err = mlxsw_sp_counter_resources_register(mlxsw_core);
3069         if (err)
3070                 goto err_resources_counter_register;
3071
3072         err = mlxsw_sp_policer_resources_register(mlxsw_core);
3073         if (err)
3074                 goto err_resources_counter_register;
3075
3076         return 0;
3077
3078 err_resources_counter_register:
3079 err_resources_span_register:
3080         devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3081         return err;
3082 }
3083
/* Determine the sizes of the three KVD partitions (linear, hash-single,
 * hash-double). Sizes configured by the user via devlink take precedence;
 * otherwise defaults are derived from the config profile and the total
 * KVD size reported by the device. Returns -EIO when the required device
 * resources are missing or the resulting split violates the minimums.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
                                  const struct mlxsw_config_profile *profile,
                                  u64 *p_single_size, u64 *p_double_size,
                                  u64 *p_linear_size)
{
        struct devlink *devlink = priv_to_devlink(mlxsw_core);
        u32 double_size;
        int err;

        if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
            !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
                return -EIO;

        /* The hash part is what left of the kvd without the
         * linear part. It is split to the single size and
         * double size by the parts ratio from the profile.
         * Both sizes must be a multiplications of the
         * granularity from the profile. In case the user
         * provided the sizes they are obtained via devlink.
         */
        err = devlink_resource_size_get(devlink,
                                        MLXSW_SP_RESOURCE_KVD_LINEAR,
                                        p_linear_size);
        if (err)
                /* Not user-configured; fall back to the profile default. */
                *p_linear_size = profile->kvd_linear_size;

        err = devlink_resource_size_get(devlink,
                                        MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
                                        p_double_size);
        if (err) {
                /* double = (KVD - linear) * double_parts / total_parts,
                 * rounded down to the KVD granularity.
                 */
                double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
                              *p_linear_size;
                double_size *= profile->kvd_hash_double_parts;
                double_size /= profile->kvd_hash_double_parts +
                               profile->kvd_hash_single_parts;
                *p_double_size = rounddown(double_size,
                                           MLXSW_SP_KVD_GRANULARITY);
        }

        err = devlink_resource_size_get(devlink,
                                        MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
                                        p_single_size);
        if (err)
                /* Single-hash gets whatever is left after linear and double. */
                *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
                                 *p_double_size - *p_linear_size;

        /* Check results are legal. */
        if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
            *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
            MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
                return -EIO;

        return 0;
}
3138
3139 static int
3140 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
3141                                              struct devlink_param_gset_ctx *ctx)
3142 {
3143         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3144         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3145
3146         ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
3147         return 0;
3148 }
3149
3150 static int
3151 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
3152                                              struct devlink_param_gset_ctx *ctx)
3153 {
3154         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3155         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3156
3157         return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
3158 }
3159
/* Driver-specific devlink parameters exposed on Spectrum-2 and later.
 * Currently only the ACL region rehash interval, tunable at runtime.
 */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
        DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
                             "acl_region_rehash_interval",
                             DEVLINK_PARAM_TYPE_U32,
                             BIT(DEVLINK_PARAM_CMODE_RUNTIME),
                             mlxsw_sp_params_acl_region_rehash_intrvl_get,
                             mlxsw_sp_params_acl_region_rehash_intrvl_set,
                             NULL),
};
3169
3170 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
3171 {
3172         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3173         union devlink_param_value value;
3174         int err;
3175
3176         err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
3177                                       ARRAY_SIZE(mlxsw_sp2_devlink_params));
3178         if (err)
3179                 return err;
3180
3181         value.vu32 = 0;
3182         devlink_param_driverinit_value_set(devlink,
3183                                            MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
3184                                            value);
3185         return 0;
3186 }
3187
3188 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
3189 {
3190         devlink_params_unregister(priv_to_devlink(mlxsw_core),
3191                                   mlxsw_sp2_devlink_params,
3192                                   ARRAY_SIZE(mlxsw_sp2_devlink_params));
3193 }
3194
/* Completion hook for a PTP packet that was transmitted: strip the mlxsw
 * Tx header and hand the skb to the ASIC-specific PTP code for timestamp
 * delivery. Ownership of @skb passes to the PTP op.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
                                     struct sk_buff *skb, u8 local_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

        skb_pull(skb, MLXSW_TXHDR_LEN);
        mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}
3203
/* mlxsw core driver descriptor for Spectrum-1 ASICs: lifecycle hooks,
 * shared-buffer ops, trap/policer ops and the SP1 config profile.
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
        .kind                           = mlxsw_sp1_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .fw_req_rev                     = &mlxsw_sp1_fw_rev,
        .fw_filename                    = MLXSW_SP1_FW_FILENAME,
        .init                           = mlxsw_sp1_init,
        .fini                           = mlxsw_sp_fini,
        .basic_trap_groups_set          = mlxsw_sp_basic_trap_groups_set,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp1_resources_register,
        .kvd_sizes_get                  = mlxsw_sp_kvd_sizes_get,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp1_config_profile,
        .res_query_enabled              = true,
        .fw_fatal_enabled               = true,
        .temp_warn_enabled              = true,
};
3243
/* mlxsw core driver descriptor for Spectrum-2 ASICs. Unlike SP1 it
 * registers devlink params and has no kvd_sizes_get (fixed KVD layout).
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
        .kind                           = mlxsw_sp2_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .fw_req_rev                     = &mlxsw_sp2_fw_rev,
        .fw_filename                    = MLXSW_SP2_FW_FILENAME,
        .init                           = mlxsw_sp2_init,
        .fini                           = mlxsw_sp_fini,
        .basic_trap_groups_set          = mlxsw_sp_basic_trap_groups_set,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp2_resources_register,
        .params_register                = mlxsw_sp2_params_register,
        .params_unregister              = mlxsw_sp2_params_unregister,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp2_config_profile,
        .res_query_enabled              = true,
        .fw_fatal_enabled               = true,
        .temp_warn_enabled              = true,
};
3284
/* mlxsw core driver descriptor for Spectrum-3 ASICs. Shares the SP2
 * resources, params and config profile; only init and firmware differ.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
        .kind                           = mlxsw_sp3_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .fw_req_rev                     = &mlxsw_sp3_fw_rev,
        .fw_filename                    = MLXSW_SP3_FW_FILENAME,
        .init                           = mlxsw_sp3_init,
        .fini                           = mlxsw_sp_fini,
        .basic_trap_groups_set          = mlxsw_sp_basic_trap_groups_set,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp2_resources_register,
        .params_register                = mlxsw_sp2_params_register,
        .params_unregister              = mlxsw_sp2_params_unregister,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp2_config_profile,
        .res_query_enabled              = true,
        .fw_fatal_enabled               = true,
        .temp_warn_enabled              = true,
};
3325
/* Return true iff @dev is an mlxsw_sp front-panel port netdev, identified
 * by its netdev_ops (as opposed to a LAG, bridge or VLAN upper).
 */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
        return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
3330
3331 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
3332                                    struct netdev_nested_priv *priv)
3333 {
3334         int ret = 0;
3335
3336         if (mlxsw_sp_port_dev_check(lower_dev)) {
3337                 priv->data = (void *)netdev_priv(lower_dev);
3338                 ret = 1;
3339         }
3340
3341         return ret;
3342 }
3343
3344 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3345 {
3346         struct netdev_nested_priv priv = {
3347                 .data = NULL,
3348         };
3349
3350         if (mlxsw_sp_port_dev_check(dev))
3351                 return netdev_priv(dev);
3352
3353         netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
3354
3355         return (struct mlxsw_sp_port *)priv.data;
3356 }
3357
3358 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3359 {
3360         struct mlxsw_sp_port *mlxsw_sp_port;
3361
3362         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3363         return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3364 }
3365
3366 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3367 {
3368         struct netdev_nested_priv priv = {
3369                 .data = NULL,
3370         };
3371
3372         if (mlxsw_sp_port_dev_check(dev))
3373                 return netdev_priv(dev);
3374
3375         netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3376                                       &priv);
3377
3378         return (struct mlxsw_sp_port *)priv.data;
3379 }
3380
/* Find the mlxsw_sp port under @dev and take a reference on its netdev so
 * it outlives the RCU section. Pair with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port;

        rcu_read_lock();
        mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
        if (mlxsw_sp_port)
                /* dev_hold() before leaving RCU keeps the port's netdev alive. */
                dev_hold(mlxsw_sp_port->dev);
        rcu_read_unlock();
        return mlxsw_sp_port;
}
3392
/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
        dev_put(mlxsw_sp_port->dev);
}
3397
/* When a port leaves a LAG, make the LAG device and each of its direct
 * uppers leave any bridge they are enslaved to, so no stale bridge state
 * remains associated with this port.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
                                 struct net_device *lag_dev)
{
        struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
        struct net_device *upper_dev;
        struct list_head *iter;

        /* The LAG itself may be a bridge port. */
        if (netif_is_bridge_port(lag_dev))
                mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

        /* Walk direct uppers of the LAG and leave their bridges too. */
        netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
                if (!netif_is_bridge_port(upper_dev))
                        continue;
                br_dev = netdev_master_upper_dev_get(upper_dev);
                mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
        }
}
3416
3417 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3418 {
3419         char sldr_pl[MLXSW_REG_SLDR_LEN];
3420
3421         mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3422         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3423 }
3424
3425 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3426 {
3427         char sldr_pl[MLXSW_REG_SLDR_LEN];
3428
3429         mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3430         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3431 }
3432
3433 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3434                                      u16 lag_id, u8 port_index)
3435 {
3436         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3437         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3438
3439         mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3440                                       lag_id, port_index);
3441         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3442 }
3443
3444 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3445                                         u16 lag_id)
3446 {
3447         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3448         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3449
3450         mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3451                                          lag_id);
3452         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3453 }
3454
3455 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3456                                         u16 lag_id)
3457 {
3458         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3459         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3460
3461         mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3462                                         lag_id);
3463         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3464 }
3465
3466 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3467                                          u16 lag_id)
3468 {
3469         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3470         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3471
3472         mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3473                                          lag_id);
3474         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3475 }
3476
3477 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3478                                   struct net_device *lag_dev,
3479                                   u16 *p_lag_id)
3480 {
3481         struct mlxsw_sp_upper *lag;
3482         int free_lag_id = -1;
3483         u64 max_lag;
3484         int i;
3485
3486         max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3487         for (i = 0; i < max_lag; i++) {
3488                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3489                 if (lag->ref_count) {
3490                         if (lag->dev == lag_dev) {
3491                                 *p_lag_id = i;
3492                                 return 0;
3493                         }
3494                 } else if (free_lag_id < 0) {
3495                         free_lag_id = i;
3496                 }
3497         }
3498         if (free_lag_id < 0)
3499                 return -EBUSY;
3500         *p_lag_id = free_lag_id;
3501         return 0;
3502 }
3503
3504 static bool
3505 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3506                           struct net_device *lag_dev,
3507                           struct netdev_lag_upper_info *lag_upper_info,
3508                           struct netlink_ext_ack *extack)
3509 {
3510         u16 lag_id;
3511
3512         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
3513                 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
3514                 return false;
3515         }
3516         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
3517                 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
3518                 return false;
3519         }
3520         return true;
3521 }
3522
3523 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3524                                        u16 lag_id, u8 *p_port_index)
3525 {
3526         u64 max_lag_members;
3527         int i;
3528
3529         max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3530                                              MAX_LAG_MEMBERS);
3531         for (i = 0; i < max_lag_members; i++) {
3532                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3533                         *p_port_index = i;
3534                         return 0;
3535                 }
3536         }
3537         return -EBUSY;
3538 }
3539
3540 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3541                                   struct net_device *lag_dev)
3542 {
3543         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3544         struct mlxsw_sp_upper *lag;
3545         u16 lag_id;
3546         u8 port_index;
3547         int err;
3548
3549         err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3550         if (err)
3551                 return err;
3552         lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3553         if (!lag->ref_count) {
3554                 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3555                 if (err)
3556                         return err;
3557                 lag->dev = lag_dev;
3558         }
3559
3560         err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3561         if (err)
3562                 return err;
3563         err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3564         if (err)
3565                 goto err_col_port_add;
3566
3567         mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3568                                    mlxsw_sp_port->local_port);
3569         mlxsw_sp_port->lag_id = lag_id;
3570         mlxsw_sp_port->lagged = 1;
3571         lag->ref_count++;
3572
3573         /* Port is no longer usable as a router interface */
3574         if (mlxsw_sp_port->default_vlan->fid)
3575                 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
3576
3577         return 0;
3578
3579 err_col_port_add:
3580         if (!lag->ref_count)
3581                 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3582         return err;
3583 }
3584
/* Remove @mlxsw_sp_port from its LAG: unmap the collector slot, flush the
 * port's VLAN state, detach the LAG's bridge uppers, destroy the hardware
 * LAG when this was the last member, and restore the default PVID.
 * No-op when the port is not lagged.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
                                    struct net_device *lag_dev)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 lag_id = mlxsw_sp_port->lag_id;
        struct mlxsw_sp_upper *lag;

        if (!mlxsw_sp_port->lagged)
                return;
        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
        WARN_ON(lag->ref_count == 0);

        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

        /* Any VLANs configured on the port are no longer valid */
        mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
        mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
        /* Make the LAG and its directly linked uppers leave bridges they
         * are member in
         */
        mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

        /* Last member leaving: remove the LAG from hardware. */
        if (lag->ref_count == 1)
                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

        mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
                                     mlxsw_sp_port->local_port);
        mlxsw_sp_port->lagged = 0;
        lag->ref_count--;

        /* Make sure untagged frames are allowed to ingress */
        mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}
3618
3619 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3620                                       u16 lag_id)
3621 {
3622         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3623         char sldr_pl[MLXSW_REG_SLDR_LEN];
3624
3625         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3626                                          mlxsw_sp_port->local_port);
3627         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3628 }
3629
3630 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3631                                          u16 lag_id)
3632 {
3633         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3634         char sldr_pl[MLXSW_REG_SLDR_LEN];
3635
3636         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3637                                             mlxsw_sp_port->local_port);
3638         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3639 }
3640
3641 static int
3642 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
3643 {
3644         int err;
3645
3646         err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
3647                                            mlxsw_sp_port->lag_id);
3648         if (err)
3649                 return err;
3650
3651         err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3652         if (err)
3653                 goto err_dist_port_add;
3654
3655         return 0;
3656
3657 err_dist_port_add:
3658         mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3659         return err;
3660 }
3661
3662 static int
3663 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
3664 {
3665         int err;
3666
3667         err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3668                                             mlxsw_sp_port->lag_id);
3669         if (err)
3670                 return err;
3671
3672         err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
3673                                             mlxsw_sp_port->lag_id);
3674         if (err)
3675                 goto err_col_port_disable;
3676
3677         return 0;
3678
3679 err_col_port_disable:
3680         mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3681         return err;
3682 }
3683
3684 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3685                                      struct netdev_lag_lower_state_info *info)
3686 {
3687         if (info->tx_enabled)
3688                 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
3689         else
3690                 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
3691 }
3692
3693 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
3694                                  bool enable)
3695 {
3696         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3697         enum mlxsw_reg_spms_state spms_state;
3698         char *spms_pl;
3699         u16 vid;
3700         int err;
3701
3702         spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
3703                               MLXSW_REG_SPMS_STATE_DISCARDING;
3704
3705         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
3706         if (!spms_pl)
3707                 return -ENOMEM;
3708         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
3709
3710         for (vid = 0; vid < VLAN_N_VID; vid++)
3711                 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
3712
3713         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
3714         kfree(spms_pl);
3715         return err;
3716 }
3717
/* Prepare the port for Open vSwitch: switch to virtual-port mode, force
 * STP forwarding, allow all VLANs (untagged, no PVID) and disable learning
 * on each VID. On failure, every completed step is rolled back in reverse
 * order.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
        u16 vid = 1;
        int err;

        err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
        if (err)
                return err;
        err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
        if (err)
                goto err_port_stp_set;
        /* Allow VIDs 1..4094, untagged egress, no PVID. */
        err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
                                     true, false);
        if (err)
                goto err_port_vlan_set;

        for (; vid <= VLAN_N_VID - 1; vid++) {
                err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
                                                     vid, false);
                if (err)
                        goto err_vid_learning_set;
        }

        return 0;

err_vid_learning_set:
        /* Re-enable learning on the VIDs already processed; vid is a u16
         * starting at >= 1, so the loop terminates at vid == 0 without
         * wrapping concerns being exercised.
         */
        for (vid--; vid >= 1; vid--)
                mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
        mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
        mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
        return err;
}
3752
/* Undo mlxsw_sp_port_ovs_join(): re-enable learning on all VIDs, remove
 * the VLAN membership, restore STP discarding and leave virtual-port
 * mode — the exact reverse of the join sequence.
 */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
        u16 vid;

        /* Downward loop stops at vid == 1; vid is u16 but never reaches 0
         * inside the body, so there is no wraparound issue.
         */
        for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
                mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
                                               vid, true);

        mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
                               false, false);
        mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
        mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
3766
3767 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
3768 {
3769         unsigned int num_vxlans = 0;
3770         struct net_device *dev;
3771         struct list_head *iter;
3772
3773         netdev_for_each_lower_dev(br_dev, dev, iter) {
3774                 if (netif_is_vxlan(dev))
3775                         num_vxlans++;
3776         }
3777
3778         return num_vxlans > 1;
3779 }
3780
/* Return true when no two VxLAN devices under @br_dev map to the same
 * VLAN (PVID / egress-untagged). Tracked with a VLAN_N_VID-wide bitmap;
 * a repeated PVID means an invalid configuration.
 */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
        DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
        struct net_device *dev;
        struct list_head *iter;

        netdev_for_each_lower_dev(br_dev, dev, iter) {
                u16 pvid;
                int err;

                if (!netif_is_vxlan(dev))
                        continue;

                /* VxLAN devices without a mapped VID are ignored. */
                err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
                if (err || !pvid)
                        continue;

                /* Second VxLAN claiming the same VLAN -> invalid. */
                if (test_and_set_bit(pvid, vlans))
                        return false;
        }

        return true;
}
3804
3805 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
3806                                            struct netlink_ext_ack *extack)
3807 {
3808         if (br_multicast_enabled(br_dev)) {
3809                 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
3810                 return false;
3811         }
3812
3813         if (!br_vlan_enabled(br_dev) &&
3814             mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
3815                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
3816                 return false;
3817         }
3818
3819         if (br_vlan_enabled(br_dev) &&
3820             !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
3821                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
3822                 return false;
3823         }
3824
3825         return true;
3826 }
3827
/* Handle NETDEV_PRECHANGEUPPER / NETDEV_CHANGEUPPER for a physical port.
 *
 * PRECHANGEUPPER validates the requested topology change and may veto it
 * with an extack message; CHANGEUPPER performs the actual join/leave.
 * @lower_dev is the device the notification was originally received on
 * (the port itself, or a VLAN/LAG device above it), @dev is the port
 * netdev. The order of the validation checks below determines which
 * extack message user space sees — do not reorder.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG, bridge, OVS and macvlan uppers are
		 * supported on top of a port.
		 */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking needs no validation */
		if (!info->linking)
			break;
		/* Joining a bridge with VxLAN devices is only allowed when
		 * the VxLAN configuration itself can be offloaded.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		/* Let the LAG-specific code veto unsupported LAG modes */
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		/* macvlan uppers require a router interface (RIF) on the
		 * device the notification was received on.
		 */
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				/* Stop distributing traffic to the port
				 * before removing it from the LAG.
				 */
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			/* macvlan join needs no action here; only clean up
			 * its FDB entries on unlink.
			 */
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* Only a VLAN upper that is itself a bridge port
			 * being unlinked requires a bridge leave.
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
3941
3942 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
3943                                                unsigned long event, void *ptr)
3944 {
3945         struct netdev_notifier_changelowerstate_info *info;
3946         struct mlxsw_sp_port *mlxsw_sp_port;
3947         int err;
3948
3949         mlxsw_sp_port = netdev_priv(dev);
3950         info = ptr;
3951
3952         switch (event) {
3953         case NETDEV_CHANGELOWERSTATE:
3954                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
3955                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
3956                                                         info->lower_state_info);
3957                         if (err)
3958                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
3959                 }
3960                 break;
3961         }
3962
3963         return 0;
3964 }
3965
3966 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
3967                                          struct net_device *port_dev,
3968                                          unsigned long event, void *ptr)
3969 {
3970         switch (event) {
3971         case NETDEV_PRECHANGEUPPER:
3972         case NETDEV_CHANGEUPPER:
3973                 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
3974                                                            event, ptr);
3975         case NETDEV_CHANGELOWERSTATE:
3976                 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
3977                                                            ptr);
3978         }
3979
3980         return 0;
3981 }
3982
3983 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3984                                         unsigned long event, void *ptr)
3985 {
3986         struct net_device *dev;
3987         struct list_head *iter;
3988         int ret;
3989
3990         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3991                 if (mlxsw_sp_port_dev_check(dev)) {
3992                         ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
3993                                                             ptr);
3994                         if (ret)
3995                                 return ret;
3996                 }
3997         }
3998
3999         return 0;
4000 }
4001
/* Handle (PRE)CHANGEUPPER for a VLAN device whose real device is a port.
 *
 * Only bridge and macvlan uppers are supported on top of a port VLAN
 * device. @vid is the VLAN ID of @vlan_dev; it is currently unused here
 * but kept for symmetry with the other VLAN event handlers. Check order
 * determines which extack message user space sees — do not reorder.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking needs no validation */
		if (!info->linking)
			break;
		/* Joining a bridge with VxLAN devices is only allowed when
		 * the VxLAN configuration itself can be offloaded.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		/* macvlan uppers require a router interface on @vlan_dev */
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			/* Only clean up the macvlan's FDB entries on unlink */
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* PRECHANGEUPPER should have rejected anything else */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
4068
4069 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4070                                                   struct net_device *lag_dev,
4071                                                   unsigned long event,
4072                                                   void *ptr, u16 vid)
4073 {
4074         struct net_device *dev;
4075         struct list_head *iter;
4076         int ret;
4077
4078         netdev_for_each_lower_dev(lag_dev, dev, iter) {
4079                 if (mlxsw_sp_port_dev_check(dev)) {
4080                         ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4081                                                                  event, ptr,
4082                                                                  vid);
4083                         if (ret)
4084                                 return ret;
4085                 }
4086         }
4087
4088         return 0;
4089 }
4090
/* Handle (PRE)CHANGEUPPER for a VLAN device whose real device is a bridge.
 *
 * Only macvlan uppers are supported on such devices, and only when a
 * router interface exists for @vlan_dev. Events on bridges this driver
 * does not back (no mlxsw lower) are ignored.
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Not our bridge — nothing to do */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		/* macvlan uppers require a router interface on @vlan_dev */
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* Clean up the macvlan's FDB entries on unlink */
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
4132
4133 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4134                                          unsigned long event, void *ptr)
4135 {
4136         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4137         u16 vid = vlan_dev_vlan_id(vlan_dev);
4138
4139         if (mlxsw_sp_port_dev_check(real_dev))
4140                 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4141                                                           event, ptr, vid);
4142         else if (netif_is_lag_master(real_dev))
4143                 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4144                                                               real_dev, event,
4145                                                               ptr, vid);
4146         else if (netif_is_bridge_master(real_dev))
4147                 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
4148                                                             event, ptr, vid);
4149
4150         return 0;
4151 }
4152
/* Handle (PRE)CHANGEUPPER for a bridge device backed by this driver.
 *
 * Only VLAN and macvlan uppers are supported on top of a bridge; macvlan
 * additionally requires a router interface on @br_dev. On unlink, the
 * corresponding router state (RIF / macvlan FDB entries) is torn down.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Not our bridge — nothing to do */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		/* macvlan uppers require a router interface on @br_dev */
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* Destroy the RIF of a VLAN upper being unlinked */
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		/* Clean up the macvlan's FDB entries on unlink */
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
4194
4195 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
4196                                             unsigned long event, void *ptr)
4197 {
4198         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
4199         struct netdev_notifier_changeupper_info *info = ptr;
4200         struct netlink_ext_ack *extack;
4201
4202         if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
4203                 return 0;
4204
4205         extack = netdev_notifier_info_to_extack(&info->info);
4206
4207         /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
4208         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4209
4210         return -EOPNOTSUPP;
4211 }
4212
4213 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4214 {
4215         struct netdev_notifier_changeupper_info *info = ptr;
4216
4217         if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4218                 return false;
4219         return netif_is_l3_master(info->upper_dev);
4220 }
4221
/* Handle netdev events on a VxLAN device that is (or is becoming) a lower
 * of a bridge backed by this driver.
 *
 * CHANGEUPPER joins/leaves the VxLAN device to/from the bridge's hardware
 * representation; NETDEV_PRE_UP / NETDEV_DOWN do the same when the device
 * is brought up or down while already enslaved. Bridges without an mlxsw
 * lower device are ignored throughout.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* The join is deferred to NETDEV_PRE_UP for devices
			 * that are not yet running.
			 */
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		/* Device is coming up while enslaved — join it now */
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		/* Device went down while enslaved — undo the join */
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}
4289
/* Main netdevice notifier callback for the driver.
 *
 * First reconciles mirroring (SPAN) state with the event, then
 * demultiplexes the event to the handler matching the device type.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	/* Invalidate any SPAN entry mirroring to a device that is going
	 * away, then recompute SPAN state for all entries.
	 */
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* NOTE(review): the VxLAN handler's return value can be overwritten
	 * by the if/else chain below when the same device also matches one
	 * of those branches (e.g. NETDEV_CHANGEMTU on a VxLAN device) —
	 * confirm this is intentional.
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
4333
/* Notifier blocks registered with the inetaddr/inet6addr validator chains
 * in mlxsw_sp_module_init() below.
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};
4341
/* Per-generation PCI ID tables and PCI drivers for Spectrum-1/2/3.
 * The drivers are registered via mlxsw_pci_driver_register() in
 * mlxsw_sp_module_init() below.
 */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};
4371
/* Module entry point: register the address-validator notifiers, the three
 * per-generation mlxsw core drivers and their PCI drivers. Any failure
 * unwinds everything registered so far, in reverse order, via the goto
 * ladder.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	/* Validator notifiers cannot fail to register */
	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	/* PCI drivers are registered only after all core drivers, so a
	 * probe can never race an unregistered core driver.
	 */
	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}
4420
/* Module exit: tear everything down in exact reverse order of
 * mlxsw_sp_module_init() — PCI drivers first, then core drivers, then
 * the validator notifiers.
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
4432
/* Module registration and modinfo metadata. The device tables expose the
 * PCI IDs for module autoloading; MODULE_FIRMWARE records the firmware
 * images the driver may request.
 */
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);