Merge tag 'tty-5.10-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty
[platform/kernel/linux-rpi.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/jhash.h>
25 #include <linux/log2.h>
26 #include <net/switchdev.h>
27 #include <net/pkt_cls.h>
28 #include <net/netevent.h>
29 #include <net/addrconf.h>
30
31 #include "spectrum.h"
32 #include "pci.h"
33 #include "core.h"
34 #include "core_env.h"
35 #include "reg.h"
36 #include "port.h"
37 #include "trap.h"
38 #include "txheader.h"
39 #include "spectrum_cnt.h"
40 #include "spectrum_dpipe.h"
41 #include "spectrum_acl_flex_actions.h"
42 #include "spectrum_span.h"
43 #include "spectrum_ptp.h"
44 #include "spectrum_trap.h"
45
46 #define MLXSW_SP1_FWREV_MAJOR 13
47 #define MLXSW_SP1_FWREV_MINOR 2008
48 #define MLXSW_SP1_FWREV_SUBMINOR 1310
49 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
50
51 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
52         .major = MLXSW_SP1_FWREV_MAJOR,
53         .minor = MLXSW_SP1_FWREV_MINOR,
54         .subminor = MLXSW_SP1_FWREV_SUBMINOR,
55         .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
56 };
57
58 #define MLXSW_SP1_FW_FILENAME \
59         "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
60         "." __stringify(MLXSW_SP1_FWREV_MINOR) \
61         "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
62
63 #define MLXSW_SP2_FWREV_MAJOR 29
64 #define MLXSW_SP2_FWREV_MINOR 2008
65 #define MLXSW_SP2_FWREV_SUBMINOR 1310
66
67 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
68         .major = MLXSW_SP2_FWREV_MAJOR,
69         .minor = MLXSW_SP2_FWREV_MINOR,
70         .subminor = MLXSW_SP2_FWREV_SUBMINOR,
71 };
72
73 #define MLXSW_SP2_FW_FILENAME \
74         "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
75         "." __stringify(MLXSW_SP2_FWREV_MINOR) \
76         "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
77
78 #define MLXSW_SP3_FWREV_MAJOR 30
79 #define MLXSW_SP3_FWREV_MINOR 2008
80 #define MLXSW_SP3_FWREV_SUBMINOR 1310
81
82 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
83         .major = MLXSW_SP3_FWREV_MAJOR,
84         .minor = MLXSW_SP3_FWREV_MINOR,
85         .subminor = MLXSW_SP3_FWREV_SUBMINOR,
86 };
87
88 #define MLXSW_SP3_FW_FILENAME \
89         "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
90         "." __stringify(MLXSW_SP3_FWREV_MINOR) \
91         "." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"
92
93 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
94 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
95 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
96
97 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
98         0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
99 };
100 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
101         0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
102 };
103
104 /* tx_hdr_version
105  * Tx header version.
106  * Must be set to 1.
107  */
108 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
109
110 /* tx_hdr_ctl
111  * Packet control type.
112  * 0 - Ethernet control (e.g. EMADs, LACP)
113  * 1 - Ethernet data
114  */
115 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
116
117 /* tx_hdr_proto
118  * Packet protocol type. Must be set to 1 (Ethernet).
119  */
120 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
121
122 /* tx_hdr_rx_is_router
123  * Packet is sent from the router. Valid for data packets only.
124  */
125 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
126
127 /* tx_hdr_fid_valid
128  * Indicates if the 'fid' field is valid and should be used for
129  * forwarding lookup. Valid for data packets only.
130  */
131 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
132
133 /* tx_hdr_swid
134  * Switch partition ID. Must be set to 0.
135  */
136 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
137
138 /* tx_hdr_control_tclass
139  * Indicates if the packet should use the control TClass and not one
140  * of the data TClasses.
141  */
142 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
143
144 /* tx_hdr_etclass
145  * Egress TClass to be used on the egress device on the egress port.
146  */
147 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
148
149 /* tx_hdr_port_mid
150  * Destination local port for unicast packets.
151  * Destination multicast ID for multicast packets.
152  *
153  * Control packets are directed to a specific egress port, while data
154  * packets are transmitted through the CPU port (0) into the switch partition,
155  * where forwarding rules are applied.
156  */
157 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
158
159 /* tx_hdr_fid
160  * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
161  * set, otherwise calculated based on the packet's VID using VID to FID mapping.
162  * Valid for data packets only.
163  */
164 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
165
166 /* tx_hdr_type
167  * 0 - Data packets
168  * 6 - Control packets
169  */
170 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
171
/* Read the packet and byte counters of the flow counter at @counter_index.
 * The MGPC register is queried with a NOP opcode so the hardware counter is
 * read without being cleared. Either @packets or @bytes may be NULL if the
 * caller does not need that value. Returns 0 or a negative errno from the
 * register query.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}
190
/* Reset the flow counter at @counter_index to zero using an MGPC clear
 * operation. Returns 0 or a negative errno from the register write.
 */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}
200
/* Allocate a counter from the flow sub-pool and store its index in
 * *p_counter_index. The counter is cleared before being handed to the
 * caller, since the pool may recycle indexes whose hardware values are
 * stale; if the clear fails the counter is returned to the pool.
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}
220
221 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
222                                 unsigned int counter_index)
223 {
224          mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
225                                counter_index);
226 }
227
/* Prepend and fill the mlxsw TX header on @skb for a control packet.
 * The caller must have reserved MLXSW_TXHDR_LEN bytes of headroom
 * (see skb_cow_head() in mlxsw_sp_port_xmit()). The header directs the
 * packet to the egress port given by tx_info->local_port using the
 * control TClass.
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	/* Start from a zeroed header; fields not set below must be 0. */
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
243
/* Translate a bridge STP port state (BR_STATE_*) into the corresponding
 * SPMS register state. Listening, disabled and blocking all map to
 * discarding in hardware. Any other value is a programming error and
 * triggers BUG().
 */
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING:
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}
259
/* Program the STP state of @vid on the port via the SPMS register.
 * @state is a bridge BR_STATE_* value, converted to the hardware
 * representation by mlxsw_sp_stp_spms_state(). The payload is heap
 * allocated because SPMS is too large for the stack. Returns 0 or a
 * negative errno.
 */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
278
/* Query the switch base MAC address from the SPAD register and cache it
 * in mlxsw_sp->base_mac. Per-port MAC addresses are later derived from
 * this base (see mlxsw_sp_port_dev_addr_init()). Returns 0 or a negative
 * errno.
 */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
290
291 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
292                                    bool is_up)
293 {
294         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
295         char paos_pl[MLXSW_REG_PAOS_LEN];
296
297         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
298                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
299                             MLXSW_PORT_ADMIN_STATUS_DOWN);
300         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
301 }
302
/* Program @addr as the port's hardware MAC address via the PPAD register.
 * Returns 0 or a negative errno from the register write.
 */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}
313
/* Derive the port's MAC address from the switch base MAC by adding the
 * local port number to the last byte, store it in the netdev and program
 * it into hardware. Assumes base_mac was already fetched by
 * mlxsw_sp_base_mac_get(). Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
323
/* Query the maximum MTU supported by the port. Packing PMTU with an MTU
 * of 0 turns the access into a read; the maximum is returned in
 * *p_max_mtu. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;

	*p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
	return 0;
}
338
/* Program the port MTU via the PMTU register. The hardware MTU covers
 * the Ethernet header and the mlxsw TX header, so both are added to the
 * L3 MTU requested by the stack before the range check against the
 * cached per-port maximum. Returns 0, -EINVAL if too large, or a
 * negative errno from the register write.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	if (mtu > mlxsw_sp_port->max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
351
352 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
353 {
354         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
355         char pspa_pl[MLXSW_REG_PSPA_LEN];
356
357         mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
358         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
359 }
360
361 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
362 {
363         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
364         char svpe_pl[MLXSW_REG_SVPE_LEN];
365
366         mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
367         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
368 }
369
/* Enable or disable MAC learning for @vid on the port via the SPVMLR
 * register (a single-VID range: begin == end == vid). The payload is
 * heap allocated because SPVMLR is too large for the stack. Returns 0
 * or a negative errno.
 */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}
386
/* Program @vid as the port's PVID via the SPVID register. Helper for
 * mlxsw_sp_port_pvid_set(); does not update the cached pvid. Returns 0
 * or a negative errno.
 */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}
396
/* Allow or disallow reception of untagged frames on the port via the
 * SPAFT register. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}
406
/* Set the port's PVID and update the cached value. A @vid of 0 means
 * "no PVID": untagged reception is disabled instead of programming a
 * PVID. For a non-zero @vid the PVID is programmed first and untagged
 * reception is then enabled; if enabling fails, the previous PVID is
 * restored so hardware and the cached pvid stay consistent. Returns 0
 * or a negative errno.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	/* Roll back to the previously cached PVID; best effort. */
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}
431
432 static int
433 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
434 {
435         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
436         char sspr_pl[MLXSW_REG_SSPR_LEN];
437
438         mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
439         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
440 }
441
/* Query the module/lane mapping of @local_port from the PMLP register,
 * validate that the configuration is one the driver supports, and fill
 * @port_mapping with the module number, width and first TX lane.
 *
 * Supported configurations require: a power-of-2 width (or 0 for an
 * unmapped port), all lanes on the same module, identical TX and RX lane
 * numbers when they are reported separately, and sequential lanes
 * starting at 0. Returns 0, -EINVAL for an unsupported configuration, or
 * a negative errno from the register query.
 */
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	bool separate_rxtx;
	u8 module;
	u8 width;
	int err;
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

	/* Width 0 (unmapped) is allowed; any other width must be 1/2/4/8. */
	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
492
/* Program the port's module/lane mapping into the PMLP register from the
 * cached port mapping: one module for all lanes, with sequential lane
 * numbers starting at port_mapping->lane. Setting the TX lane also sets
 * the RX lane. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
509
/* Unmap the port from its module by writing a PMLP record with width 0.
 * Counterpart of mlxsw_sp_port_module_map(). Returns 0 or a negative
 * errno.
 */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
519
520 static int mlxsw_sp_port_open(struct net_device *dev)
521 {
522         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
523         int err;
524
525         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
526         if (err)
527                 return err;
528         netif_start_queue(dev);
529         return 0;
530 }
531
532 static int mlxsw_sp_port_stop(struct net_device *dev)
533 {
534         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
535
536         netif_stop_queue(dev);
537         return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
538 }
539
/* ndo_start_xmit: transmit @skb through the CPU port towards
 * mlxsw_sp_port->local_port. The frame gets an mlxsw TX header
 * prepended; headroom for it is ensured up front with skb_cow_head().
 * Per-CPU TX statistics are updated on success, tx_dropped on any drop.
 * Returns NETDEV_TX_OK (the skb was consumed or freed) or NETDEV_TX_BUSY
 * (the skb was not consumed and will be retried by the stack).
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	/* Make sure there is writable headroom for the TX header. */
	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	/* Busy check happens before any modification that would make a
	 * retried skb invalid.
	 */
	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* eth_skb_pad() frees the skb on failure. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
592
/* ndo_set_rx_mode: intentionally empty — this driver has no per-port
 * software RX filter state to update here.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
596
/* ndo_set_mac_address: validate the requested address, program it into
 * hardware first, and only then update the netdev copy so the two never
 * diverge on failure. Returns 0, -EADDRNOTAVAIL for an invalid address,
 * or a negative errno from the hardware write.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
612
/* ndo_change_mtu: resize the port's buffer headroom for the new MTU,
 * then program the MTU itself. The original headroom configuration is
 * saved so it can be restored if the MTU write fails; dev->mtu is only
 * updated once both steps succeed. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	/* Roll the headroom back to match the unchanged MTU. */
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}
642
/* Accumulate the per-CPU software counters of the port into @stats.
 * Each CPU's snapshot is taken under the u64_stats sequence counter and
 * retried if a writer raced with us. tx_dropped is a u32 updated without
 * syncp protection, so it is summed outside the retry loop. The caller
 * is expected to provide a zeroed @stats. Always returns 0.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return 0;
}
674
675 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
676 {
677         switch (attr_id) {
678         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
679                 return true;
680         }
681
682         return false;
683 }
684
/* ndo_get_offload_stats: fill @sp for the requested attribute. Only
 * IFLA_OFFLOAD_XSTATS_CPU_HIT is supported, served from the per-CPU
 * software counters; anything else returns -EINVAL.
 */
static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}
695
/* Query a raw PPCNT counter page for the port. @grp selects the counter
 * group and @prio the per-group index (priority/TC); the raw register
 * payload is returned in @ppcnt_pl for the caller to parse. Returns 0 or
 * a negative errno.
 */
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}
705
/* Fill @stats from the hardware IEEE 802.3 counter group (PPCNT).
 * rx_errors is derived as the sum of CRC, alignment and length errors;
 * rx_length_errors aggregates in-range, out-of-range and too-long
 * counters. On query failure @stats is left untouched and the error is
 * returned.
 */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}
744
/* Fill extended hardware statistics for the port from several PPCNT
 * counter groups: ECN marks (extended group), per-TC WRED drops, queue
 * backlog and tail drops, and per-priority TX packet/byte counters.
 * Collection is best effort: a failed query simply leaves the affected
 * fields unchanged and the remaining groups are still collected.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
786
/* Periodic work item that refreshes the cached hardware statistics of a
 * port so mlxsw_sp_port_get_stats64() can serve them without touching
 * hardware (it may run in atomic context). Counters are not collected
 * while the carrier is down; in either case the work rearms itself with
 * the standard update interval.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}
808
/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 * The cache is refreshed by update_stats_cache().
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}
820
/* Program membership of the port in the VLAN range [@vid_begin,
 * @vid_end] via a single SPVM write. The caller must keep the range
 * within one SPVM record batch (see mlxsw_sp_port_vlan_set()). The
 * payload is heap allocated because SPVM is too large for the stack.
 * Returns 0 or a negative errno.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}
839
840 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
841                            u16 vid_end, bool is_member, bool untagged)
842 {
843         u16 vid, vid_e;
844         int err;
845
846         for (vid = vid_begin; vid <= vid_end;
847              vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
848                 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
849                             vid_end);
850
851                 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
852                                                is_member, untagged);
853                 if (err)
854                         return err;
855         }
856
857         return 0;
858 }
859
860 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
861                                      bool flush_default)
862 {
863         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
864
865         list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
866                                  &mlxsw_sp_port->vlans_list, list) {
867                 if (!flush_default &&
868                     mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
869                         continue;
870                 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
871         }
872 }
873
874 static void
875 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
876 {
877         if (mlxsw_sp_port_vlan->bridge_port)
878                 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
879         else if (mlxsw_sp_port_vlan->fid)
880                 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
881 }
882
883 struct mlxsw_sp_port_vlan *
884 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
885 {
886         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
887         bool untagged = vid == MLXSW_SP_DEFAULT_VID;
888         int err;
889
890         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
891         if (mlxsw_sp_port_vlan)
892                 return ERR_PTR(-EEXIST);
893
894         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
895         if (err)
896                 return ERR_PTR(err);
897
898         mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
899         if (!mlxsw_sp_port_vlan) {
900                 err = -ENOMEM;
901                 goto err_port_vlan_alloc;
902         }
903
904         mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
905         mlxsw_sp_port_vlan->vid = vid;
906         list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
907
908         return mlxsw_sp_port_vlan;
909
910 err_port_vlan_alloc:
911         mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
912         return ERR_PTR(err);
913 }
914
/* Tear down a port VLAN entry: detach it from its bridge port / router
 * FID, unlink and free it, then remove the VID from the port's HW VLAN
 * filter. The port and VID are saved up front because the entry is
 * freed before the final HW update.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
925
926 static int mlxsw_sp_port_add_vid(struct net_device *dev,
927                                  __be16 __always_unused proto, u16 vid)
928 {
929         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
930
931         /* VLAN 0 is added to HW filter when device goes up, but it is
932          * reserved in our case, so simply return.
933          */
934         if (!vid)
935                 return 0;
936
937         return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
938 }
939
940 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
941                                   __be16 __always_unused proto, u16 vid)
942 {
943         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
944         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
945
946         /* VLAN 0 is removed from HW filter when device goes down, but
947          * it is reserved in our case, so simply return.
948          */
949         if (!vid)
950                 return 0;
951
952         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
953         if (!mlxsw_sp_port_vlan)
954                 return 0;
955         mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
956
957         return 0;
958 }
959
960 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
961                                    struct flow_block_offload *f)
962 {
963         switch (f->binder_type) {
964         case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
965                 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
966         case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
967                 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
968         case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
969                 return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
970         default:
971                 return -EOPNOTSUPP;
972         }
973 }
974
/* ndo_setup_tc implementation: dispatch tc offload requests to the
 * matching flow-block or qdisc offload handler; unsupported setup types
 * are rejected with -EOPNOTSUPP.
 */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
997
998 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
999 {
1000         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1001
1002         if (!enable) {
1003                 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
1004                     mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
1005                         netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1006                         return -EINVAL;
1007                 }
1008                 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
1009                 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
1010         } else {
1011                 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
1012                 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
1013         }
1014         return 0;
1015 }
1016
/* NETIF_F_LOOPBACK toggle handler: program the port's loopback setting
 * through the PPLR register. If the device is running, it is taken
 * administratively down for the duration of the register write and
 * brought back up afterwards.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	/* Restore admin state even if the register write failed. */
	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}
1035
1036 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1037
1038 static int mlxsw_sp_handle_feature(struct net_device *dev,
1039                                    netdev_features_t wanted_features,
1040                                    netdev_features_t feature,
1041                                    mlxsw_sp_feature_handler feature_handler)
1042 {
1043         netdev_features_t changes = wanted_features ^ dev->features;
1044         bool enable = !!(wanted_features & feature);
1045         int err;
1046
1047         if (!(changes & feature))
1048                 return 0;
1049
1050         err = feature_handler(dev, enable);
1051         if (err) {
1052                 netdev_err(dev, "%s feature %pNF failed, err %d\n",
1053                            enable ? "Enable" : "Disable", &feature, err);
1054                 return err;
1055         }
1056
1057         if (enable)
1058                 dev->features |= feature;
1059         else
1060                 dev->features &= ~feature;
1061
1062         return 0;
1063 }
/* ndo_set_features implementation.
 *
 * Note: the handlers' negative error codes are OR-ed together, so the
 * accumulated value is not a meaningful errno on its own; any failure
 * restores the previously active feature set and is reported to the
 * caller uniformly as -EINVAL.
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}
1082
1083 static struct devlink_port *
1084 mlxsw_sp_port_get_devlink_port(struct net_device *dev)
1085 {
1086         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1087         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1088
1089         return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
1090                                                 mlxsw_sp_port->local_port);
1091 }
1092
1093 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1094                                       struct ifreq *ifr)
1095 {
1096         struct hwtstamp_config config;
1097         int err;
1098
1099         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1100                 return -EFAULT;
1101
1102         err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
1103                                                              &config);
1104         if (err)
1105                 return err;
1106
1107         if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1108                 return -EFAULT;
1109
1110         return 0;
1111 }
1112
1113 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1114                                       struct ifreq *ifr)
1115 {
1116         struct hwtstamp_config config;
1117         int err;
1118
1119         err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
1120                                                              &config);
1121         if (err)
1122                 return err;
1123
1124         if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1125                 return -EFAULT;
1126
1127         return 0;
1128 }
1129
1130 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
1131 {
1132         struct hwtstamp_config config = {0};
1133
1134         mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
1135 }
1136
1137 static int
1138 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1139 {
1140         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1141
1142         switch (cmd) {
1143         case SIOCSHWTSTAMP:
1144                 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
1145         case SIOCGHWTSTAMP:
1146                 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
1147         default:
1148                 return -EOPNOTSUPP;
1149         }
1150 }
1151
/* Netdev callbacks for Spectrum front-panel port netdevs. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};
1169
/* Program the port's advertised speeds (PTYS) to the intersection of
 * the speeds the device reports as capable and the speeds supported by
 * the driver, preserving the port's autoneg setting.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	/* Query the device's capabilities first. */
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	/* Mask the device capabilities down to the driver-supported set
	 * and write the result back as the advertised speeds.
	 */
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
1199
1200 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
1201 {
1202         const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
1203         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1204         char ptys_pl[MLXSW_REG_PTYS_LEN];
1205         u32 eth_proto_oper;
1206         int err;
1207
1208         port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
1209         port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
1210                                                mlxsw_sp_port->local_port, 0,
1211                                                false);
1212         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1213         if (err)
1214                 return err;
1215         port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
1216                                                  &eth_proto_oper);
1217         *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
1218         return 0;
1219 }
1220
1221 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1222                           enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1223                           bool dwrr, u8 dwrr_weight)
1224 {
1225         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1226         char qeec_pl[MLXSW_REG_QEEC_LEN];
1227
1228         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1229                             next_index);
1230         mlxsw_reg_qeec_de_set(qeec_pl, true);
1231         mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1232         mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1233         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1234 }
1235
1236 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1237                                   enum mlxsw_reg_qeec_hr hr, u8 index,
1238                                   u8 next_index, u32 maxrate, u8 burst_size)
1239 {
1240         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1241         char qeec_pl[MLXSW_REG_QEEC_LEN];
1242
1243         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1244                             next_index);
1245         mlxsw_reg_qeec_mase_set(qeec_pl, true);
1246         mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1247         mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
1248         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1249 }
1250
1251 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
1252                                     enum mlxsw_reg_qeec_hr hr, u8 index,
1253                                     u8 next_index, u32 minrate)
1254 {
1255         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1256         char qeec_pl[MLXSW_REG_QEEC_LEN];
1257
1258         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1259                             next_index);
1260         mlxsw_reg_qeec_mise_set(qeec_pl, true);
1261         mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
1262
1263         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1264 }
1265
1266 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1267                               u8 switch_prio, u8 tclass)
1268 {
1269         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1270         char qtct_pl[MLXSW_REG_QTCT_LEN];
1271
1272         mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1273                             tclass);
1274         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1275 }
1276
/* Bring the port's ETS scheduling hierarchy and shapers to a known
 * default state: build the group/subgroup/TC element hierarchy, disable
 * the max shaper everywhere it exists, set the min shaper on the
 * multicast TCs (elements i + 8) and map all priorities to TC 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	/* TC elements i and i + 8 (the multicast TCs - see the min shaper
	 * setup below) are both linked to subgroup i.
	 */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1362
1363 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
1364                                         bool enable)
1365 {
1366         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1367         char qtctm_pl[MLXSW_REG_QTCTM_LEN];
1368
1369         mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
1370         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
1371 }
1372
1373 static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
1374 {
1375         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1376         u8 module = mlxsw_sp_port->mapping.module;
1377         u64 overheat_counter;
1378         int err;
1379
1380         err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
1381                                                     &overheat_counter);
1382         if (err)
1383                 return err;
1384
1385         mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
1386         return 0;
1387 }
1388
1389 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1390                                 u8 split_base_local_port,
1391                                 struct mlxsw_sp_port_mapping *port_mapping)
1392 {
1393         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1394         bool split = !!split_base_local_port;
1395         struct mlxsw_sp_port *mlxsw_sp_port;
1396         u32 lanes = port_mapping->width;
1397         struct net_device *dev;
1398         bool splittable;
1399         int err;
1400
1401         splittable = lanes > 1 && !split;
1402         err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
1403                                    port_mapping->module + 1, split,
1404                                    port_mapping->lane / lanes,
1405                                    splittable, lanes,
1406                                    mlxsw_sp->base_mac,
1407                                    sizeof(mlxsw_sp->base_mac));
1408         if (err) {
1409                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1410                         local_port);
1411                 return err;
1412         }
1413
1414         dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1415         if (!dev) {
1416                 err = -ENOMEM;
1417                 goto err_alloc_etherdev;
1418         }
1419         SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
1420         dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
1421         mlxsw_sp_port = netdev_priv(dev);
1422         mlxsw_sp_port->dev = dev;
1423         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1424         mlxsw_sp_port->local_port = local_port;
1425         mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
1426         mlxsw_sp_port->split = split;
1427         mlxsw_sp_port->split_base_local_port = split_base_local_port;
1428         mlxsw_sp_port->mapping = *port_mapping;
1429         mlxsw_sp_port->link.autoneg = 1;
1430         INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
1431
1432         mlxsw_sp_port->pcpu_stats =
1433                 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1434         if (!mlxsw_sp_port->pcpu_stats) {
1435                 err = -ENOMEM;
1436                 goto err_alloc_stats;
1437         }
1438
1439         INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1440                           &update_stats_cache);
1441
1442         dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1443         dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1444
1445         err = mlxsw_sp_port_module_map(mlxsw_sp_port);
1446         if (err) {
1447                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
1448                         mlxsw_sp_port->local_port);
1449                 goto err_port_module_map;
1450         }
1451
1452         err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1453         if (err) {
1454                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1455                         mlxsw_sp_port->local_port);
1456                 goto err_port_swid_set;
1457         }
1458
1459         err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1460         if (err) {
1461                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1462                         mlxsw_sp_port->local_port);
1463                 goto err_dev_addr_init;
1464         }
1465
1466         netif_carrier_off(dev);
1467
1468         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1469                          NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
1470         dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
1471
1472         dev->min_mtu = 0;
1473         dev->max_mtu = ETH_MAX_MTU;
1474
1475         /* Each packet needs to have a Tx header (metadata) on top all other
1476          * headers.
1477          */
1478         dev->needed_headroom = MLXSW_TXHDR_LEN;
1479
1480         err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1481         if (err) {
1482                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1483                         mlxsw_sp_port->local_port);
1484                 goto err_port_system_port_mapping_set;
1485         }
1486
1487         err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
1488         if (err) {
1489                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1490                         mlxsw_sp_port->local_port);
1491                 goto err_port_speed_by_width_set;
1492         }
1493
1494         err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
1495                                                             &mlxsw_sp_port->max_speed);
1496         if (err) {
1497                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
1498                         mlxsw_sp_port->local_port);
1499                 goto err_max_speed_get;
1500         }
1501
1502         err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
1503         if (err) {
1504                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
1505                         mlxsw_sp_port->local_port);
1506                 goto err_port_max_mtu_get;
1507         }
1508
1509         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1510         if (err) {
1511                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1512                         mlxsw_sp_port->local_port);
1513                 goto err_port_mtu_set;
1514         }
1515
1516         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1517         if (err)
1518                 goto err_port_admin_status_set;
1519
1520         err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1521         if (err) {
1522                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1523                         mlxsw_sp_port->local_port);
1524                 goto err_port_buffers_init;
1525         }
1526
1527         err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1528         if (err) {
1529                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1530                         mlxsw_sp_port->local_port);
1531                 goto err_port_ets_init;
1532         }
1533
1534         err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
1535         if (err) {
1536                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
1537                         mlxsw_sp_port->local_port);
1538                 goto err_port_tc_mc_mode;
1539         }
1540
1541         /* ETS and buffers must be initialized before DCB. */
1542         err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1543         if (err) {
1544                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1545                         mlxsw_sp_port->local_port);
1546                 goto err_port_dcb_init;
1547         }
1548
1549         err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
1550         if (err) {
1551                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
1552                         mlxsw_sp_port->local_port);
1553                 goto err_port_fids_init;
1554         }
1555
1556         err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
1557         if (err) {
1558                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
1559                         mlxsw_sp_port->local_port);
1560                 goto err_port_qdiscs_init;
1561         }
1562
1563         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
1564                                      false);
1565         if (err) {
1566                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
1567                         mlxsw_sp_port->local_port);
1568                 goto err_port_vlan_clear;
1569         }
1570
1571         err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
1572         if (err) {
1573                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
1574                         mlxsw_sp_port->local_port);
1575                 goto err_port_nve_init;
1576         }
1577
1578         err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
1579         if (err) {
1580                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
1581                         mlxsw_sp_port->local_port);
1582                 goto err_port_pvid_set;
1583         }
1584
1585         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
1586                                                        MLXSW_SP_DEFAULT_VID);
1587         if (IS_ERR(mlxsw_sp_port_vlan)) {
1588                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
1589                         mlxsw_sp_port->local_port);
1590                 err = PTR_ERR(mlxsw_sp_port_vlan);
1591                 goto err_port_vlan_create;
1592         }
1593         mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
1594
1595         INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
1596                           mlxsw_sp->ptp_ops->shaper_work);
1597
1598         mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1599
1600         err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
1601         if (err) {
1602                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
1603                         mlxsw_sp_port->local_port);
1604                 goto err_port_overheat_init_val_set;
1605         }
1606
1607         err = register_netdev(dev);
1608         if (err) {
1609                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1610                         mlxsw_sp_port->local_port);
1611                 goto err_register_netdev;
1612         }
1613
1614         mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
1615                                 mlxsw_sp_port, dev);
1616         mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
1617         return 0;
1618
1619 err_register_netdev:
1620 err_port_overheat_init_val_set:
1621         mlxsw_sp->ports[local_port] = NULL;
1622         mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1623 err_port_vlan_create:
1624 err_port_pvid_set:
1625         mlxsw_sp_port_nve_fini(mlxsw_sp_port);
1626 err_port_nve_init:
1627 err_port_vlan_clear:
1628         mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
1629 err_port_qdiscs_init:
1630         mlxsw_sp_port_fids_fini(mlxsw_sp_port);
1631 err_port_fids_init:
1632         mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1633 err_port_dcb_init:
1634         mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
1635 err_port_tc_mc_mode:
1636 err_port_ets_init:
1637         mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
1638 err_port_buffers_init:
1639 err_port_admin_status_set:
1640 err_port_mtu_set:
1641 err_port_max_mtu_get:
1642 err_max_speed_get:
1643 err_port_speed_by_width_set:
1644 err_port_system_port_mapping_set:
1645 err_dev_addr_init:
1646         mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
1647 err_port_swid_set:
1648         mlxsw_sp_port_module_unmap(mlxsw_sp_port);
1649 err_port_module_map:
1650         free_percpu(mlxsw_sp_port->pcpu_stats);
1651 err_alloc_stats:
1652         free_netdev(dev);
1653 err_alloc_etherdev:
1654         mlxsw_core_port_fini(mlxsw_sp->core, local_port);
1655         return err;
1656 }
1657
/* Tear down a front-panel port in strict reverse order of
 * mlxsw_sp_port_create(). Statement order matters: delayed works are
 * cancelled and the netdev unregistered before datapath state is torn down.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	/* Stop periodic HW stats collection and the PTP shaper work so they
	 * cannot run against a half-destroyed port.
	 */
	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	/* Hide the port from lookups before dismantling its state. */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* All VLAN uppers should have been destroyed by the flush above. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}
1682
1683 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
1684 {
1685         struct mlxsw_sp_port *mlxsw_sp_port;
1686         int err;
1687
1688         mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
1689         if (!mlxsw_sp_port)
1690                 return -ENOMEM;
1691
1692         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1693         mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
1694
1695         err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
1696                                        mlxsw_sp_port,
1697                                        mlxsw_sp->base_mac,
1698                                        sizeof(mlxsw_sp->base_mac));
1699         if (err) {
1700                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
1701                 goto err_core_cpu_port_init;
1702         }
1703
1704         mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
1705         return 0;
1706
1707 err_core_cpu_port_init:
1708         kfree(mlxsw_sp_port);
1709         return err;
1710 }
1711
1712 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1713 {
1714         struct mlxsw_sp_port *mlxsw_sp_port =
1715                                 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1716
1717         mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1718         mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1719         kfree(mlxsw_sp_port);
1720 }
1721
1722 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1723 {
1724         return mlxsw_sp->ports[local_port] != NULL;
1725 }
1726
1727 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1728 {
1729         int i;
1730
1731         for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
1732                 if (mlxsw_sp_port_created(mlxsw_sp, i))
1733                         mlxsw_sp_port_remove(mlxsw_sp, i);
1734         mlxsw_sp_cpu_port_remove(mlxsw_sp);
1735         kfree(mlxsw_sp->ports);
1736         mlxsw_sp->ports = NULL;
1737 }
1738
1739 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1740 {
1741         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1742         struct mlxsw_sp_port_mapping *port_mapping;
1743         size_t alloc_size;
1744         int i;
1745         int err;
1746
1747         alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
1748         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1749         if (!mlxsw_sp->ports)
1750                 return -ENOMEM;
1751
1752         err = mlxsw_sp_cpu_port_create(mlxsw_sp);
1753         if (err)
1754                 goto err_cpu_port_create;
1755
1756         for (i = 1; i < max_ports; i++) {
1757                 port_mapping = mlxsw_sp->port_mapping[i];
1758                 if (!port_mapping)
1759                         continue;
1760                 err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
1761                 if (err)
1762                         goto err_port_create;
1763         }
1764         return 0;
1765
1766 err_port_create:
1767         for (i--; i >= 1; i--)
1768                 if (mlxsw_sp_port_created(mlxsw_sp, i))
1769                         mlxsw_sp_port_remove(mlxsw_sp, i);
1770         mlxsw_sp_cpu_port_remove(mlxsw_sp);
1771 err_cpu_port_create:
1772         kfree(mlxsw_sp->ports);
1773         mlxsw_sp->ports = NULL;
1774         return err;
1775 }
1776
1777 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
1778 {
1779         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1780         struct mlxsw_sp_port_mapping port_mapping;
1781         int i;
1782         int err;
1783
1784         mlxsw_sp->port_mapping = kcalloc(max_ports,
1785                                          sizeof(struct mlxsw_sp_port_mapping *),
1786                                          GFP_KERNEL);
1787         if (!mlxsw_sp->port_mapping)
1788                 return -ENOMEM;
1789
1790         for (i = 1; i < max_ports; i++) {
1791                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
1792                 if (err)
1793                         goto err_port_module_info_get;
1794                 if (!port_mapping.width)
1795                         continue;
1796
1797                 mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
1798                                                     sizeof(port_mapping),
1799                                                     GFP_KERNEL);
1800                 if (!mlxsw_sp->port_mapping[i]) {
1801                         err = -ENOMEM;
1802                         goto err_port_module_info_dup;
1803                 }
1804         }
1805         return 0;
1806
1807 err_port_module_info_get:
1808 err_port_module_info_dup:
1809         for (i--; i >= 1; i--)
1810                 kfree(mlxsw_sp->port_mapping[i]);
1811         kfree(mlxsw_sp->port_mapping);
1812         return err;
1813 }
1814
1815 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
1816 {
1817         int i;
1818
1819         for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
1820                 kfree(mlxsw_sp->port_mapping[i]);
1821         kfree(mlxsw_sp->port_mapping);
1822 }
1823
1824 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
1825 {
1826         u8 offset = (local_port - 1) % max_width;
1827
1828         return local_port - offset;
1829 }
1830
/* Create @count split ports starting at @base_port, spaced @offset local
 * ports apart. Each child port inherits the parent mapping with 1/@count
 * of its width, on consecutive lane ranges. On failure, already-created
 * children are removed.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   base_port, &split_port_mapping);
		if (err)
			goto err_port_create;
		/* Advance to the lane range of the next split sibling. */
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	/* Remove the siblings created before the failing one. */
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}
1857
/* Recreate the original unsplit ports that occupied the local port range
 * [base_port, base_port + count * offset). Only ports with a cached module
 * mapping are recreated. Creation errors are deliberately not propagated:
 * this is a best-effort restore on the unsplit/rollback path (note the
 * ignored mlxsw_sp_port_create() return value and void return type).
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port,
					 unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over original unsplit ports in the gap and recreate them. */
	for (i = 0; i < count * offset; i++) {
		port_mapping = mlxsw_sp->port_mapping[base_port + i];
		if (!port_mapping)
			continue;
		mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
	}
}
1873
1874 static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
1875                                        unsigned int count,
1876                                        unsigned int max_width)
1877 {
1878         enum mlxsw_res_id local_ports_in_x_res_id;
1879         int split_width = max_width / count;
1880
1881         if (split_width == 1)
1882                 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
1883         else if (split_width == 2)
1884                 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
1885         else if (split_width == 4)
1886                 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
1887         else
1888                 return -EINVAL;
1889
1890         if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
1891                 return -EINVAL;
1892         return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
1893 }
1894
1895 static struct mlxsw_sp_port *
1896 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1897 {
1898         if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
1899                 return mlxsw_sp->ports[local_port];
1900         return NULL;
1901 }
1902
/* devlink port split handler: split @local_port into @count child ports.
 * Validates the request (port exists, uses its module's full width, the
 * target local port range is free), removes the ports being replaced and
 * creates the split children. On failure the original unsplit ports are
 * recreated best-effort.
 *
 * Return: 0 on success or a negative errno; extack carries the user-visible
 * error message.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int max_width;
	u8 base_port;
	int offset;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Split port with non-max cannot be split. */
	if (mlxsw_sp_port->mapping.width != max_width) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
		return -EINVAL;
	}

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (offset < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	/* Only in case max split is being done, the local port and
	 * base port may differ.
	 */
	base_port = count == max_width ?
		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
		    local_port;

	/* Reject the split if any local port in the target range, other
	 * than the ones expected to exist, is already occupied.
	 */
	for (i = 0; i < count * offset; i++) {
		/* Expect base port to exist and also the one in the middle in
		 * case of maximal split count.
		 */
		if (i == 0 || (count == max_width && i == count / 2))
			continue;

		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Copy the mapping before removing the port it belongs to. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
					 count, offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Best-effort restore of the original unsplit ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
	return err;
}
1986
/* devlink port unsplit handler: remove the split children of the cluster
 * containing @local_port and recreate the original unsplit ports from the
 * cached module mappings.
 *
 * Return: 0 on success or a negative errno; extack carries the user-visible
 * error message.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	unsigned int count;
	int max_width;
	u8 base_port;
	int offset;
	int i;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Derive the split count from the child's width share of the module. */
	count = max_width / mlxsw_sp_port->mapping.width;

	/* WARN_ON: the offset was valid when the split was made, so failure
	 * here indicates an internal inconsistency.
	 */
	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (WARN_ON(offset < 0)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	base_port = mlxsw_sp_port->split_base_local_port;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);

	return 0;
}
2039
2040 static void
2041 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2042 {
2043         int i;
2044
2045         for (i = 0; i < TC_MAX_QUEUE; i++)
2046                 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2047 }
2048
/* PUDE (Port Up/Down Event) trap handler: propagate the operational status
 * reported by the device to the netdev carrier state.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	/* The port may already have been removed. */
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		/* Re-apply the PTP shaper now that the link is up. */
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
	}
}
2073
/* Common handler for SP1 PTP timestamp FIFO events: unpack every record in
 * the MTPPTR payload and hand each timestamp to the PTP core.
 * @ingress distinguishes ingress from egress FIFO events.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u8 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}
2097
2098 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2099                                               char *mtpptr_pl, void *priv)
2100 {
2101         struct mlxsw_sp *mlxsw_sp = priv;
2102
2103         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2104 }
2105
2106 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2107                                               char *mtpptr_pl, void *priv)
2108 {
2109         struct mlxsw_sp *mlxsw_sp = priv;
2110
2111         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2112 }
2113
/* Deliver a trapped packet to the stack via the receiving port's netdev,
 * without setting any offload forwarding marks. Updates the port's per-CPU
 * RX counters. Frees nothing on success; a packet for a non-existent port
 * is dropped silently (rate-limited warning only — NOTE(review): the skb is
 * not freed here on that path; presumably the caller owns it then).
 */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	/* u64_stats syncp guards the 64-bit counters on 32-bit hosts. */
	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
2138
2139 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2140                                            void *priv)
2141 {
2142         skb->offload_fwd_mark = 1;
2143         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2144 }
2145
2146 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2147                                               u8 local_port, void *priv)
2148 {
2149         skb->offload_l3_fwd_mark = 1;
2150         skb->offload_fwd_mark = 1;
2151         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2152 }
2153
/* Dispatch a trapped PTP packet to the ASIC-generation-specific receive
 * implementation selected in mlxsw_sp->ptp_ops.
 */
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u8 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
2159
/* Deliver a sampled packet to the psample subsystem and consume the skb on
 * all paths. The port's sampling state is read under RCU because it can be
 * torn down concurrently with packet reception.
 */
void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			     u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_sample *sample;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}

	rcu_read_lock();
	sample = rcu_dereference(mlxsw_sp_port->sample);
	/* Sampling may have been disabled on the port in the meantime. */
	if (!sample)
		goto out_unlock;
	size = sample->truncate ? sample->trunc_size : skb->len;
	psample_sample_packet(sample->psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0, sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}
2185
/* RX listener that delivers packets without offload forwarding marks. */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* RX listener that marks packets as HW-forwarded (L2). */
#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)     \
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,    \
		_is_ctrl, SP_##_trap_group, DISCARD)

/* RX listener that marks packets as HW-forwarded at both L2 and L3. */
#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
		_is_ctrl, SP_##_trap_group, DISCARD)

/* Event listener registered in the Spectrum event trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)                \
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2200
/* Traps and events common to all Spectrum generations, registered during
 * driver init by mlxsw_sp_traps_init().
 */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
2226
/* Listeners specific to the first Spectrum generation (SP1): PTP timestamp
 * FIFO events, registered via mlxsw_sp->listeners in mlxsw_sp_traps_init().
 */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
2232
/* Configure rate-limiting policers for the CPU trap groups handled in this
 * file (other groups are configured by spectrum_trap.c) and mark them as
 * used in the policer usage bitmap.
 *
 * Return: 0 on success, -EIO if the device does not report the policer
 * resource, or the QPCR write error.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		/* Policer index i is paired with trap group i; only the
		 * groups below get a policer here.
		 */
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			continue;
		}

		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
2273
/* Configure priority, traffic class and policer binding for the CPU trap
 * groups handled in this file. Trap group and policer share the same index,
 * except for the event group which gets no policer.
 *
 * Return: 0 on success, -EIO on missing resources or a policer index that
 * exceeds the device capability, or the HTGT write error.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		/* By convention the policer index equals the group index. */
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			/* Events are not rate limited. */
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
2320
2321 static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
2322                                    const struct mlxsw_listener listeners[],
2323                                    size_t listeners_count)
2324 {
2325         int i;
2326         int err;
2327
2328         for (i = 0; i < listeners_count; i++) {
2329                 err = mlxsw_core_trap_register(mlxsw_sp->core,
2330                                                &listeners[i],
2331                                                mlxsw_sp);
2332                 if (err)
2333                         goto err_listener_register;
2334
2335         }
2336         return 0;
2337
2338 err_listener_register:
2339         for (i--; i >= 0; i--) {
2340                 mlxsw_core_trap_unregister(mlxsw_sp->core,
2341                                            &listeners[i],
2342                                            mlxsw_sp);
2343         }
2344         return err;
2345 }
2346
2347 static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
2348                                       const struct mlxsw_listener listeners[],
2349                                       size_t listeners_count)
2350 {
2351         int i;
2352
2353         for (i = 0; i < listeners_count; i++) {
2354                 mlxsw_core_trap_unregister(mlxsw_sp->core,
2355                                            &listeners[i],
2356                                            mlxsw_sp);
2357         }
2358 }
2359
/* Set up packet trapping towards the CPU: allocate the policer usage
 * tracking structure, configure CPU policers and trap groups, and
 * register the common listeners followed by the ASIC-specific ones
 * (mlxsw_sp->listeners, set by the per-generation init).
 * Returns 0 on success or a negative errno; on failure everything done
 * so far is undone.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	/* One bit of 'policers_usage' per CPU policer; sized from the
	 * firmware-reported maximum via the flexible array member.
	 */
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		goto err_traps_register;

	/* Extra per-ASIC listeners (may be an empty set). */
	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}
2405
/* Tear down trap handling in reverse order of mlxsw_sp_traps_init():
 * ASIC-specific listeners first, then the common set, then the policer
 * usage tracking structure.
 */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	kfree(mlxsw_sp->trap);
}
2414
2415 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
2416
2417 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2418 {
2419         char slcr_pl[MLXSW_REG_SLCR_LEN];
2420         u32 seed;
2421         int err;
2422
2423         seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
2424                      MLXSW_SP_LAG_SEED_INIT);
2425         mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2426                                      MLXSW_REG_SLCR_LAG_HASH_DMAC |
2427                                      MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2428                                      MLXSW_REG_SLCR_LAG_HASH_VLANID |
2429                                      MLXSW_REG_SLCR_LAG_HASH_SIP |
2430                                      MLXSW_REG_SLCR_LAG_HASH_DIP |
2431                                      MLXSW_REG_SLCR_LAG_HASH_SPORT |
2432                                      MLXSW_REG_SLCR_LAG_HASH_DPORT |
2433                                      MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
2434         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2435         if (err)
2436                 return err;
2437
2438         if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
2439             !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
2440                 return -EIO;
2441
2442         mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
2443                                  sizeof(struct mlxsw_sp_upper),
2444                                  GFP_KERNEL);
2445         if (!mlxsw_sp->lags)
2446                 return -ENOMEM;
2447
2448         return 0;
2449 }
2450
/* Release the per-LAG tracking array allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
2455
2456 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
2457 {
2458         char htgt_pl[MLXSW_REG_HTGT_LEN];
2459         int err;
2460
2461         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
2462                             MLXSW_REG_HTGT_INVALID_POLICER,
2463                             MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2464                             MLXSW_REG_HTGT_DEFAULT_TC);
2465         err =  mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2466         if (err)
2467                 return err;
2468
2469         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
2470                             MLXSW_REG_HTGT_INVALID_POLICER,
2471                             MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2472                             MLXSW_REG_HTGT_DEFAULT_TC);
2473         err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2474         if (err)
2475                 return err;
2476
2477         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
2478                             MLXSW_REG_HTGT_INVALID_POLICER,
2479                             MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2480                             MLXSW_REG_HTGT_DEFAULT_TC);
2481         err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2482         if (err)
2483                 return err;
2484
2485         mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
2486                             MLXSW_REG_HTGT_INVALID_POLICER,
2487                             MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2488                             MLXSW_REG_HTGT_DEFAULT_TC);
2489         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2490 }
2491
/* Spectrum-1 flavor of the PTP operations; selected by mlxsw_sp1_init().
 * The implementations are declared in spectrum_ptp.h.
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
};
2507
/* Spectrum-2 flavor of the PTP operations; selected by mlxsw_sp2_init()
 * and mlxsw_sp3_init(). The implementations are declared in
 * spectrum_ptp.h.
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
};
2523
2524 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
2525                                     unsigned long event, void *ptr);
2526
/* Common initialization path shared by all Spectrum generations. The
 * per-ASIC entry points (mlxsw_sp1_init() and friends) fill in the
 * generation-specific operation tables in 'mlxsw_sp' and then call here.
 * Sub-systems come up in dependency order; on failure they are unwound
 * in reverse via the error ladder below, which mlxsw_sp_fini() mirrors.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* The PTP clock is only instantiated when the bus can read the
	 * free-running counter.
	 */
	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

	/* Error unwind: strictly the reverse of the order above. */
err_ports_create:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}
2729
2730 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
2731                           const struct mlxsw_bus_info *mlxsw_bus_info,
2732                           struct netlink_ext_ack *extack)
2733 {
2734         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2735
2736         mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
2737         mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
2738         mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
2739         mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
2740         mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
2741         mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
2742         mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
2743         mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
2744         mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
2745         mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
2746         mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
2747         mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
2748         mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
2749         mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
2750         mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
2751         mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
2752         mlxsw_sp->listeners = mlxsw_sp1_listener;
2753         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
2754         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
2755
2756         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
2757 }
2758
2759 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
2760                           const struct mlxsw_bus_info *mlxsw_bus_info,
2761                           struct netlink_ext_ack *extack)
2762 {
2763         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2764
2765         mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
2766         mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
2767         mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
2768         mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
2769         mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
2770         mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
2771         mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
2772         mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
2773         mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
2774         mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
2775         mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
2776         mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
2777         mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
2778         mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
2779         mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
2780         mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
2781         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
2782
2783         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
2784 }
2785
2786 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
2787                           const struct mlxsw_bus_info *mlxsw_bus_info,
2788                           struct netlink_ext_ack *extack)
2789 {
2790         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2791
2792         mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
2793         mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
2794         mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
2795         mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
2796         mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
2797         mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
2798         mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
2799         mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
2800         mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
2801         mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
2802         mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
2803         mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
2804         mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
2805         mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
2806         mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
2807         mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
2808         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
2809
2810         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
2811 }
2812
/* Tear down everything set up by mlxsw_sp_init(), in reverse order of
 * initialization.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* 'clock' may legitimately be NULL (see mlxsw_sp_init()), in which
	 * case no PTP state was created either.
	 */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}
2841
/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

/* Configuration profile handed to the core for Spectrum-1 ASICs. Each
 * value is paired with a "used_*" selector field.
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	/* Static KVD partitioning; the 59/41 single/double parts ratio is
	 * also used by mlxsw_sp1_resources_kvd_register().
	 */
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2871
/* Same as the Spectrum-1 profile, except that no KVD size partitioning
 * is requested.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2891
2892 static void
2893 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
2894                                       struct devlink_resource_size_params *kvd_size_params,
2895                                       struct devlink_resource_size_params *linear_size_params,
2896                                       struct devlink_resource_size_params *hash_double_size_params,
2897                                       struct devlink_resource_size_params *hash_single_size_params)
2898 {
2899         u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
2900                                                  KVD_SINGLE_MIN_SIZE);
2901         u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
2902                                                  KVD_DOUBLE_MIN_SIZE);
2903         u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
2904         u32 linear_size_min = 0;
2905
2906         devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
2907                                           MLXSW_SP_KVD_GRANULARITY,
2908                                           DEVLINK_RESOURCE_UNIT_ENTRY);
2909         devlink_resource_size_params_init(linear_size_params, linear_size_min,
2910                                           kvd_size - single_size_min -
2911                                           double_size_min,
2912                                           MLXSW_SP_KVD_GRANULARITY,
2913                                           DEVLINK_RESOURCE_UNIT_ENTRY);
2914         devlink_resource_size_params_init(hash_double_size_params,
2915                                           double_size_min,
2916                                           kvd_size - single_size_min -
2917                                           linear_size_min,
2918                                           MLXSW_SP_KVD_GRANULARITY,
2919                                           DEVLINK_RESOURCE_UNIT_ENTRY);
2920         devlink_resource_size_params_init(hash_single_size_params,
2921                                           single_size_min,
2922                                           kvd_size - double_size_min -
2923                                           linear_size_min,
2924                                           MLXSW_SP_KVD_GRANULARITY,
2925                                           DEVLINK_RESOURCE_UNIT_ENTRY);
2926 }
2927
2928 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
2929 {
2930         struct devlink *devlink = priv_to_devlink(mlxsw_core);
2931         struct devlink_resource_size_params hash_single_size_params;
2932         struct devlink_resource_size_params hash_double_size_params;
2933         struct devlink_resource_size_params linear_size_params;
2934         struct devlink_resource_size_params kvd_size_params;
2935         u32 kvd_size, single_size, double_size, linear_size;
2936         const struct mlxsw_config_profile *profile;
2937         int err;
2938
2939         profile = &mlxsw_sp1_config_profile;
2940         if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
2941                 return -EIO;
2942
2943         mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
2944                                               &linear_size_params,
2945                                               &hash_double_size_params,
2946                                               &hash_single_size_params);
2947
2948         kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
2949         err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
2950                                         kvd_size, MLXSW_SP_RESOURCE_KVD,
2951                                         DEVLINK_RESOURCE_ID_PARENT_TOP,
2952                                         &kvd_size_params);
2953         if (err)
2954                 return err;
2955
2956         linear_size = profile->kvd_linear_size;
2957         err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
2958                                         linear_size,
2959                                         MLXSW_SP_RESOURCE_KVD_LINEAR,
2960                                         MLXSW_SP_RESOURCE_KVD,
2961                                         &linear_size_params);
2962         if (err)
2963                 return err;
2964
2965         err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
2966         if  (err)
2967                 return err;
2968
2969         double_size = kvd_size - linear_size;
2970         double_size *= profile->kvd_hash_double_parts;
2971         double_size /= profile->kvd_hash_double_parts +
2972                        profile->kvd_hash_single_parts;
2973         double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
2974         err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
2975                                         double_size,
2976                                         MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
2977                                         MLXSW_SP_RESOURCE_KVD,
2978                                         &hash_double_size_params);
2979         if (err)
2980                 return err;
2981
2982         single_size = kvd_size - double_size - linear_size;
2983         err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
2984                                         single_size,
2985                                         MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
2986                                         MLXSW_SP_RESOURCE_KVD,
2987                                         &hash_single_size_params);
2988         if (err)
2989                 return err;
2990
2991         return 0;
2992 }
2993
2994 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
2995 {
2996         struct devlink *devlink = priv_to_devlink(mlxsw_core);
2997         struct devlink_resource_size_params kvd_size_params;
2998         u32 kvd_size;
2999
3000         if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3001                 return -EIO;
3002
3003         kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3004         devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3005                                           MLXSW_SP_KVD_GRANULARITY,
3006                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3007
3008         return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3009                                          kvd_size, MLXSW_SP_RESOURCE_KVD,
3010                                          DEVLINK_RESOURCE_ID_PARENT_TOP,
3011                                          &kvd_size_params);
3012 }
3013
3014 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3015 {
3016         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3017         struct devlink_resource_size_params span_size_params;
3018         u32 max_span;
3019
3020         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3021                 return -EIO;
3022
3023         max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3024         devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3025                                           1, DEVLINK_RESOURCE_UNIT_ENTRY);
3026
3027         return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3028                                          max_span, MLXSW_SP_RESOURCE_SPAN,
3029                                          DEVLINK_RESOURCE_ID_PARENT_TOP,
3030                                          &span_size_params);
3031 }
3032
/* Register all devlink resources of a Spectrum-1 device: KVD, SPAN
 * agents, flow counters and policers. On failure, everything
 * registered so far is unregistered.
 */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	return 0;

err_resources_counter_register:
err_resources_span_register:
	/* A NULL resource unregisters all resources of this devlink
	 * instance, so a single label suffices for every failure point.
	 */
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}
3060
/* Register all devlink resources of a Spectrum-2 (and later) device:
 * KVD, SPAN agents, flow counters and policers. On failure, everything
 * registered so far is unregistered.
 */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	return 0;

err_resources_counter_register:
err_resources_span_register:
	/* A NULL resource unregisters all resources of this devlink
	 * instance, so a single label suffices for every failure point.
	 */
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}
3088
/* Determine the sizes of the three KVD partitions (linear, hash-single
 * and hash-double). User-provided sizes are read from the devlink
 * resources; when a resource size is unavailable, a default is derived
 * from the config profile and the total KVD size.
 * Returns 0 on success, -EIO when the required device resources are
 * missing or the computed sizes violate the device minima.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplications of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		/* Default: split the remaining (hash) space according to
		 * the profile's double/single parts ratio, rounded down
		 * to the KVD granularity.
		 */
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		/* Single gets whatever is left of the total KVD. */
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}
3143
3144 static int
3145 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
3146                                              struct devlink_param_gset_ctx *ctx)
3147 {
3148         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3149         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3150
3151         ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
3152         return 0;
3153 }
3154
3155 static int
3156 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
3157                                              struct devlink_param_gset_ctx *ctx)
3158 {
3159         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3160         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3161
3162         return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
3163 }
3164
/* Devlink parameters specific to Spectrum-2 and later ASICs. */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};
3174
/* Register the Spectrum-2 devlink parameters and publish an initial
 * value of 0 for the ACL region rehash interval parameter.
 */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		return err;

	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;
}
3192
3193 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
3194 {
3195         devlink_params_unregister(priv_to_devlink(mlxsw_core),
3196                                   mlxsw_sp2_devlink_params,
3197                                   ARRAY_SIZE(mlxsw_sp2_devlink_params));
3198 }
3199
/* Core callback for a transmitted skb: strip the mlxsw Tx header and
 * hand the skb to the PTP implementation of this ASIC generation.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}
3208
/* mlxsw core driver callbacks for Spectrum-1 devices. */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp1_fw_rev,
	.fw_filename			= MLXSW_SP1_FW_FILENAME,
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.res_query_enabled		= true,
	.fw_fatal_enabled		= true,
	.temp_warn_enabled		= true,
};
3248
/* mlxsw core driver callbacks for Spectrum-2 devices. Unlike SP1, SP2
 * has no KVD sizing callback and registers devlink parameters.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp2_fw_rev,
	.fw_filename			= MLXSW_SP2_FW_FILENAME,
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
	.fw_fatal_enabled		= true,
	.temp_warn_enabled		= true,
};
3289
/* mlxsw core driver callbacks for Spectrum-3 devices. Shares the
 * Spectrum-2 resources, params and config profile; only the firmware
 * and init callback are SP3-specific.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind				= mlxsw_sp3_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp3_fw_rev,
	.fw_filename			= MLXSW_SP3_FW_FILENAME,
	.init				= mlxsw_sp3_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
	.fw_fatal_enabled		= true,
	.temp_warn_enabled		= true,
};
3330
3331 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3332 {
3333         return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3334 }
3335
3336 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
3337                                    struct netdev_nested_priv *priv)
3338 {
3339         int ret = 0;
3340
3341         if (mlxsw_sp_port_dev_check(lower_dev)) {
3342                 priv->data = (void *)netdev_priv(lower_dev);
3343                 ret = 1;
3344         }
3345
3346         return ret;
3347 }
3348
3349 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3350 {
3351         struct netdev_nested_priv priv = {
3352                 .data = NULL,
3353         };
3354
3355         if (mlxsw_sp_port_dev_check(dev))
3356                 return netdev_priv(dev);
3357
3358         netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
3359
3360         return (struct mlxsw_sp_port *)priv.data;
3361 }
3362
3363 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3364 {
3365         struct mlxsw_sp_port *mlxsw_sp_port;
3366
3367         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3368         return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3369 }
3370
3371 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3372 {
3373         struct netdev_nested_priv priv = {
3374                 .data = NULL,
3375         };
3376
3377         if (mlxsw_sp_port_dev_check(dev))
3378                 return netdev_priv(dev);
3379
3380         netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3381                                       &priv);
3382
3383         return (struct mlxsw_sp_port *)priv.data;
3384 }
3385
/* Find the mlxsw port under @dev and return it with a reference held
 * on its netdevice; the caller releases it with mlxsw_sp_port_dev_put().
 * The RCU read lock keeps the lower-device walk and the dev_hold()
 * atomic with respect to the port disappearing.
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}
3397
/* Release the netdevice reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
3402
/* Make @mlxsw_sp_port leave every bridge that @lag_dev, or one of its
 * direct upper devices, is a port of. Used when the port detaches from
 * the LAG and those bridge memberships no longer apply to it.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	/* The LAG itself may be directly enslaved to a bridge. */
	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	/* Uppers of the LAG (e.g. VLAN devices) may also be bridge ports. */
	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}
3421
3422 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3423 {
3424         char sldr_pl[MLXSW_REG_SLDR_LEN];
3425
3426         mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3427         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3428 }
3429
3430 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3431 {
3432         char sldr_pl[MLXSW_REG_SLDR_LEN];
3433
3434         mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3435         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3436 }
3437
3438 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3439                                      u16 lag_id, u8 port_index)
3440 {
3441         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3442         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3443
3444         mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3445                                       lag_id, port_index);
3446         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3447 }
3448
3449 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3450                                         u16 lag_id)
3451 {
3452         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3453         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3454
3455         mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3456                                          lag_id);
3457         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3458 }
3459
3460 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3461                                         u16 lag_id)
3462 {
3463         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3464         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3465
3466         mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3467                                         lag_id);
3468         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3469 }
3470
3471 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3472                                          u16 lag_id)
3473 {
3474         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3475         char slcor_pl[MLXSW_REG_SLCOR_LEN];
3476
3477         mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3478                                          lag_id);
3479         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3480 }
3481
3482 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3483                                   struct net_device *lag_dev,
3484                                   u16 *p_lag_id)
3485 {
3486         struct mlxsw_sp_upper *lag;
3487         int free_lag_id = -1;
3488         u64 max_lag;
3489         int i;
3490
3491         max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3492         for (i = 0; i < max_lag; i++) {
3493                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3494                 if (lag->ref_count) {
3495                         if (lag->dev == lag_dev) {
3496                                 *p_lag_id = i;
3497                                 return 0;
3498                         }
3499                 } else if (free_lag_id < 0) {
3500                         free_lag_id = i;
3501                 }
3502         }
3503         if (free_lag_id < 0)
3504                 return -EBUSY;
3505         *p_lag_id = free_lag_id;
3506         return 0;
3507 }
3508
3509 static bool
3510 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3511                           struct net_device *lag_dev,
3512                           struct netdev_lag_upper_info *lag_upper_info,
3513                           struct netlink_ext_ack *extack)
3514 {
3515         u16 lag_id;
3516
3517         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
3518                 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
3519                 return false;
3520         }
3521         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
3522                 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
3523                 return false;
3524         }
3525         return true;
3526 }
3527
3528 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3529                                        u16 lag_id, u8 *p_port_index)
3530 {
3531         u64 max_lag_members;
3532         int i;
3533
3534         max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3535                                              MAX_LAG_MEMBERS);
3536         for (i = 0; i < max_lag_members; i++) {
3537                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3538                         *p_port_index = i;
3539                         return 0;
3540                 }
3541         }
3542         return -EBUSY;
3543 }
3544
/* Enslave @mlxsw_sp_port to the LAG represented by @lag_dev: resolve
 * (or allocate) a hardware LAG index, create the LAG on first use, add
 * the port to its collector and record the member mapping in the core.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	/* First member: instantiate the LAG in hardware. */
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;

err_col_port_add:
	/* Tear the LAG down again only if we were the one creating it. */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
3589
/* Detach @mlxsw_sp_port from its LAG: remove it from the collector,
 * flush port VLANs and bridge memberships that were valid only while
 * lagged, and destroy the LAG when the last member leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are member in
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}
3623
3624 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3625                                       u16 lag_id)
3626 {
3627         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3628         char sldr_pl[MLXSW_REG_SLDR_LEN];
3629
3630         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3631                                          mlxsw_sp_port->local_port);
3632         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3633 }
3634
3635 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3636                                          u16 lag_id)
3637 {
3638         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3639         char sldr_pl[MLXSW_REG_SLDR_LEN];
3640
3641         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3642                                             mlxsw_sp_port->local_port);
3643         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3644 }
3645
/* Enable collection and then distribution for the port on its LAG,
 * rolling back collection if distribution cannot be enabled.
 */
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}
3666
/* Disable distribution and then collection for the port on its LAG,
 * re-adding the port to the distributor if collection cannot be
 * disabled (mirror image of mlxsw_sp_port_lag_col_dist_enable()).
 */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}
3688
3689 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3690                                      struct netdev_lag_lower_state_info *info)
3691 {
3692         if (info->tx_enabled)
3693                 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
3694         else
3695                 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
3696 }
3697
/* Set the STP state of every VLAN on the port via the SPMS register:
 * forwarding when @enable, discarding otherwise.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	/* SPMS payload is heap-allocated (covers all VLAN_N_VID entries). */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
3722
/* Prepare the port for enslavement to an Open vSwitch datapath: enter
 * virtual port (vp) mode, set all VLANs to forwarding, add VIDs
 * 1..4094 and disable learning on each of them. On failure every step
 * taken so far is rolled back.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning on the VIDs already processed. */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
3757
/* Undo mlxsw_sp_port_ovs_join(): re-enable learning on all VIDs,
 * remove the VLANs, return the port to discarding state and leave
 * virtual port mode.
 */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
3771
3772 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
3773 {
3774         unsigned int num_vxlans = 0;
3775         struct net_device *dev;
3776         struct list_head *iter;
3777
3778         netdev_for_each_lower_dev(br_dev, dev, iter) {
3779                 if (netif_is_vxlan(dev))
3780                         num_vxlans++;
3781         }
3782
3783         return num_vxlans > 1;
3784 }
3785
/* In a VLAN-aware bridge, each VLAN mapped to a VxLAN device must be
 * unique. Returns false when two VxLAN lower devices of @br_dev map to
 * the same VID.
 */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		/* VxLAN devices without a mapped VID are not a conflict. */
		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}
3809
3810 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
3811                                            struct netlink_ext_ack *extack)
3812 {
3813         if (br_multicast_enabled(br_dev)) {
3814                 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
3815                 return false;
3816         }
3817
3818         if (!br_vlan_enabled(br_dev) &&
3819             mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
3820                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
3821                 return false;
3822         }
3823
3824         if (br_vlan_enabled(br_dev) &&
3825             !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
3826                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
3827                 return false;
3828         }
3829
3830         return true;
3831 }
3832
/* Handle NETDEV_PRECHANGEUPPER / NETDEV_CHANGEUPPER for a front panel
 * port (@dev). PRECHANGEUPPER vetoes unsupported upper devices and
 * topologies (reason reported via extack); CHANGEUPPER applies or tears
 * down the corresponding configuration. @lower_dev is the device being
 * linked under the upper (the port itself, or a LAG / VLAN above it).
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
                                               struct net_device *dev,
                                               unsigned long event, void *ptr)
{
        struct netdev_notifier_changeupper_info *info;
        struct mlxsw_sp_port *mlxsw_sp_port;
        struct netlink_ext_ack *extack;
        struct net_device *upper_dev;
        struct mlxsw_sp *mlxsw_sp;
        int err = 0;

        mlxsw_sp_port = netdev_priv(dev);
        mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        info = ptr;
        extack = netdev_notifier_info_to_extack(&info->info);

        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                upper_dev = info->upper_dev;
                /* Only VLAN, LAG, bridge, OVS and macvlan uppers are
                 * supported directly on top of a port.
                 */
                if (!is_vlan_dev(upper_dev) &&
                    !netif_is_lag_master(upper_dev) &&
                    !netif_is_bridge_master(upper_dev) &&
                    !netif_is_ovs_master(upper_dev) &&
                    !netif_is_macvlan(upper_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
                        return -EINVAL;
                }
                /* Unlinking is never vetoed. */
                if (!info->linking)
                        break;
                /* A not-yet-offloaded bridge with VxLAN lowers must pass
                 * the driver's VxLAN topology checks.
                 */
                if (netif_is_bridge_master(upper_dev) &&
                    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
                    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
                    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
                        return -EOPNOTSUPP;
                /* Reject stacking under an upper that itself has uppers,
                 * except when joining an already offloaded bridge.
                 */
                if (netdev_has_any_upper_dev(upper_dev) &&
                    (!netif_is_bridge_master(upper_dev) ||
                     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
                                                          upper_dev))) {
                        NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
                        return -EINVAL;
                }
                if (netif_is_lag_master(upper_dev) &&
                    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
                                               info->upper_info, extack))
                        return -EINVAL;
                if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
                        return -EINVAL;
                }
                if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
                    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
                        NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
                        return -EINVAL;
                }
                /* macvlan uppers require an existing router interface. */
                if (netif_is_macvlan(upper_dev) &&
                    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
                        return -EOPNOTSUPP;
                }
                if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
                        return -EINVAL;
                }
                if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
                        return -EINVAL;
                }
                break;
        case NETDEV_CHANGEUPPER:
                upper_dev = info->upper_dev;
                if (netif_is_bridge_master(upper_dev)) {
                        if (info->linking)
                                err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
                                                                lower_dev,
                                                                upper_dev,
                                                                extack);
                        else
                                mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
                                                           lower_dev,
                                                           upper_dev);
                } else if (netif_is_lag_master(upper_dev)) {
                        if (info->linking) {
                                err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
                                                             upper_dev);
                        } else {
                                mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
                                mlxsw_sp_port_lag_leave(mlxsw_sp_port,
                                                        upper_dev);
                        }
                } else if (netif_is_ovs_master(upper_dev)) {
                        if (info->linking)
                                err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
                        else
                                mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
                } else if (netif_is_macvlan(upper_dev)) {
                        if (!info->linking)
                                mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
                } else if (is_vlan_dev(upper_dev)) {
                        struct net_device *br_dev;

                        /* Only act when a VLAN upper that is a bridge
                         * port is being unlinked from this port.
                         */
                        if (!netif_is_bridge_port(upper_dev))
                                break;
                        if (info->linking)
                                break;
                        br_dev = netdev_master_upper_dev_get(upper_dev);
                        mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
                                                   br_dev);
                }
                break;
        }

        return err;
}
3946
3947 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
3948                                                unsigned long event, void *ptr)
3949 {
3950         struct netdev_notifier_changelowerstate_info *info;
3951         struct mlxsw_sp_port *mlxsw_sp_port;
3952         int err;
3953
3954         mlxsw_sp_port = netdev_priv(dev);
3955         info = ptr;
3956
3957         switch (event) {
3958         case NETDEV_CHANGELOWERSTATE:
3959                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
3960                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
3961                                                         info->lower_state_info);
3962                         if (err)
3963                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
3964                 }
3965                 break;
3966         }
3967
3968         return 0;
3969 }
3970
3971 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
3972                                          struct net_device *port_dev,
3973                                          unsigned long event, void *ptr)
3974 {
3975         switch (event) {
3976         case NETDEV_PRECHANGEUPPER:
3977         case NETDEV_CHANGEUPPER:
3978                 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
3979                                                            event, ptr);
3980         case NETDEV_CHANGELOWERSTATE:
3981                 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
3982                                                            ptr);
3983         }
3984
3985         return 0;
3986 }
3987
3988 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3989                                         unsigned long event, void *ptr)
3990 {
3991         struct net_device *dev;
3992         struct list_head *iter;
3993         int ret;
3994
3995         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3996                 if (mlxsw_sp_port_dev_check(dev)) {
3997                         ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
3998                                                             ptr);
3999                         if (ret)
4000                                 return ret;
4001                 }
4002         }
4003
4004         return 0;
4005 }
4006
/* Handle NETDEV_PRECHANGEUPPER / NETDEV_CHANGEUPPER for a VLAN device
 * (@vlan_dev, tagged with @vid) on top of a port (@dev). Only bridge
 * and macvlan uppers are supported; PRECHANGEUPPER vetoes everything
 * else via extack, CHANGEUPPER applies or reverts the configuration.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
                                              struct net_device *dev,
                                              unsigned long event, void *ptr,
                                              u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct netdev_notifier_changeupper_info *info = ptr;
        struct netlink_ext_ack *extack;
        struct net_device *upper_dev;
        int err = 0;

        extack = netdev_notifier_info_to_extack(&info->info);

        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                upper_dev = info->upper_dev;
                if (!netif_is_bridge_master(upper_dev) &&
                    !netif_is_macvlan(upper_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
                        return -EINVAL;
                }
                /* Unlinking is never vetoed. */
                if (!info->linking)
                        break;
                /* A not-yet-offloaded bridge with VxLAN lowers must pass
                 * the driver's VxLAN topology checks.
                 */
                if (netif_is_bridge_master(upper_dev) &&
                    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
                    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
                    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
                        return -EOPNOTSUPP;
                /* Reject stacking under an upper that itself has uppers,
                 * except when joining an already offloaded bridge.
                 */
                if (netdev_has_any_upper_dev(upper_dev) &&
                    (!netif_is_bridge_master(upper_dev) ||
                     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
                                                          upper_dev))) {
                        NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
                        return -EINVAL;
                }
                /* macvlan uppers require an existing router interface. */
                if (netif_is_macvlan(upper_dev) &&
                    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
                        return -EOPNOTSUPP;
                }
                break;
        case NETDEV_CHANGEUPPER:
                upper_dev = info->upper_dev;
                if (netif_is_bridge_master(upper_dev)) {
                        if (info->linking)
                                err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
                                                                vlan_dev,
                                                                upper_dev,
                                                                extack);
                        else
                                mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
                                                           vlan_dev,
                                                           upper_dev);
                } else if (netif_is_macvlan(upper_dev)) {
                        if (!info->linking)
                                mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
                } else {
                        /* PRECHANGEUPPER should have rejected any other
                         * upper type; reaching here is a logic error.
                         */
                        err = -EINVAL;
                        WARN_ON(1);
                }
                break;
        }

        return err;
}
4073
4074 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4075                                                   struct net_device *lag_dev,
4076                                                   unsigned long event,
4077                                                   void *ptr, u16 vid)
4078 {
4079         struct net_device *dev;
4080         struct list_head *iter;
4081         int ret;
4082
4083         netdev_for_each_lower_dev(lag_dev, dev, iter) {
4084                 if (mlxsw_sp_port_dev_check(dev)) {
4085                         ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4086                                                                  event, ptr,
4087                                                                  vid);
4088                         if (ret)
4089                                 return ret;
4090                 }
4091         }
4092
4093         return 0;
4094 }
4095
4096 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
4097                                                 struct net_device *br_dev,
4098                                                 unsigned long event, void *ptr,
4099                                                 u16 vid)
4100 {
4101         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
4102         struct netdev_notifier_changeupper_info *info = ptr;
4103         struct netlink_ext_ack *extack;
4104         struct net_device *upper_dev;
4105
4106         if (!mlxsw_sp)
4107                 return 0;
4108
4109         extack = netdev_notifier_info_to_extack(&info->info);
4110
4111         switch (event) {
4112         case NETDEV_PRECHANGEUPPER:
4113                 upper_dev = info->upper_dev;
4114                 if (!netif_is_macvlan(upper_dev)) {
4115                         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4116                         return -EOPNOTSUPP;
4117                 }
4118                 if (!info->linking)
4119                         break;
4120                 if (netif_is_macvlan(upper_dev) &&
4121                     !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
4122                         NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
4123                         return -EOPNOTSUPP;
4124                 }
4125                 break;
4126         case NETDEV_CHANGEUPPER:
4127                 upper_dev = info->upper_dev;
4128                 if (info->linking)
4129                         break;
4130                 if (netif_is_macvlan(upper_dev))
4131                         mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
4132                 break;
4133         }
4134
4135         return 0;
4136 }
4137
4138 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4139                                          unsigned long event, void *ptr)
4140 {
4141         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4142         u16 vid = vlan_dev_vlan_id(vlan_dev);
4143
4144         if (mlxsw_sp_port_dev_check(real_dev))
4145                 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4146                                                           event, ptr, vid);
4147         else if (netif_is_lag_master(real_dev))
4148                 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4149                                                               real_dev, event,
4150                                                               ptr, vid);
4151         else if (netif_is_bridge_master(real_dev))
4152                 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
4153                                                             event, ptr, vid);
4154
4155         return 0;
4156 }
4157
4158 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
4159                                            unsigned long event, void *ptr)
4160 {
4161         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
4162         struct netdev_notifier_changeupper_info *info = ptr;
4163         struct netlink_ext_ack *extack;
4164         struct net_device *upper_dev;
4165
4166         if (!mlxsw_sp)
4167                 return 0;
4168
4169         extack = netdev_notifier_info_to_extack(&info->info);
4170
4171         switch (event) {
4172         case NETDEV_PRECHANGEUPPER:
4173                 upper_dev = info->upper_dev;
4174                 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
4175                         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4176                         return -EOPNOTSUPP;
4177                 }
4178                 if (!info->linking)
4179                         break;
4180                 if (netif_is_macvlan(upper_dev) &&
4181                     !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
4182                         NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
4183                         return -EOPNOTSUPP;
4184                 }
4185                 break;
4186         case NETDEV_CHANGEUPPER:
4187                 upper_dev = info->upper_dev;
4188                 if (info->linking)
4189                         break;
4190                 if (is_vlan_dev(upper_dev))
4191                         mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
4192                 if (netif_is_macvlan(upper_dev))
4193                         mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
4194                 break;
4195         }
4196
4197         return 0;
4198 }
4199
4200 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
4201                                             unsigned long event, void *ptr)
4202 {
4203         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
4204         struct netdev_notifier_changeupper_info *info = ptr;
4205         struct netlink_ext_ack *extack;
4206
4207         if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
4208                 return 0;
4209
4210         extack = netdev_notifier_info_to_extack(&info->info);
4211
4212         /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
4213         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4214
4215         return -EOPNOTSUPP;
4216 }
4217
4218 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4219 {
4220         struct netdev_notifier_changeupper_info *info = ptr;
4221
4222         if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4223                 return false;
4224         return netif_is_l3_master(info->upper_dev);
4225 }
4226
/* Handle events on a VxLAN device: join the hardware representation of
 * the bridge when the device is linked to an offloaded bridge
 * (CHANGEUPPER) or brought up under one (PRE_UP), and leave it on
 * unlinking or NETDEV_DOWN. Only VLAN-unaware bridges are joined with
 * VID 0 here; VLAN-aware mappings are handled elsewhere (see comments).
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
                                          struct net_device *dev,
                                          unsigned long event, void *ptr)
{
        struct netdev_notifier_changeupper_info *cu_info;
        struct netdev_notifier_info *info = ptr;
        struct netlink_ext_ack *extack;
        struct net_device *upper_dev;

        extack = netdev_notifier_info_to_extack(info);

        switch (event) {
        case NETDEV_CHANGEUPPER:
                cu_info = container_of(info,
                                       struct netdev_notifier_changeupper_info,
                                       info);
                upper_dev = cu_info->upper_dev;
                if (!netif_is_bridge_master(upper_dev))
                        return 0;
                /* Ignore bridges unrelated to this driver. */
                if (!mlxsw_sp_lower_get(upper_dev))
                        return 0;
                if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
                        return -EOPNOTSUPP;
                if (cu_info->linking) {
                        /* NETDEV_PRE_UP will take care of a device that
                         * is not yet running.
                         */
                        if (!netif_running(dev))
                                return 0;
                        /* When the bridge is VLAN-aware, the VNI of the VxLAN
                         * device needs to be mapped to a VLAN, but at this
                         * point no VLANs are configured on the VxLAN device
                         */
                        if (br_vlan_enabled(upper_dev))
                                return 0;
                        return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
                                                          dev, 0, extack);
                } else {
                        /* VLANs were already flushed, which triggered the
                         * necessary cleanup
                         */
                        if (br_vlan_enabled(upper_dev))
                                return 0;
                        mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
                }
                break;
        case NETDEV_PRE_UP:
                upper_dev = netdev_master_upper_dev_get(dev);
                if (!upper_dev)
                        return 0;
                if (!netif_is_bridge_master(upper_dev))
                        return 0;
                if (!mlxsw_sp_lower_get(upper_dev))
                        return 0;
                return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
                                                  extack);
        case NETDEV_DOWN:
                upper_dev = netdev_master_upper_dev_get(dev);
                if (!upper_dev)
                        return 0;
                if (!netif_is_bridge_master(upper_dev))
                        return 0;
                if (!mlxsw_sp_lower_get(upper_dev))
                        return 0;
                mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
                break;
        }

        return 0;
}
4294
/* Top-level netdevice notifier callback. Invalidates mirroring (SPAN)
 * entries that target an unregistering netdev, re-resolves SPAN state
 * after every event, and then dispatches the event by device type.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct mlxsw_sp_span_entry *span_entry;
        struct mlxsw_sp *mlxsw_sp;
        int err = 0;

        mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
        if (event == NETDEV_UNREGISTER) {
                span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
                if (span_entry)
                        mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
        }
        mlxsw_sp_span_respin(mlxsw_sp);

        /* The VxLAN check is deliberately a separate 'if', not part of
         * the else-if chain below, so a VxLAN device also falls through
         * to the remaining handlers.
         */
        if (netif_is_vxlan(dev))
                err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
        if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
                err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
                                                       event, ptr);
        else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
                err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
                                                       event, ptr);
        else if (event == NETDEV_PRE_CHANGEADDR ||
                 event == NETDEV_CHANGEADDR ||
                 event == NETDEV_CHANGEMTU)
                err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
        else if (mlxsw_sp_is_vrf_event(event, ptr))
                err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
        else if (mlxsw_sp_port_dev_check(dev))
                err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
        else if (netif_is_lag_master(dev))
                err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
        else if (is_vlan_dev(dev))
                err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
        else if (netif_is_bridge_master(dev))
                err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
        else if (netif_is_macvlan(dev))
                err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

        return notifier_from_errno(err);
}
4338
/* Validator notifier blocks registered in mlxsw_sp_module_init() for
 * IPv4 / IPv6 address events.
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
        .notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
        .notifier_call = mlxsw_sp_inet6addr_valid_event,
};

/* PCI ID table and driver glue for Spectrum-1. */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
        {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
        {0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
        .name = mlxsw_sp1_driver_name,
        .id_table = mlxsw_sp1_pci_id_table,
};

/* PCI ID table and driver glue for Spectrum-2. */
static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
        {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
        {0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
        .name = mlxsw_sp2_driver_name,
        .id_table = mlxsw_sp2_pci_id_table,
};

/* PCI ID table and driver glue for Spectrum-3. */
static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
        {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
        {0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
        .name = mlxsw_sp3_driver_name,
        .id_table = mlxsw_sp3_pci_id_table,
};
4376
/* Module entry point: register the address validator notifiers, then the
 * core and PCI drivers for all three Spectrum generations. On any
 * failure, everything registered so far is unwound in reverse order via
 * the goto chain below.
 */
static int __init mlxsw_sp_module_init(void)
{
        int err;

        register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
        register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

        err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
        if (err)
                goto err_sp1_core_driver_register;

        err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
        if (err)
                goto err_sp2_core_driver_register;

        err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
        if (err)
                goto err_sp3_core_driver_register;

        err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
        if (err)
                goto err_sp1_pci_driver_register;

        err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
        if (err)
                goto err_sp2_pci_driver_register;

        err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
        if (err)
                goto err_sp3_pci_driver_register;

        return 0;

err_sp3_pci_driver_register:
        mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
        mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
        mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
        mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
        mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
        unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
        unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
        return err;
}
4425
/* Module exit: unregister everything in the exact reverse order of
 * mlxsw_sp_module_init().
 */
static void __exit mlxsw_sp_module_exit(void)
{
        mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
        mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
        mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
        mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
        mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
        mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
        unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
        unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
4437
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

/* Module metadata: license/author/description, PCI device tables for
 * module autoloading, and the firmware image names the driver uses.
 */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);