/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>

#include "spectrum.h"
#include "pci_hw.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
                              unsigned int counter_index, u64 *packets,
                              u64 *bytes)
{
        char mgpc_pl[MLXSW_REG_MGPC_LEN];
        int err;

        mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
                            MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
        if (err)
                return err;
        *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
        *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
        return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
                                       unsigned int counter_index)
{
        char mgpc_pl[MLXSW_REG_MGPC_LEN];

        mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
                            MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
                                unsigned int *p_counter_index)
{
        int err;

        err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
                                     p_counter_index);
        if (err)
                return err;
        err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
        if (err)
                goto err_counter_clear;
        return 0;

err_counter_clear:
        mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
                              *p_counter_index);
        return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
                                unsigned int counter_index)
{
        mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
                              counter_index);
}
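
/* Illustrative sketch (not compiled): typical lifecycle of a flow counter
 * using the helpers above. The caller context and the binding of the
 * counter to an ACL action are assumptions, not part of this file.
 */
#if 0
static int example_flow_counter_usage(struct mlxsw_sp *mlxsw_sp)
{
        unsigned int counter_index;
        u64 packets, bytes;
        int err;

        /* Allocate a counter from the flow sub-pool; it is cleared first. */
        err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
        if (err)
                return err;

        /* ... bind counter_index to a flow, let traffic accumulate ... */

        /* Read back the packet and byte counts from the hardware. */
        err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
                                        &packets, &bytes);

        mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
        return err;
}
#endif
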
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
                                     const struct mlxsw_tx_info *tx_info)
{
        char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

        memset(txhdr, 0, MLXSW_TXHDR_LEN);

        mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
        mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
        mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
        mlxsw_tx_hdr_swid_set(txhdr, 0);
        mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
        mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
        mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
        char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
        int err;

        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
        if (err)
                return err;
        mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
        return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
        int i;

        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
                return -EIO;

        mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                          MAX_SPAN);
        mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
                                         sizeof(struct mlxsw_sp_span_entry),
                                         GFP_KERNEL);
        if (!mlxsw_sp->span.entries)
                return -ENOMEM;

        for (i = 0; i < mlxsw_sp->span.entries_count; i++)
                INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

        return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
        int i;

        for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
                struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

                WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
        }
        kfree(mlxsw_sp->span.entries);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
        struct mlxsw_sp_span_entry *span_entry;
        char mpat_pl[MLXSW_REG_MPAT_LEN];
        u8 local_port = port->local_port;
        int index;
        int i;
        int err;

        /* find a free entry to use */
        index = -1;
        for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
                if (!mlxsw_sp->span.entries[i].used) {
                        index = i;
                        span_entry = &mlxsw_sp->span.entries[i];
                        break;
                }
        }
        if (index < 0)
                return NULL;

        /* create a new port analyzer entry for local_port */
        mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
        if (err)
                return NULL;

        span_entry->used = true;
        span_entry->id = index;
        span_entry->ref_count = 1;
        span_entry->local_port = local_port;
        return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_span_entry *span_entry)
{
        u8 local_port = span_entry->local_port;
        char mpat_pl[MLXSW_REG_MPAT_LEN];
        int pa_id = span_entry->id;

        mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
        span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
        int i;

        for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
                struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

                if (curr->used && curr->local_port == port->local_port)
                        return curr;
        }
        return NULL;
}

static struct mlxsw_sp_span_entry
*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
        struct mlxsw_sp_span_entry *span_entry;

        span_entry = mlxsw_sp_span_entry_find(port);
        if (span_entry) {
                /* Already exists, just take a reference */
                span_entry->ref_count++;
                return span_entry;
        }

        return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_span_entry *span_entry)
{
        WARN_ON(!span_entry->ref_count);
        if (--span_entry->ref_count == 0)
                mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
        return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
        struct mlxsw_sp_span_inspected_port *p;
        int i;

        for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
                struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

                list_for_each_entry(p, &curr->bound_ports_list, list)
                        if (p->local_port == port->local_port &&
                            p->type == MLXSW_SP_SPAN_EGRESS)
                                return true;
        }

        return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
                                         int mtu)
{
        return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
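
/* Worked example (added for illustration): with a 1518 byte MTU the
 * mirror buffer is sized for 1518 * 5 / 2 = 3795 bytes, converted to
 * buffer cells, plus one extra cell. The 2.5x headroom factor itself
 * comes from the function above.
 */
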
static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
        char sbib_pl[MLXSW_REG_SBIB_LEN];
        int err;

        /* If port is egress mirrored, the shared buffer size should be
         * updated according to the mtu value
         */
        if (mlxsw_sp_span_is_egress_mirror(port)) {
                u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

                mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
                if (err) {
                        netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
                        return err;
                }
        }

        return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
                                    struct mlxsw_sp_span_entry *span_entry)
{
        struct mlxsw_sp_span_inspected_port *p;

        list_for_each_entry(p, &span_entry->bound_ports_list, list)
                if (port->local_port == p->local_port)
                        return p;

        return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
                                  struct mlxsw_sp_span_entry *span_entry,
                                  enum mlxsw_sp_span_type type)
{
        struct mlxsw_sp_span_inspected_port *inspected_port;
        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
        char mpar_pl[MLXSW_REG_MPAR_LEN];
        char sbib_pl[MLXSW_REG_SBIB_LEN];
        int pa_id = span_entry->id;
        int err;

        /* if it is an egress SPAN, bind a shared buffer to it */
        if (type == MLXSW_SP_SPAN_EGRESS) {
                u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
                                                             port->dev->mtu);

                mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
                if (err) {
                        netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
                        return err;
                }
        }

        /* bind the port to the SPAN entry */
        mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
                            (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
        if (err)
                goto err_mpar_reg_write;

        inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
        if (!inspected_port) {
                err = -ENOMEM;
                goto err_inspected_port_alloc;
        }
        inspected_port->local_port = port->local_port;
        inspected_port->type = type;
        list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

        return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
        if (type == MLXSW_SP_SPAN_EGRESS) {
                mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
                mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
        }
        return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
                                    struct mlxsw_sp_span_entry *span_entry,
                                    enum mlxsw_sp_span_type type)
{
        struct mlxsw_sp_span_inspected_port *inspected_port;
        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
        char mpar_pl[MLXSW_REG_MPAR_LEN];
        char sbib_pl[MLXSW_REG_SBIB_LEN];
        int pa_id = span_entry->id;

        inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
        if (!inspected_port)
                return;

        /* remove the inspected port */
        mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
                            (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

        /* remove the SBIB buffer if it was egress SPAN */
        if (type == MLXSW_SP_SPAN_EGRESS) {
                mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
                mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
        }

        mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

        list_del(&inspected_port->list);
        kfree(inspected_port);
}

static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
                                    struct mlxsw_sp_port *to,
                                    enum mlxsw_sp_span_type type)
{
        struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
        struct mlxsw_sp_span_entry *span_entry;
        int err;

        span_entry = mlxsw_sp_span_entry_get(to);
        if (!span_entry)
                return -ENOENT;

        netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
                   span_entry->id);

        err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
        if (err)
                goto err_port_bind;

        return 0;

err_port_bind:
        mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
        return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
                                        struct mlxsw_sp_port *to,
                                        enum mlxsw_sp_span_type type)
{
        struct mlxsw_sp_span_entry *span_entry;

        span_entry = mlxsw_sp_span_entry_find(to);
        if (!span_entry) {
                netdev_err(from->dev, "no span entry found\n");
                return;
        }

        netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
                   span_entry->id);
        mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}
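
/* Illustrative sketch (not compiled): pairing the two helpers above to
 * mirror ingress traffic from one spectrum port to an analyzer port.
 * 'from' and 'to' are assumed to be valid ports on the same ASIC.
 */
#if 0
static int example_span_usage(struct mlxsw_sp_port *from,
                              struct mlxsw_sp_port *to)
{
        int err;

        /* Takes a reference on (or creates) the SPAN entry for 'to'. */
        err = mlxsw_sp_span_mirror_add(from, to, MLXSW_SP_SPAN_INGRESS);
        if (err)
                return err;

        /* ... later, tear the mirror down and drop the reference ... */
        mlxsw_sp_span_mirror_remove(from, to, MLXSW_SP_SPAN_INGRESS);
        return 0;
}
#endif
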
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                    bool enable, u32 rate)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char mpsc_pl[MLXSW_REG_MPSC_LEN];

        mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                          bool is_up)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char paos_pl[MLXSW_REG_PAOS_LEN];

        mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
                            is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
                            MLXSW_PORT_ADMIN_STATUS_DOWN);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      unsigned char *addr)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char ppad_pl[MLXSW_REG_PPAD_LEN];

        mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
        mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

        ether_addr_copy(addr, mlxsw_sp->base_mac);
        addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
        return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pmtu_pl[MLXSW_REG_PMTU_LEN];
        int max_mtu;
        int err;

        mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
        if (err)
                return err;
        max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

        if (mtu > max_mtu)
                return -EINVAL;

        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                    u8 swid)
{
        char pspa_pl[MLXSW_REG_PSPA_LEN];

        mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

        return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
                                        swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                     bool enable)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char svpe_pl[MLXSW_REG_SVPE_LEN];

        mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
                                 u16 vid)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char svfa_pl[MLXSW_REG_SVFA_LEN];

        mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
                            fid, vid);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 vid_begin, u16 vid_end,
                                     bool learn_enable)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char *spvmlr_pl;
        int err;

        spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
        if (!spvmlr_pl)
                return -ENOMEM;
        mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
                              vid_end, learn_enable);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
        kfree(spvmlr_pl);
        return err;
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                          u16 vid, bool learn_enable)
{
        return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
                                                learn_enable);
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char sspr_pl[MLXSW_REG_SSPR_LEN];

        mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
                                         u8 local_port, u8 *p_module,
                                         u8 *p_width, u8 *p_lane)
{
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        int err;

        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
        if (err)
                return err;
        *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
        *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
        *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
        return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                    u8 module, u8 width, u8 lane)
{
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        int i;

        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        mlxsw_reg_pmlp_width_set(pmlp_pl, width);
        for (i = 0; i < width; i++) {
                mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
                mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
        }

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
        char pmlp_pl[MLXSW_REG_PMLP_LEN];

        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        int err;

        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
        if (err)
                return err;
        netif_start_queue(dev);
        return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        netif_stop_queue(dev);
        return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
        const struct mlxsw_tx_info tx_info = {
                .local_port = mlxsw_sp_port->local_port,
                .is_emad = false,
        };
        u64 len;
        int err;

        if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
                return NETDEV_TX_BUSY;

        if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
                struct sk_buff *skb_orig = skb;

                skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
                if (!skb) {
                        this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                        dev_kfree_skb_any(skb_orig);
                        return NETDEV_TX_OK;
                }
                dev_consume_skb_any(skb_orig);
        }

        if (eth_skb_pad(skb)) {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                return NETDEV_TX_OK;
        }

        mlxsw_sp_txhdr_construct(skb, &tx_info);
        /* TX header is consumed by HW on the way so we shouldn't count its
         * bytes as being sent.
         */
        len = skb->len - MLXSW_TXHDR_LEN;

        /* Due to a race we might fail here because of a full queue. In that
         * unlikely case we simply drop the packet.
         */
        err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

        if (!err) {
                pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->tx_packets++;
                pcpu_stats->tx_bytes += len;
                u64_stats_update_end(&pcpu_stats->syncp);
        } else {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct sockaddr *addr = p;
        int err;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
        if (err)
                return err;
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        return 0;
}

static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
                                         int mtu)
{
        return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2  /* 2 * cell_size / (IPG + cell_size + 1) */

static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
                                  u16 delay)
{
        delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
                                                            BITS_PER_BYTE));
        return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
                                                                   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
                                     u16 delay, bool pfc, bool pause)
{
        if (pfc)
                return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
        else if (pause)
                return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
        else
                return 0;
}
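
/* Worked example (added for illustration): with plain PAUSE the delay
 * allowance is the fixed MLXSW_SP_PAUSE_DELAY of 58752 bytes converted to
 * cells. With PFC it is MLXSW_SP_CELL_FACTOR * cells(delay / 8) + cells(mtu),
 * where the incoming 'delay' is in bits, hence the division by
 * BITS_PER_BYTE in mlxsw_sp_pfc_delay_get() above.
 */
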
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
                                 bool lossy)
{
        if (lossy)
                mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
        else
                mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
                                                    thres);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
                                 u8 *prio_tc, bool pause_en,
                                 struct ieee_pfc *my_pfc)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
        u16 delay = !!my_pfc ? my_pfc->delay : 0;
        char pbmc_pl[MLXSW_REG_PBMC_LEN];
        int i, j, err;

        mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
        if (err)
                return err;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                bool configure = false;
                bool pfc = false;
                bool lossy;
                u16 thres;

                for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
                        if (prio_tc[j] == i) {
                                pfc = pfc_en & BIT(j);
                                configure = true;
                                break;
                        }
                }

                if (!configure)
                        continue;

                lossy = !(pfc || pause_en);
                thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
                delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
                                                  pause_en);
                mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
        }

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      int mtu, bool pause_en)
{
        u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
        bool dcb_en = !!mlxsw_sp_port->dcb.ets;
        struct ieee_pfc *my_pfc;
        u8 *prio_tc;

        prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
        my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

        return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
                                            pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
        int err;

        err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
        if (err)
                return err;
        err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
        if (err)
                goto err_span_port_mtu_update;
        err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
        if (err)
                goto err_port_mtu_set;
        dev->mtu = mtu;
        return 0;

err_port_mtu_set:
        mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
        mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
        return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
                             struct rtnl_link_stats64 *stats)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_port_pcpu_stats *p;
        u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
        u32 tx_dropped = 0;
        unsigned int start;
        int i;

        for_each_possible_cpu(i) {
                p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        rx_packets = p->rx_packets;
                        rx_bytes = p->rx_bytes;
                        tx_packets = p->tx_packets;
                        tx_bytes = p->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&p->syncp, start));

                stats->rx_packets += rx_packets;
                stats->rx_bytes += rx_bytes;
                stats->tx_packets += tx_packets;
                stats->tx_bytes += tx_bytes;
                /* tx_dropped is u32, updated without syncp protection. */
                tx_dropped += p->tx_dropped;
        }
        stats->tx_dropped = tx_dropped;
        return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return true;
        }

        return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
                                           void *sp)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return mlxsw_sp_port_get_sw_stats64(dev, sp);
        }

        return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
                                       int prio, char *ppcnt_pl)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

        mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
        return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
                                      struct rtnl_link_stats64 *stats)
{
        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
        int err;

        err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
                                          0, ppcnt_pl);
        if (err)
                goto out;

        stats->tx_packets =
                mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
        stats->rx_packets =
                mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
        stats->tx_bytes =
                mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
        stats->rx_bytes =
                mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
        stats->multicast =
                mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

        stats->rx_crc_errors =
                mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
        stats->rx_frame_errors =
                mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

        stats->rx_length_errors = (
                mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
                mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
                mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

        stats->rx_errors = (stats->rx_crc_errors +
                stats->rx_frame_errors + stats->rx_length_errors);

out:
        return err;
}

static void update_stats_cache(struct work_struct *work)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                container_of(work, struct mlxsw_sp_port,
                             hw_stats.update_dw.work);

        if (!netif_carrier_ok(mlxsw_sp_port->dev))
                goto out;

        mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
                                   mlxsw_sp_port->hw_stats.cache);

out:
        mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
                               MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
                          struct rtnl_link_stats64 *stats)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
                           u16 vid_end, bool is_member, bool untagged)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char *spvm_pl;
        int err;

        spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
        if (!spvm_pl)
                return -ENOMEM;

        mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
                            vid_end, is_member, untagged);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
        kfree(spvm_pl);
        return err;
}

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
        enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
        u16 vid, last_visited_vid;
        int err;

        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
                err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
                                                   vid);
                if (err) {
                        last_visited_vid = vid;
                        goto err_port_vid_to_fid_set;
                }
        }

        err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
        if (err) {
                last_visited_vid = VLAN_N_VID;
                goto err_port_vid_to_fid_set;
        }

        return 0;

err_port_vid_to_fid_set:
        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
                mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
                                             vid);
        return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
        enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
        u16 vid;
        int err;

        err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
        if (err)
                return err;

        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
                err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
                                                   vid, vid);
                if (err)
                        return err;
        }

        return 0;
}

static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_vport;

        mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
        if (!mlxsw_sp_vport)
                return NULL;

        /* dev will be set correctly after the VLAN device is linked
         * with the real device. In case of bridge SELF invocation, dev
         * will remain as is.
         */
        mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
        mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
        mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
        mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
        mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
        mlxsw_sp_vport->vport.vid = vid;

        list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

        return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
        list_del(&mlxsw_sp_vport->vport.list);
        kfree(mlxsw_sp_vport);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
                                 __be16 __always_unused proto, u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_port *mlxsw_sp_vport;
        bool untagged = vid == 1;
        int err;

        /* VLAN 0 is added to HW filter when device goes up, but it is
         * reserved in our case, so simply return.
         */
        if (!vid)
                return 0;

        if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
                return 0;

        mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
        if (!mlxsw_sp_vport)
                return -ENOMEM;

        /* When adding the first VLAN interface on a bridged port we need to
         * transition all the active 802.1Q bridge VLANs to use explicit
         * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
         */
        if (list_is_singular(&mlxsw_sp_port->vports_list)) {
                err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
                if (err)
                        goto err_port_vp_mode_trans;
        }

        err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
        if (err)
                goto err_port_add_vid;

        return 0;

err_port_add_vid:
        if (list_is_singular(&mlxsw_sp_port->vports_list))
                mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
        mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
        return err;
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
                                  __be16 __always_unused proto, u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_port *mlxsw_sp_vport;
        struct mlxsw_sp_fid *f;

        /* VLAN 0 is removed from HW filter when device goes down, but
         * it is reserved in our case, so simply return.
         */
        if (!vid)
                return 0;

        mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
        if (WARN_ON(!mlxsw_sp_vport))
                return -EINVAL;

        mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);

        /* Drop FID reference. If this was the last reference the
         * resources will be freed.
         */
        f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
        if (f && !WARN_ON(!f->leave))
                f->leave(mlxsw_sp_vport);

        /* When removing the last VLAN interface on a bridged port we need to
         * transition all active 802.1Q bridge VLANs to use VID to FID
         * mappings and set port's mode to VLAN mode.
         */
        if (list_is_singular(&mlxsw_sp_port->vports_list))
                mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);

        mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

        return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
                                            size_t len)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        u8 module = mlxsw_sp_port->mapping.module;
        u8 width = mlxsw_sp_port->mapping.width;
        u8 lane = mlxsw_sp_port->mapping.lane;
        int err;

        if (!mlxsw_sp_port->split)
                err = snprintf(name, len, "p%d", module + 1);
        else
                err = snprintf(name, len, "p%ds%d", module + 1,
                               lane / width);

        if (err >= len)
                return -EINVAL;

        return 0;
}
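
/* For example: a non-split port on module 2 is named "p3" (module + 1),
 * while a split port on module 2 using lane 2 of a 2-lane split is named
 * "p3s1" (lane / width selects the sub-port index).
 */
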
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
                                 unsigned long cookie)
{
        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

        list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
                if (mall_tc_entry->cookie == cookie)
                        return mall_tc_entry;

        return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
                                      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
                                      const struct tc_action *a,
                                      bool ingress)
{
        struct net *net = dev_net(mlxsw_sp_port->dev);
        enum mlxsw_sp_span_type span_type;
        struct mlxsw_sp_port *to_port;
        struct net_device *to_dev;
        int ifindex;

        ifindex = tcf_mirred_ifindex(a);
        to_dev = __dev_get_by_index(net, ifindex);
        if (!to_dev) {
                netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
                return -EINVAL;
        }

        if (!mlxsw_sp_port_dev_check(to_dev)) {
                netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
                return -EOPNOTSUPP;
        }
        to_port = netdev_priv(to_dev);

        mirror->to_local_port = to_port->local_port;
        mirror->ingress = ingress;
        span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
        return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
                                      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        enum mlxsw_sp_span_type span_type;
        struct mlxsw_sp_port *to_port;

        to_port = mlxsw_sp->ports[mirror->to_local_port];
        span_type = mirror->ingress ?
                        MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
        mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
}

static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
                                      struct tc_cls_matchall_offload *cls,
                                      const struct tc_action *a,
                                      bool ingress)
{
        int err;

        if (!mlxsw_sp_port->sample)
                return -EOPNOTSUPP;
        if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
                netdev_err(mlxsw_sp_port->dev, "sample already active\n");
                return -EEXIST;
        }
        if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
                netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
                return -EOPNOTSUPP;
        }

        rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
                           tcf_sample_psample_group(a));
        mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
        mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
        mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

        err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
        if (err)
                goto err_port_sample_set;
        return 0;

err_port_sample_set:
        RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
        return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
        if (!mlxsw_sp_port->sample)
                return;

        mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
        RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
                                          __be16 protocol,
                                          struct tc_cls_matchall_offload *cls,
                                          bool ingress)
{
        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
        const struct tc_action *a;
        LIST_HEAD(actions);
        int err;

        if (!tc_single_action(cls->exts)) {
                netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
                return -EOPNOTSUPP;
        }

        mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
        if (!mall_tc_entry)
                return -ENOMEM;
        mall_tc_entry->cookie = cls->cookie;

        tcf_exts_to_list(cls->exts, &actions);
        a = list_first_entry(&actions, struct tc_action, list);

        if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
                struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

                mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
                mirror = &mall_tc_entry->mirror;
                err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
                                                            mirror, a, ingress);
        } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
                mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
                err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
                                                            a, ingress);
        } else {
                err = -EOPNOTSUPP;
        }

        if (err)
                goto err_add_action;

        list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
        return 0;

err_add_action:
        kfree(mall_tc_entry);
        return err;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
                                           struct tc_cls_matchall_offload *cls)
{
        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

        mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
                                                         cls->cookie);
        if (!mall_tc_entry) {
                netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
                return;
        }
        list_del(&mall_tc_entry->list);

        switch (mall_tc_entry->type) {
        case MLXSW_SP_PORT_MALL_MIRROR:
                mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
                                                      &mall_tc_entry->mirror);
                break;
        case MLXSW_SP_PORT_MALL_SAMPLE:
                mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
                break;
        default:
                WARN_ON(1);
        }

        kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
                             __be16 proto, struct tc_to_netdev *tc)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);

        switch (tc->type) {
        case TC_SETUP_MATCHALL:
                switch (tc->cls_mall->command) {
                case TC_CLSMATCHALL_REPLACE:
                        return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
                                                              proto,
                                                              tc->cls_mall,
                                                              ingress);
                case TC_CLSMATCHALL_DESTROY:
                        mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
                                                       tc->cls_mall);
                        return 0;
                default:
                        return -EOPNOTSUPP;
                }
        case TC_SETUP_CLSFLOWER:
                switch (tc->cls_flower->command) {
                case TC_CLSFLOWER_REPLACE:
                        return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
                                                       proto, tc->cls_flower);
                case TC_CLSFLOWER_DESTROY:
                        mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
                                                tc->cls_flower);
                        return 0;
                case TC_CLSFLOWER_STATS:
                        return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
                                                     tc->cls_flower);
                default:
                        return -EOPNOTSUPP;
                }
        }

        return -EOPNOTSUPP;
}
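
/* Illustrative user-space commands that reach this entry point (the
 * interface names are hypothetical):
 *
 *   tc qdisc add dev sw1p1 handle ffff: ingress
 *   tc filter add dev sw1p1 parent ffff: matchall skip_sw \
 *           action mirred egress mirror dev sw1p2
 *
 * installs a matchall mirror (TC_CLSMATCHALL_REPLACE above), while
 *
 *   tc filter add dev sw1p1 parent ffff: protocol ip flower skip_sw \
 *           dst_ip 192.0.2.1 action drop
 *
 * is dispatched to the flower handlers (TC_CLSFLOWER_REPLACE).
 */
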
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
        .ndo_open               = mlxsw_sp_port_open,
        .ndo_stop               = mlxsw_sp_port_stop,
        .ndo_start_xmit         = mlxsw_sp_port_xmit,
        .ndo_setup_tc           = mlxsw_sp_setup_tc,
        .ndo_set_rx_mode        = mlxsw_sp_set_rx_mode,
        .ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
        .ndo_change_mtu         = mlxsw_sp_port_change_mtu,
        .ndo_get_stats64        = mlxsw_sp_port_get_stats64,
        .ndo_has_offload_stats  = mlxsw_sp_port_has_offload_stats,
        .ndo_get_offload_stats  = mlxsw_sp_port_get_offload_stats,
        .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
        .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
        .ndo_fdb_add            = switchdev_port_fdb_add,
        .ndo_fdb_del            = switchdev_port_fdb_del,
        .ndo_fdb_dump           = switchdev_port_fdb_dump,
        .ndo_bridge_setlink     = switchdev_port_bridge_setlink,
        .ndo_bridge_getlink     = switchdev_port_bridge_getlink,
        .ndo_bridge_dellink     = switchdev_port_bridge_dellink,
        .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
                                      struct ethtool_drvinfo *drvinfo)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

        strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, mlxsw_sp_driver_version,
                sizeof(drvinfo->version));
        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                 "%d.%d.%d",
                 mlxsw_sp->bus_info->fw_rev.major,
                 mlxsw_sp->bus_info->fw_rev.minor,
                 mlxsw_sp->bus_info->fw_rev.subminor);
        strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
                sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
                                         struct ethtool_pauseparam *pause)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        pause->rx_pause = mlxsw_sp_port->link.rx_pause;
        pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                   struct ethtool_pauseparam *pause)
{
        char pfcc_pl[MLXSW_REG_PFCC_LEN];

        mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
        mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
        mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

        return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
                               pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
                                        struct ethtool_pauseparam *pause)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        bool pause_en = pause->tx_pause || pause->rx_pause;
        int err;

        if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
                netdev_err(dev, "PFC already enabled on port\n");
                return -EINVAL;
        }

        if (pause->autoneg) {
                netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
                return -EINVAL;
        }

        err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
        if (err) {
                netdev_err(dev, "Failed to configure port's headroom\n");
                return err;
        }

        err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
        if (err) {
                netdev_err(dev, "Failed to set PAUSE parameters\n");
                goto err_port_pause_configure;
        }

        mlxsw_sp_port->link.rx_pause = pause->rx_pause;
        mlxsw_sp_port->link.tx_pause = pause->tx_pause;

        return 0;

err_port_pause_configure:
        pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
        mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
        return err;
}

struct mlxsw_sp_port_hw_stats {
        char str[ETH_GSTRING_LEN];
        u64 (*getter)(const char *payload);
        bool cells_bytes;
};

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
        {
                .str = "a_frames_transmitted_ok",
                .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
        },
        {
                .str = "a_frames_received_ok",
                .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
        },
        {
                .str = "a_frame_check_sequence_errors",
                .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
        },
        {
                .str = "a_alignment_errors",
                .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
        },
        {
                .str = "a_octets_transmitted_ok",
                .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
        },
        {
                .str = "a_octets_received_ok",
                .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
        },
        {
                .str = "a_multicast_frames_xmitted_ok",
                .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
        },
        {
                .str = "a_broadcast_frames_xmitted_ok",
                .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
        },
        {
                .str = "a_multicast_frames_received_ok",
                .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
        },
        {
                .str = "a_broadcast_frames_received_ok",
                .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
        },
        {
                .str = "a_in_range_length_errors",
                .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
        },
        {
                .str = "a_out_of_range_length_field",
                .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
        },
        {
                .str = "a_frame_too_long_errors",
                .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
        },
        {
                .str = "a_symbol_error_during_carrier",
                .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
        },
        {
                .str = "a_mac_control_frames_transmitted",
                .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
        },
        {
                .str = "a_mac_control_frames_received",
                .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
        },
        {
                .str = "a_unsupported_opcodes_received",
                .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
        },
        {
                .str = "a_pause_mac_ctrl_frames_received",
                .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
        },
        {
                .str = "a_pause_mac_ctrl_frames_xmitted",
                .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
        },
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
        {
                .str = "rx_octets_prio",
                .getter = mlxsw_reg_ppcnt_rx_octets_get,
        },
        {
                .str = "rx_frames_prio",
                .getter = mlxsw_reg_ppcnt_rx_frames_get,
        },
        {
                .str = "tx_octets_prio",
                .getter = mlxsw_reg_ppcnt_tx_octets_get,
        },
        {
                .str = "tx_frames_prio",
                .getter = mlxsw_reg_ppcnt_tx_frames_get,
        },
        {
                .str = "rx_pause_prio",
                .getter = mlxsw_reg_ppcnt_rx_pause_get,
        },
        {
                .str = "rx_pause_duration_prio",
                .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
        },
        {
                .str = "tx_pause_prio",
                .getter = mlxsw_reg_ppcnt_tx_pause_get,
        },
        {
                .str = "tx_pause_duration_prio",
                .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
        },
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
        {
                .str = "tc_transmit_queue_tc",
                .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
                .cells_bytes = true,
        },
        {
                .str = "tc_no_buffer_discard_uc_tc",
                .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
        },
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
                                         (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
                                          MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
                                         IEEE_8021QAZ_MAX_TCS)
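
/* Worked example (added for illustration): with the tables above this
 * evaluates to 19 IEEE 802.3 counters plus (8 per-priority + 2 per-TC)
 * counters for each of the 8 traffic classes, i.e. 19 + 10 * 8 = 99
 * ethtool stats per port.
 */
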
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
        int i;

        for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
                snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
                         mlxsw_sp_port_hw_prio_stats[i].str, prio);
                *p += ETH_GSTRING_LEN;
        }
}

static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
        int i;

        for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
                snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
                         mlxsw_sp_port_hw_tc_stats[i].str, tc);
                *p += ETH_GSTRING_LEN;
        }
}

static void mlxsw_sp_port_get_strings(struct net_device *dev,
                                      u32 stringset, u8 *data)
{
        u8 *p = data;
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
                        memcpy(p, mlxsw_sp_port_hw_stats[i].str,
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }

                for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                        mlxsw_sp_port_get_prio_strings(&p, i);

                for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                        mlxsw_sp_port_get_tc_strings(&p, i);

                break;
        }
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
                                     enum ethtool_phys_id_state state)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char mlcr_pl[MLXSW_REG_MLCR_LEN];
        bool active;

        switch (state) {
        case ETHTOOL_ID_ACTIVE:
                active = true;
                break;
        case ETHTOOL_ID_INACTIVE:
                active = false;
                break;
        default:
                return -EOPNOTSUPP;
        }

        mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
                               int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
        switch (grp) {
        case MLXSW_REG_PPCNT_IEEE_8023_CNT:
                *p_hw_stats = mlxsw_sp_port_hw_stats;
                *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
                break;
        case MLXSW_REG_PPCNT_PRIO_CNT:
                *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
                *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
                break;
        case MLXSW_REG_PPCNT_TC_CNT:
                *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
                *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
                break;
        default:
                WARN_ON(1);
                return -EOPNOTSUPP;
        }
        return 0;
}

static void __mlxsw_sp_port_get_stats(struct net_device *dev,
                                      enum mlxsw_reg_ppcnt_grp grp, int prio,
                                      u64 *data, int data_index)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_port_hw_stats *hw_stats;
        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
        int i, len;
        int err;

        err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
        if (err)
                return;
        mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
        for (i = 0; i < len; i++) {
                data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
                if (!hw_stats[i].cells_bytes)
                        continue;
                data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
                                                            data[data_index + i]);
        }
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
                                    struct ethtool_stats *stats, u64 *data)
{
        int i, data_index = 0;

        /* IEEE 802.3 Counters */
        __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
                                  data, data_index);
        data_index = MLXSW_SP_PORT_HW_STATS_LEN;

        /* Per-Priority Counters */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
                                          data, data_index);
                data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
        }

        /* Per-TC Counters */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
                                          data, data_index);
                data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
        }
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
        default:
                return -EOPNOTSUPP;
        }
}

struct mlxsw_sp_port_link_mode {
        enum ethtool_link_mode_bit_indices mask_ethtool;
        u32 mask;
        u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
                .mask_ethtool   = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
                .speed          = SPEED_100,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
                                  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
                .mask_ethtool   = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
                .speed          = SPEED_1000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
                .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
                .speed          = SPEED_10000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
                .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
                .speed          = SPEED_10000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
                .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
                .speed          = SPEED_10000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
                .mask_ethtool   = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
                .speed          = SPEED_20000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
                .speed          = SPEED_40000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
                .speed          = SPEED_40000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
                .speed          = SPEED_40000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
                .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
                .speed          = SPEED_40000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
                .speed          = SPEED_25000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
                .speed          = SPEED_25000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
                .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                .speed          = SPEED_25000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
                .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
                .speed          = SPEED_50000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
                .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
                .speed          = SPEED_50000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
                .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
                .speed          = SPEED_50000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
                .speed          = SPEED_56000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
                .speed          = SPEED_56000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
                .speed          = SPEED_56000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
                .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
                .speed          = SPEED_56000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
                .speed          = SPEED_100000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
                .speed          = SPEED_100000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
                .speed          = SPEED_100000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
                .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
                .speed          = SPEED_100000,
        },
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
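
/* For example, a PTYS capability word with only
 * MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 set maps to the single ethtool mode
 * ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT at SPEED_40000. Note the mapping
 * is not one-to-one: one PTYS bit can fan out to several ethtool modes
 * (the 56G entries above), and several PTYS bits can share one ethtool
 * mode (the 10G KR/CR/SR/ER-LR entry).
 */
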
static void
mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
                                  struct ethtool_link_ksettings *cmd)
{
        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
                              MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
                              MLXSW_REG_PTYS_ETH_SPEED_SGMII))
                ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
                              MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
                              MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
                ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
{
        int i;

        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
                if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
                        __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
                                  mode);
        }
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
                                            struct ethtool_link_ksettings *cmd)
{
        u32 speed = SPEED_UNKNOWN;
        u8 duplex = DUPLEX_UNKNOWN;
        int i;

        if (!carrier_ok)
                goto out;

        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
                if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
                        speed = mlxsw_sp_port_link_mode[i].speed;
                        duplex = DUPLEX_FULL;
                        break;
                }
        }
out:
        cmd->base.speed = speed;
        cmd->base.duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
                              MLXSW_REG_PTYS_ETH_SPEED_SGMII))
                return PORT_FIBRE;

        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
                return PORT_DA;

        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
                              MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
                return PORT_NONE;

        return PORT_OTHER;
}

static u32
mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
{
        u32 ptys_proto = 0;
        int i;

        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
                if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
                             cmd->link_modes.advertising))
                        ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
        }
        return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
        u32 ptys_proto = 0;
        int i;

        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
                if (speed == mlxsw_sp_port_link_mode[i].speed)
                        ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
        }
        return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
        u32 ptys_proto = 0;
        int i;

        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
                if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
                        ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
        }
        return ptys_proto;
}

static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
                                             struct ethtool_link_ksettings *cmd)
{
        ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
        ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
        ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

        mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
        mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
}

static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
                                             struct ethtool_link_ksettings *cmd)
{
        if (!autoneg)
                return;

        ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
        mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
}

static void
mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
                                    struct ethtool_link_ksettings *cmd)
{
        if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
                return;

        ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
        mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
}

2171 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2172 struct ethtool_link_ksettings *cmd)
2174 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2175 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2176 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2177 char ptys_pl[MLXSW_REG_PTYS_LEN];
2182 autoneg = mlxsw_sp_port->link.autoneg;
2183 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2184 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2187 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2190 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2192 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2194 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2195 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2196 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2198 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2199 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2200 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2207 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2208 const struct ethtool_link_ksettings *cmd)
2210 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2211 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2212 char ptys_pl[MLXSW_REG_PTYS_LEN];
2213 u32 eth_proto_cap, eth_proto_new;
2217 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2218 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2221 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2223 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2224 eth_proto_new = autoneg ?
2225 mlxsw_sp_to_ptys_advert_link(cmd) :
2226 mlxsw_sp_to_ptys_speed(cmd->base.speed);
2228 eth_proto_new = eth_proto_new & eth_proto_cap;
2229 if (!eth_proto_new) {
2230 netdev_err(dev, "No supported speed requested\n");
2234 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2236 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2240 if (!netif_running(dev))
2243 mlxsw_sp_port->link.autoneg = autoneg;
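/* Flap the port administratively so that the updated protocol
 * mask takes effect.
 */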
2245 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2246 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
2251 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
2252 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
2253 .get_link = ethtool_op_get_link,
2254 .get_pauseparam = mlxsw_sp_port_get_pauseparam,
2255 .set_pauseparam = mlxsw_sp_port_set_pauseparam,
2256 .get_strings = mlxsw_sp_port_get_strings,
2257 .set_phys_id = mlxsw_sp_port_set_phys_id,
2258 .get_ethtool_stats = mlxsw_sp_port_get_stats,
2259 .get_sset_count = mlxsw_sp_port_get_sset_count,
2260 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
2261 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
2265 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2267 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2268 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2269 char ptys_pl[MLXSW_REG_PTYS_LEN];
2270 u32 eth_proto_admin;
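/* Advertise every mode up to width times the per-lane base speed;
 * e.g., assuming a 25 Gb/s base speed, a 4-lane port advertises
 * everything up to 100 Gb/s.
 */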
2272 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
2273 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2275 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2278 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2279 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2280 bool dwrr, u8 dwrr_weight)
2282 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2283 char qeec_pl[MLXSW_REG_QEEC_LEN];
2285 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2287 mlxsw_reg_qeec_de_set(qeec_pl, true);
2288 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2289 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2290 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2293 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2294 enum mlxsw_reg_qeec_hr hr, u8 index,
2295 u8 next_index, u32 maxrate)
2297 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2298 char qeec_pl[MLXSW_REG_QEEC_LEN];
2300 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2302 mlxsw_reg_qeec_mase_set(qeec_pl, true);
2303 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2304 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2307 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2308 u8 switch_prio, u8 tclass)
2310 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2311 char qtct_pl[MLXSW_REG_QTCT_LEN];
2313 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2315 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
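/* Build the default ETS scheduling tree: a single group under the port,
 * one subgroup per traffic class and each TC linked to its own
 * subgroup. Max shapers are then disabled and all priorities mapped to
 * traffic class 0.
 */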
2318 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2322 /* Set up the elements hierarchy, so that each TC is linked to
2323 * one subgroup, all of which are members of the same group. */
2325 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2326 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2330 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2331 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2332 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2337 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2338 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2339 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2345 /* Make sure the max shaper is disabled in all hierarchies that support it. */
2348 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2349 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2350 MLXSW_REG_QEEC_MAS_DIS);
2353 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2354 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2355 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2357 MLXSW_REG_QEEC_MAS_DIS);
2361 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2362 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2363 MLXSW_REG_QEEC_HIERARCY_TC,
2365 MLXSW_REG_QEEC_MAS_DIS);
2370 /* Map all priorities to traffic class 0. */
2371 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2372 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2380 static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
2382 mlxsw_sp_port->pvid = 1;
2384 return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
2387 static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
2389 return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
2392 static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2393 bool split, u8 module, u8 width, u8 lane)
2395 struct mlxsw_sp_port *mlxsw_sp_port;
2396 struct net_device *dev;
2400 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2403 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
2404 mlxsw_sp_port = netdev_priv(dev);
2405 mlxsw_sp_port->dev = dev;
2406 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2407 mlxsw_sp_port->local_port = local_port;
2408 mlxsw_sp_port->split = split;
2409 mlxsw_sp_port->mapping.module = module;
2410 mlxsw_sp_port->mapping.width = width;
2411 mlxsw_sp_port->mapping.lane = lane;
2412 mlxsw_sp_port->link.autoneg = 1;
2413 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
2414 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
2415 if (!mlxsw_sp_port->active_vlans) {
2417 goto err_port_active_vlans_alloc;
2419 mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
2420 if (!mlxsw_sp_port->untagged_vlans) {
2422 goto err_port_untagged_vlans_alloc;
2424 INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
2425 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
2427 mlxsw_sp_port->pcpu_stats =
2428 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2429 if (!mlxsw_sp_port->pcpu_stats) {
2431 goto err_alloc_stats;
2434 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2436 if (!mlxsw_sp_port->sample) {
2438 goto err_alloc_sample;
2441 mlxsw_sp_port->hw_stats.cache =
2442 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2444 if (!mlxsw_sp_port->hw_stats.cache) {
2446 goto err_alloc_hw_stats;
2448 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2449 &update_stats_cache);
2451 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2452 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2454 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2456 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2457 mlxsw_sp_port->local_port);
2458 goto err_port_swid_set;
2461 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2463 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2464 mlxsw_sp_port->local_port);
2465 goto err_dev_addr_init;
2468 netif_carrier_off(dev);
2470 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
2471 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2472 dev->hw_features |= NETIF_F_HW_TC;
2475 dev->max_mtu = ETH_MAX_MTU;
2477 /* Each packet needs to have a Tx header (metadata) on top of all other headers. */
2480 dev->needed_headroom = MLXSW_TXHDR_LEN;
2482 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2484 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2485 mlxsw_sp_port->local_port);
2486 goto err_port_system_port_mapping_set;
2489 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2491 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2492 mlxsw_sp_port->local_port);
2493 goto err_port_speed_by_width_set;
2496 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2498 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2499 mlxsw_sp_port->local_port);
2500 goto err_port_mtu_set;
2503 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2505 goto err_port_admin_status_set;
2507 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2509 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2510 mlxsw_sp_port->local_port);
2511 goto err_port_buffers_init;
2514 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2516 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2517 mlxsw_sp_port->local_port);
2518 goto err_port_ets_init;
2521 /* ETS and buffers must be initialized before DCB. */
2522 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2524 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2525 mlxsw_sp_port->local_port);
2526 goto err_port_dcb_init;
2529 err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
2531 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
2532 mlxsw_sp_port->local_port);
2533 goto err_port_pvid_vport_create;
2536 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
2537 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
2538 err = register_netdev(dev);
2540 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2541 mlxsw_sp_port->local_port);
2542 goto err_register_netdev;
2545 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2546 mlxsw_sp_port, dev, mlxsw_sp_port->split,
2548 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
2551 err_register_netdev:
2552 mlxsw_sp->ports[local_port] = NULL;
2553 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
2554 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
2555 err_port_pvid_vport_create:
2556 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2559 err_port_buffers_init:
2560 err_port_admin_status_set:
2562 err_port_speed_by_width_set:
2563 err_port_system_port_mapping_set:
2565 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2567 kfree(mlxsw_sp_port->hw_stats.cache);
2569 kfree(mlxsw_sp_port->sample);
2571 free_percpu(mlxsw_sp_port->pcpu_stats);
2573 kfree(mlxsw_sp_port->untagged_vlans);
2574 err_port_untagged_vlans_alloc:
2575 kfree(mlxsw_sp_port->active_vlans);
2576 err_port_active_vlans_alloc:
2581 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2582 bool split, u8 module, u8 width, u8 lane)
2586 err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2588 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2592 err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
2593 module, width, lane);
2595 goto err_port_create;
2599 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2603 static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2605 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2607 cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
2608 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
2609 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
2610 mlxsw_sp->ports[local_port] = NULL;
2611 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
2612 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
2613 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2614 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2615 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
2616 kfree(mlxsw_sp_port->hw_stats.cache);
2617 kfree(mlxsw_sp_port->sample);
2618 free_percpu(mlxsw_sp_port->pcpu_stats);
2619 kfree(mlxsw_sp_port->untagged_vlans);
2620 kfree(mlxsw_sp_port->active_vlans);
2621 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
2622 free_netdev(mlxsw_sp_port->dev);
2625 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2627 __mlxsw_sp_port_remove(mlxsw_sp, local_port);
2628 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2631 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2633 return mlxsw_sp->ports[local_port] != NULL;
2636 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2640 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
2641 if (mlxsw_sp_port_created(mlxsw_sp, i))
2642 mlxsw_sp_port_remove(mlxsw_sp, i);
2643 kfree(mlxsw_sp->port_to_module);
2644 kfree(mlxsw_sp->ports);
2647 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
2649 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2650 u8 module, width, lane;
2655 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
2656 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
2657 if (!mlxsw_sp->ports)
2660 mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
2661 if (!mlxsw_sp->port_to_module) {
2663 goto err_port_to_module_alloc;
2666 for (i = 1; i < max_ports; i++) {
2667 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
2670 goto err_port_module_info_get;
2673 mlxsw_sp->port_to_module[i] = module;
2674 err = mlxsw_sp_port_create(mlxsw_sp, i, false,
2675 module, width, lane);
2677 goto err_port_create;
2682 err_port_module_info_get:
2683 for (i--; i >= 1; i--)
2684 if (mlxsw_sp_port_created(mlxsw_sp, i))
2685 mlxsw_sp_port_remove(mlxsw_sp, i);
2686 kfree(mlxsw_sp->port_to_module);
2687 err_port_to_module_alloc:
2688 kfree(mlxsw_sp->ports);
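/* Map a local port to the first port of its split cluster. For example,
 * assuming clusters of four ports, local ports 1-4 map to base port 1
 * and ports 5-8 map to base port 5.
 */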
2692 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
2694 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
2696 return local_port - offset;
2699 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
2700 u8 module, unsigned int count)
2702 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
2705 for (i = 0; i < count; i++) {
2706 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
2709 goto err_port_module_map;
2712 for (i = 0; i < count; i++) {
2713 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
2715 goto err_port_swid_set;
2718 for (i = 0; i < count; i++) {
2719 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
2720 module, width, i * width);
2722 goto err_port_create;
2728 for (i--; i >= 0; i--)
2729 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2730 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2733 for (i--; i >= 0; i--)
2734 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
2735 MLXSW_PORT_SWID_DISABLED_PORT);
2737 err_port_module_map:
2738 for (i--; i >= 0; i--)
2739 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
2743 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2744 u8 base_port, unsigned int count)
2746 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
2749 /* Split by four means we need to re-create two ports, otherwise (split by two) re-create only one port. */
2754 for (i = 0; i < count; i++) {
2755 local_port = base_port + i * 2;
2756 module = mlxsw_sp->port_to_module[local_port];
2758 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
2762 for (i = 0; i < count; i++)
2763 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
2765 for (i = 0; i < count; i++) {
2766 local_port = base_port + i * 2;
2767 module = mlxsw_sp->port_to_module[local_port];
2769 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
2774 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
2777 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2778 struct mlxsw_sp_port *mlxsw_sp_port;
2779 u8 module, cur_width, base_port;
2783 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2784 if (!mlxsw_sp_port) {
2785 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2790 module = mlxsw_sp_port->mapping.module;
2791 cur_width = mlxsw_sp_port->mapping.width;
2793 if (count != 2 && count != 4) {
2794 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
2798 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
2799 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
2803 /* Make sure we have enough slave (even) ports for the split. */
2805 base_port = local_port;
2806 if (mlxsw_sp->ports[base_port + 1]) {
2807 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2811 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2812 if (mlxsw_sp->ports[base_port + 1] ||
2813 mlxsw_sp->ports[base_port + 3]) {
2814 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2819 for (i = 0; i < count; i++)
2820 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2821 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2823 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
2825 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2826 goto err_port_split_create;
2831 err_port_split_create:
2832 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2836 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
2838 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2839 struct mlxsw_sp_port *mlxsw_sp_port;
2840 u8 cur_width, base_port;
2844 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2845 if (!mlxsw_sp_port) {
2846 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2851 if (!mlxsw_sp_port->split) {
2852 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
2856 cur_width = mlxsw_sp_port->mapping.width;
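/* A single remaining lane means the port was split four ways; two
 * lanes mean a two-way split of a four-lane module.
 */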
2857 count = cur_width == 1 ? 4 : 2;
2859 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2861 /* Determine which ports to remove. */
2862 if (count == 2 && local_port >= base_port + 2)
2863 base_port = base_port + 2;
2865 for (i = 0; i < count; i++)
2866 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2867 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2869 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2874 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2875 char *pude_pl, void *priv)
2877 struct mlxsw_sp *mlxsw_sp = priv;
2878 struct mlxsw_sp_port *mlxsw_sp_port;
2879 enum mlxsw_reg_pude_oper_status status;
2882 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2883 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2887 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2888 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2889 netdev_info(mlxsw_sp_port->dev, "link up\n");
2890 netif_carrier_on(mlxsw_sp_port->dev);
2892 netdev_info(mlxsw_sp_port->dev, "link down\n");
2893 netif_carrier_off(mlxsw_sp_port->dev);
2897 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2898 u8 local_port, void *priv)
2900 struct mlxsw_sp *mlxsw_sp = priv;
2901 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2902 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2904 if (unlikely(!mlxsw_sp_port)) {
2905 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2910 skb->dev = mlxsw_sp_port->dev;
2912 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2913 u64_stats_update_begin(&pcpu_stats->syncp);
2914 pcpu_stats->rx_packets++;
2915 pcpu_stats->rx_bytes += skb->len;
2916 u64_stats_update_end(&pcpu_stats->syncp);
2918 skb->protocol = eth_type_trans(skb, skb->dev);
2919 netif_receive_skb(skb);
2922 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2925 skb->offload_fwd_mark = 1;
2926 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2929 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
2932 struct mlxsw_sp *mlxsw_sp = priv;
2933 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2934 struct psample_group *psample_group;
2937 if (unlikely(!mlxsw_sp_port)) {
2938 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
2942 if (unlikely(!mlxsw_sp_port->sample)) {
2943 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
2948 size = mlxsw_sp_port->sample->truncate ?
2949 mlxsw_sp_port->sample->trunc_size : skb->len;
2952 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
2955 psample_sample_packet(psample_group, skb, size,
2956 mlxsw_sp_port->dev->ifindex, 0,
2957 mlxsw_sp_port->sample->rate);
2964 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2965 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
2966 _is_ctrl, SP_##_trap_group, DISCARD)
2968 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2969 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
2970 _is_ctrl, SP_##_trap_group, DISCARD)
2972 #define MLXSW_SP_EVENTL(_func, _trap_id) \
2973 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
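/* TRAP_TO_CPU listeners receive the sole copy of a packet, whereas
 * MIRROR_TO_CPU listeners receive a copy while the ASIC keeps
 * forwarding the original in hardware.
 */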
2975 static const struct mlxsw_listener mlxsw_sp_listener[] = {
2977 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
2979 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
2980 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
2981 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
2982 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
2983 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
2984 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
2985 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
2986 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
2987 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
2988 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
2989 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
2991 MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2992 MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2993 MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2994 MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
2995 MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
2996 MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
2997 MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
2998 MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
2999 /* PKT Sample trap */
3000 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
3001 false, SP_IP2ME, DISCARD)
3004 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
3006 char qpcr_pl[MLXSW_REG_QPCR_LEN];
3007 enum mlxsw_reg_qpcr_ir_units ir_units;
3008 int max_cpu_policers;
3014 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
3017 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3019 ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
3020 for (i = 0; i < max_cpu_policers; i++) {
3023 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3024 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3025 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3026 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3030 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3034 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
3035 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3036 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3037 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
3038 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3039 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3043 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3052 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
3054 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
3062 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
3064 char htgt_pl[MLXSW_REG_HTGT_LEN];
3065 enum mlxsw_reg_htgt_trap_group i;
3066 int max_cpu_policers;
3067 int max_trap_groups;
3072 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
3075 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
3076 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3078 for (i = 0; i < max_trap_groups; i++) {
3081 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3082 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3083 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3084 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3088 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
3089 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3093 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3094 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3098 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3102 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
3103 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3104 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3108 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
3109 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
3110 tc = MLXSW_REG_HTGT_DEFAULT_TC;
3111 policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
3117 if (max_cpu_policers <= policer_id &&
3118 policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
3121 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
3122 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3130 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3135 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3139 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
3143 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3144 err = mlxsw_core_trap_register(mlxsw_sp->core,
3145 &mlxsw_sp_listener[i],
3148 goto err_listener_register;
3153 err_listener_register:
3154 for (i--; i >= 0; i--) {
3155 mlxsw_core_trap_unregister(mlxsw_sp->core,
3156 &mlxsw_sp_listener[i],
3162 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3166 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3167 mlxsw_core_trap_unregister(mlxsw_sp->core,
3168 &mlxsw_sp_listener[i],
3173 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
3174 enum mlxsw_reg_sfgc_type type,
3175 enum mlxsw_reg_sfgc_bridge_type bridge_type)
3177 enum mlxsw_flood_table_type table_type;
3178 enum mlxsw_sp_flood_table flood_table;
3179 char sfgc_pl[MLXSW_REG_SFGC_LEN];
3181 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
3182 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
3184 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
3187 case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST:
3188 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
3190 case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4:
3191 flood_table = MLXSW_SP_FLOOD_TABLE_MC;
3194 flood_table = MLXSW_SP_FLOOD_TABLE_BC;
3197 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
3199 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
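/* Set up flooding for both bridge types: VLAN-unaware (vFID) bridges
 * flood based on the FID itself, while VLAN-aware (802.1Q) bridges use
 * FID-offset tables, with separate flood tables for unknown unicast,
 * unregistered multicast and broadcast traffic.
 */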
3202 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
3206 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
3207 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
3210 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
3211 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
3215 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
3216 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
3224 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3226 char slcr_pl[MLXSW_REG_SLCR_LEN];
3229 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3230 MLXSW_REG_SLCR_LAG_HASH_DMAC |
3231 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3232 MLXSW_REG_SLCR_LAG_HASH_VLANID |
3233 MLXSW_REG_SLCR_LAG_HASH_SIP |
3234 MLXSW_REG_SLCR_LAG_HASH_DIP |
3235 MLXSW_REG_SLCR_LAG_HASH_SPORT |
3236 MLXSW_REG_SLCR_LAG_HASH_DPORT |
3237 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
3238 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3242 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3243 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
3246 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
3247 sizeof(struct mlxsw_sp_upper),
3249 if (!mlxsw_sp->lags)
3255 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
3257 kfree(mlxsw_sp->lags);
3260 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3262 char htgt_pl[MLXSW_REG_HTGT_LEN];
3264 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3265 MLXSW_REG_HTGT_INVALID_POLICER,
3266 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3267 MLXSW_REG_HTGT_DEFAULT_TC);
3268 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3271 static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create);
3273 static int mlxsw_sp_dummy_fid_init(struct mlxsw_sp *mlxsw_sp)
3275 return mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, true);
3278 static void mlxsw_sp_dummy_fid_fini(struct mlxsw_sp *mlxsw_sp)
3280 mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, false);
3283 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3284 const struct mlxsw_bus_info *mlxsw_bus_info)
3286 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3289 mlxsw_sp->core = mlxsw_core;
3290 mlxsw_sp->bus_info = mlxsw_bus_info;
3291 INIT_LIST_HEAD(&mlxsw_sp->fids);
3292 INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
3293 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
3295 err = mlxsw_sp_base_mac_get(mlxsw_sp);
3297 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3301 err = mlxsw_sp_traps_init(mlxsw_sp);
3303 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3307 err = mlxsw_sp_flood_init(mlxsw_sp);
3309 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
3310 goto err_flood_init;
3313 err = mlxsw_sp_buffers_init(mlxsw_sp);
3315 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3316 goto err_buffers_init;
3319 err = mlxsw_sp_lag_init(mlxsw_sp);
3321 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3325 err = mlxsw_sp_switchdev_init(mlxsw_sp);
3327 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3328 goto err_switchdev_init;
3331 err = mlxsw_sp_router_init(mlxsw_sp);
3333 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3334 goto err_router_init;
3337 err = mlxsw_sp_span_init(mlxsw_sp);
3339 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3343 err = mlxsw_sp_acl_init(mlxsw_sp);
3345 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3349 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3351 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3352 goto err_counter_pool_init;
3355 err = mlxsw_sp_dpipe_init(mlxsw_sp);
3357 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
3358 goto err_dpipe_init;
3361 err = mlxsw_sp_dummy_fid_init(mlxsw_sp);
3363 dev_err(mlxsw_sp->bus_info->dev, "Failed to init dummy FID\n");
3364 goto err_dummy_fid_init;
3367 err = mlxsw_sp_ports_create(mlxsw_sp);
3369 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3370 goto err_ports_create;
3376 mlxsw_sp_dummy_fid_fini(mlxsw_sp);
3378 mlxsw_sp_dpipe_fini(mlxsw_sp);
3380 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3381 err_counter_pool_init:
3382 mlxsw_sp_acl_fini(mlxsw_sp);
3384 mlxsw_sp_span_fini(mlxsw_sp);
3386 mlxsw_sp_router_fini(mlxsw_sp);
3388 mlxsw_sp_switchdev_fini(mlxsw_sp);
3390 mlxsw_sp_lag_fini(mlxsw_sp);
3392 mlxsw_sp_buffers_fini(mlxsw_sp);
3395 mlxsw_sp_traps_fini(mlxsw_sp);
3399 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
3401 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3403 mlxsw_sp_ports_remove(mlxsw_sp);
3404 mlxsw_sp_dummy_fid_fini(mlxsw_sp);
3405 mlxsw_sp_dpipe_fini(mlxsw_sp);
3406 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3407 mlxsw_sp_acl_fini(mlxsw_sp);
3408 mlxsw_sp_span_fini(mlxsw_sp);
3409 mlxsw_sp_router_fini(mlxsw_sp);
3410 mlxsw_sp_switchdev_fini(mlxsw_sp);
3411 mlxsw_sp_lag_fini(mlxsw_sp);
3412 mlxsw_sp_buffers_fini(mlxsw_sp);
3413 mlxsw_sp_traps_fini(mlxsw_sp);
3414 WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
3415 WARN_ON(!list_empty(&mlxsw_sp->fids));
3418 static struct mlxsw_config_profile mlxsw_sp_config_profile = {
3419 .used_max_vepa_channels = 1,
3420 .max_vepa_channels = 0,
3422 .max_mid = MLXSW_SP_MID_MAX,
3425 .used_flood_tables = 1,
3426 .used_flood_mode = 1,
3428 .max_fid_offset_flood_tables = 3,
3429 .fid_offset_flood_table_size = VLAN_N_VID - 1,
3430 .max_fid_flood_tables = 3,
3431 .fid_flood_table_size = MLXSW_SP_VFID_MAX,
3432 .used_max_ib_mc = 1,
3436 .used_kvd_split_data = 1,
3437 .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
3438 .kvd_hash_single_parts = 2,
3439 .kvd_hash_double_parts = 1,
3440 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
3444 .type = MLXSW_PORT_SWID_TYPE_ETH,
3447 .resource_query_enable = 1,
3450 static struct mlxsw_driver mlxsw_sp_driver = {
3451 .kind = mlxsw_sp_driver_name,
3452 .priv_size = sizeof(struct mlxsw_sp),
3453 .init = mlxsw_sp_init,
3454 .fini = mlxsw_sp_fini,
3455 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
3456 .port_split = mlxsw_sp_port_split,
3457 .port_unsplit = mlxsw_sp_port_unsplit,
3458 .sb_pool_get = mlxsw_sp_sb_pool_get,
3459 .sb_pool_set = mlxsw_sp_sb_pool_set,
3460 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3461 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3462 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3463 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3464 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3465 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3466 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3467 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3468 .txhdr_construct = mlxsw_sp_txhdr_construct,
3469 .txhdr_len = MLXSW_TXHDR_LEN,
3470 .profile = &mlxsw_sp_config_profile,
3473 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3475 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3478 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
3480 struct mlxsw_sp_port **p_mlxsw_sp_port = data;
3483 if (mlxsw_sp_port_dev_check(lower_dev)) {
3484 *p_mlxsw_sp_port = netdev_priv(lower_dev);
3491 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3493 struct mlxsw_sp_port *mlxsw_sp_port;
3495 if (mlxsw_sp_port_dev_check(dev))
3496 return netdev_priv(dev);
3498 mlxsw_sp_port = NULL;
3499 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
3501 return mlxsw_sp_port;
3504 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3506 struct mlxsw_sp_port *mlxsw_sp_port;
3508 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3509 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3512 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3514 struct mlxsw_sp_port *mlxsw_sp_port;
3516 if (mlxsw_sp_port_dev_check(dev))
3517 return netdev_priv(dev);
3519 mlxsw_sp_port = NULL;
3520 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3523 return mlxsw_sp_port;
3526 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3528 struct mlxsw_sp_port *mlxsw_sp_port;
3531 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3533 dev_hold(mlxsw_sp_port->dev);
3535 return mlxsw_sp_port;
3538 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3540 dev_put(mlxsw_sp_port->dev);
3543 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
3546 if (mlxsw_sp_fid_is_vfid(fid))
3547 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
3549 return test_bit(fid, lag_port->active_vlans);
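/* FDB entries learned on a LAG point at the LAG and are shared by all
 * member ports, so only flush them when the last member leaves the
 * FID. Non-LAG ports can always flush their own entries.
 */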
3552 static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
3555 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3556 u8 local_port = mlxsw_sp_port->local_port;
3557 u16 lag_id = mlxsw_sp_port->lag_id;
3558 u64 max_lag_members;
3561 if (!mlxsw_sp_port->lagged)
3564 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3566 for (i = 0; i < max_lag_members; i++) {
3567 struct mlxsw_sp_port *lag_port;
3569 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
3570 if (!lag_port || lag_port->local_port == local_port)
3572 if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
3580 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3583 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3584 char sfdf_pl[MLXSW_REG_SFDF_LEN];
3586 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
3587 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3588 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
3589 mlxsw_sp_port->local_port);
3591 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
3592 mlxsw_sp_port->local_port, fid);
3594 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3598 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3601 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3602 char sfdf_pl[MLXSW_REG_SFDF_LEN];
3604 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
3605 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3606 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
3608 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
3609 mlxsw_sp_port->lag_id, fid);
3611 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3614 int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
3616 if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
3619 if (mlxsw_sp_port->lagged)
3620 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
3623 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
3626 static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
3628 struct mlxsw_sp_fid *f, *tmp;
3630 list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
3631 if (--f->ref_count == 0)
3632 mlxsw_sp_fid_destroy(mlxsw_sp, f);
3637 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
3638 struct net_device *br_dev)
3640 return !mlxsw_sp->master_bridge.dev ||
3641 mlxsw_sp->master_bridge.dev == br_dev;
3644 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
3645 struct net_device *br_dev)
3647 mlxsw_sp->master_bridge.dev = br_dev;
3648 mlxsw_sp->master_bridge.ref_count++;
3651 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
3653 if (--mlxsw_sp->master_bridge.ref_count == 0) {
3654 mlxsw_sp->master_bridge.dev = NULL;
3655 /* It's possible upper VLAN devices are still holding
3656 * references to underlying FIDs. Drop the reference
3657 * and release the resources if it was the last one.
3658 * If it wasn't, then something bad happened. */
3660 mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
3664 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
3665 struct net_device *br_dev)
3667 struct net_device *dev = mlxsw_sp_port->dev;
3670 /* When a port is not bridged, untagged packets are tagged with
3671 * PVID=VID=1, thereby creating an implicit VLAN interface in
3672 * the device. Remove it and let the bridge code take care of its own VLANs. */
3675 err = mlxsw_sp_port_kill_vid(dev, 0, 1);
3679 mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
3681 mlxsw_sp_port->learning = 1;
3682 mlxsw_sp_port->learning_sync = 1;
3683 mlxsw_sp_port->uc_flood = 1;
3684 mlxsw_sp_port->mc_flood = 1;
3685 mlxsw_sp_port->mc_router = 0;
3686 mlxsw_sp_port->mc_disabled = 1;
3687 mlxsw_sp_port->bridged = 1;
3692 static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3694 struct net_device *dev = mlxsw_sp_port->dev;
3696 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
3698 mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
3700 mlxsw_sp_port->learning = 0;
3701 mlxsw_sp_port->learning_sync = 0;
3702 mlxsw_sp_port->uc_flood = 0;
3703 mlxsw_sp_port->mc_flood = 0;
3704 mlxsw_sp_port->mc_router = 0;
3705 mlxsw_sp_port->bridged = 0;
3707 /* Add an implicit VLAN interface to the device, so that untagged
3708 * packets are classified to the default vFID. */
3710 mlxsw_sp_port_add_vid(dev, 0, 1);
3713 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3715 char sldr_pl[MLXSW_REG_SLDR_LEN];
3717 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3718 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3721 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3723 char sldr_pl[MLXSW_REG_SLDR_LEN];
3725 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3726 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3729 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3730 u16 lag_id, u8 port_index)
3732 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3733 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3735 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3736 lag_id, port_index);
3737 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3740 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3743 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3744 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3746 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3748 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3751 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3754 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3755 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3757 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3759 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3762 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3765 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3766 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3768 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3770 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
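/* Return the LAG ID already bound to lag_dev, or otherwise the first
 * free one; fails when all LAG IDs are in use.
 */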
3773 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3774 struct net_device *lag_dev,
3777 struct mlxsw_sp_upper *lag;
3778 int free_lag_id = -1;
3782 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3783 for (i = 0; i < max_lag; i++) {
3784 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3785 if (lag->ref_count) {
3786 if (lag->dev == lag_dev) {
3790 } else if (free_lag_id < 0) {
3794 if (free_lag_id < 0)
3796 *p_lag_id = free_lag_id;
3801 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3802 struct net_device *lag_dev,
3803 struct netdev_lag_upper_info *lag_upper_info)
3807 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3809 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3814 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3815 u16 lag_id, u8 *p_port_index)
3817 u64 max_lag_members;
3820 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3822 for (i = 0; i < max_lag_members; i++) {
3823 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3832 mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3833 struct net_device *lag_dev, u16 lag_id)
3835 struct mlxsw_sp_port *mlxsw_sp_vport;
3836 struct mlxsw_sp_fid *f;
3838 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3839 if (WARN_ON(!mlxsw_sp_vport))
3842 /* If the vPort is assigned a RIF, then leave it, since it's no longer valid. */
3845 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3847 f->leave(mlxsw_sp_vport);
3849 mlxsw_sp_vport->lag_id = lag_id;
3850 mlxsw_sp_vport->lagged = 1;
3851 mlxsw_sp_vport->dev = lag_dev;
3855 mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3857 struct mlxsw_sp_port *mlxsw_sp_vport;
3858 struct mlxsw_sp_fid *f;
3860 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3861 if (WARN_ON(!mlxsw_sp_vport))
3864 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3866 f->leave(mlxsw_sp_vport);
3868 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
3869 mlxsw_sp_vport->lagged = 0;
3872 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3873 struct net_device *lag_dev)
3875 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3876 struct mlxsw_sp_upper *lag;
3881 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3884 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3885 if (!lag->ref_count) {
3886 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3892 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3895 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3897 goto err_col_port_add;
3898 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
3900 goto err_col_port_enable;
3902 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3903 mlxsw_sp_port->local_port);
3904 mlxsw_sp_port->lag_id = lag_id;
3905 mlxsw_sp_port->lagged = 1;
3908 mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);
3912 err_col_port_enable:
3913 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3915 if (!lag->ref_count)
3916 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3920 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
3921 struct net_device *lag_dev)
3923 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3924 u16 lag_id = mlxsw_sp_port->lag_id;
3925 struct mlxsw_sp_upper *lag;
3927 if (!mlxsw_sp_port->lagged)
3929 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3930 WARN_ON(lag->ref_count == 0);
3932 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
3933 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3935 if (mlxsw_sp_port->bridged) {
3936 mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
3937 mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
3940 if (lag->ref_count == 1)
3941 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3943 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
3944 mlxsw_sp_port->local_port);
3945 mlxsw_sp_port->lagged = 0;
3948 mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
3951 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3954 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3955 char sldr_pl[MLXSW_REG_SLDR_LEN];
3957 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3958 mlxsw_sp_port->local_port);
3959 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3962 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3965 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3966 char sldr_pl[MLXSW_REG_SLDR_LEN];
3968 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3969 mlxsw_sp_port->local_port);
3970 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3973 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
3974 bool lag_tx_enabled)
3977 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
3978 mlxsw_sp_port->lag_id);
3980 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3981 mlxsw_sp_port->lag_id);
3984 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3985 struct netdev_lag_lower_state_info *info)
3987 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
3990 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
3991 struct net_device *vlan_dev)
3993 struct mlxsw_sp_port *mlxsw_sp_vport;
3994 u16 vid = vlan_dev_vlan_id(vlan_dev);
3996 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3997 if (WARN_ON(!mlxsw_sp_vport))
4000 mlxsw_sp_vport->dev = vlan_dev;
4005 static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
4006 struct net_device *vlan_dev)
4008 struct mlxsw_sp_port *mlxsw_sp_vport;
4009 u16 vid = vlan_dev_vlan_id(vlan_dev);
4011 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
4012 if (WARN_ON(!mlxsw_sp_vport))
4015 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
4018 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
4019 unsigned long event, void *ptr)
4021 struct netdev_notifier_changeupper_info *info;
4022 struct mlxsw_sp_port *mlxsw_sp_port;
4023 struct net_device *upper_dev;
4024 struct mlxsw_sp *mlxsw_sp;
4027 mlxsw_sp_port = netdev_priv(dev);
4028 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4032 case NETDEV_PRECHANGEUPPER:
4033 upper_dev = info->upper_dev;
4034 if (!is_vlan_dev(upper_dev) &&
4035 !netif_is_lag_master(upper_dev) &&
4036 !netif_is_bridge_master(upper_dev) &&
4037 !netif_is_l3_master(upper_dev))
4041 /* A HW limitation forbids putting ports in multiple bridges. */
4042 if (netif_is_bridge_master(upper_dev) &&
4043 !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
4045 if (netif_is_lag_master(upper_dev) &&
4046 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
4049 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
4051 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
4052 !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
4055 case NETDEV_CHANGEUPPER:
4056 upper_dev = info->upper_dev;
4057 if (is_vlan_dev(upper_dev)) {
4059 err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
4062 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
4064 } else if (netif_is_bridge_master(upper_dev)) {
4066 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4069 mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
4070 } else if (netif_is_lag_master(upper_dev)) {
4072 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
4075 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4077 } else if (netif_is_l3_master(upper_dev)) {
4079 err = mlxsw_sp_port_vrf_join(mlxsw_sp_port);
4081 mlxsw_sp_port_vrf_leave(mlxsw_sp_port);
4092 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4093 unsigned long event, void *ptr)
4095 struct netdev_notifier_changelowerstate_info *info;
4096 struct mlxsw_sp_port *mlxsw_sp_port;
4099 mlxsw_sp_port = netdev_priv(dev);
4103 case NETDEV_CHANGELOWERSTATE:
4104 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4105 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4106 info->lower_state_info);
4108 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4116 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
4117 unsigned long event, void *ptr)
4120 case NETDEV_PRECHANGEUPPER:
4121 case NETDEV_CHANGEUPPER:
4122 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
4123 case NETDEV_CHANGELOWERSTATE:
4124 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
4130 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4131 unsigned long event, void *ptr)
4133 struct net_device *dev;
4134 struct list_head *iter;
4137 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4138 if (mlxsw_sp_port_dev_check(dev)) {
4139 ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
4148 static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
4149 struct net_device *vlan_dev)
4151 u16 fid = vlan_dev_vlan_id(vlan_dev);
4152 struct mlxsw_sp_fid *f;
4154 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
4156 f = mlxsw_sp_fid_create(mlxsw_sp, fid);
4166 static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
4167 struct net_device *vlan_dev)
4169 u16 fid = vlan_dev_vlan_id(vlan_dev);
4170 struct mlxsw_sp_fid *f;
4172 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
4174 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
4175 if (f && --f->ref_count == 0)
4176 mlxsw_sp_fid_destroy(mlxsw_sp, f);
4179 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
4180 unsigned long event, void *ptr)
4182 struct netdev_notifier_changeupper_info *info;
4183 struct net_device *upper_dev;
4184 struct mlxsw_sp *mlxsw_sp;
4187 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
4194 case NETDEV_PRECHANGEUPPER:
4195 upper_dev = info->upper_dev;
4196 if (!is_vlan_dev(upper_dev) && !netif_is_l3_master(upper_dev))
4198 if (is_vlan_dev(upper_dev) &&
4199 br_dev != mlxsw_sp->master_bridge.dev)
4202 case NETDEV_CHANGEUPPER:
4203 upper_dev = info->upper_dev;
4204 if (is_vlan_dev(upper_dev)) {
4206 err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
4209 mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
4211 } else if (netif_is_l3_master(upper_dev)) {
4213 err = mlxsw_sp_bridge_vrf_join(mlxsw_sp,
4216 mlxsw_sp_bridge_vrf_leave(mlxsw_sp, br_dev);
4227 static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
4229 return find_first_zero_bit(mlxsw_sp->vfids.mapped,
4233 static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
4235 char sfmr_pl[MLXSW_REG_SFMR_LEN];
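/* The SFMR opcode is assumed to be 0 for FID creation and non-zero
 * for destruction, hence the !create below.
 */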
4237 mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
4238 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
4241 static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
4243 static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
4244 struct net_device *br_dev)
4246 struct device *dev = mlxsw_sp->bus_info->dev;
4247 struct mlxsw_sp_fid *f;
4251 vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
4252 if (vfid == MLXSW_SP_VFID_MAX) {
4253 dev_err(dev, "No available vFIDs\n");
4254 return ERR_PTR(-ERANGE);
4257 fid = mlxsw_sp_vfid_to_fid(vfid);
4258 err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
4260 dev_err(dev, "Failed to create FID=%d\n", fid);
4261 return ERR_PTR(err);
4264 f = kzalloc(sizeof(*f), GFP_KERNEL);
4266 goto err_allocate_vfid;
4268 f->leave = mlxsw_sp_vport_vfid_leave;
4272 list_add(&f->list, &mlxsw_sp->vfids.list);
4273 set_bit(vfid, mlxsw_sp->vfids.mapped);
4278 mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
4279 return ERR_PTR(-ENOMEM);
4282 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
4283 struct mlxsw_sp_fid *f)
4285 u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
4288 clear_bit(vfid, mlxsw_sp->vfids.mapped);
4292 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
4296 mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
4299 static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
4302 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
4303 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4305 return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
4309 static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
4310 struct net_device *br_dev)
4312 struct mlxsw_sp_fid *f;
4315 f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
4317 f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
4322 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
4324 goto err_vport_flood_set;
4326 err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
4328 goto err_vport_fid_map;
4330 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
4333 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
4338 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
4339 err_vport_flood_set:
4341 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
4345 static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
4347 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
4349 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
4351 mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
4353 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
4355 mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);
4357 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
4358 if (--f->ref_count == 0)
4359 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
4362 static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
4363 struct net_device *br_dev)
4365 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
4366 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4367 struct net_device *dev = mlxsw_sp_vport->dev;
4370 if (f && !WARN_ON(!f->leave))
4371 f->leave(mlxsw_sp_vport);
4373 err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
4375 netdev_err(dev, "Failed to join vFID\n");
4379 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
4381 netdev_err(dev, "Failed to enable learning\n");
4382 goto err_port_vid_learning_set;
4385 mlxsw_sp_vport->learning = 1;
4386 mlxsw_sp_vport->learning_sync = 1;
4387 mlxsw_sp_vport->uc_flood = 1;
4388 mlxsw_sp_vport->mc_flood = 1;
4389 mlxsw_sp_vport->mc_router = 0;
4390 mlxsw_sp_vport->mc_disabled = 1;
4391 mlxsw_sp_vport->bridged = 1;
4395 err_port_vid_learning_set:
4396 mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
4400 static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
4402 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4404 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
4406 mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
4408 mlxsw_sp_vport->learning = 0;
4409 mlxsw_sp_vport->learning_sync = 0;
4410 mlxsw_sp_vport->uc_flood = 0;
4411 mlxsw_sp_vport->mc_flood = 0;
4412 mlxsw_sp_vport->mc_router = 0;
4413 mlxsw_sp_vport->bridged = 0;
4417 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
4418 const struct net_device *br_dev)
4420 struct mlxsw_sp_port *mlxsw_sp_vport;
4422 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
4424 struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
4426 if (dev && dev == br_dev)
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_l3_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
								 upper_dev);
			else
				mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		} else if (netif_is_l3_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_vport_vrf_join(mlxsw_sp_vport);
			else
				mlxsw_sp_vport_vrf_leave(mlxsw_sp_vport);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

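/* Propagate the event to every mlxsw port member of the LAG device;
 * each member resolves its own vPort by VID.
 */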
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	if (!mlxsw_sp)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		/* VLAN devices are only allowed on top of the
		 * VLAN-aware bridge.
		 */
		if (WARN_ON(vlan_dev_real_dev(vlan_dev) !=
			    mlxsw_sp->master_bridge.dev))
			return -EINVAL;
		if (!netif_is_l3_master(info->upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		if (netif_is_l3_master(info->upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_bridge_vrf_join(mlxsw_sp,
							       vlan_dev);
			else
				mlxsw_sp_bridge_vrf_leave(mlxsw_sp, vlan_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

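/* VLAN device events are dispatched according to the real device the
 * VLAN device is configured on top of: physical port, LAG device or
 * the VLAN-aware bridge.
 */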
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, event,
							    ptr);

	return 0;
}

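/* Top-level netdevice notifier. CHANGEADDR and CHANGEMTU are handled
 * by the router code, as they may affect router interfaces; all other
 * events are routed according to the device type.
 */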
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};

static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};

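/* The notifier blocks are registered before the core and PCI drivers
 * and unregistered after them; the error path below unwinds the
 * registrations in reverse order.
 */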
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);