// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>
40 #include "spectrum_cnt.h"
41 #include "spectrum_dpipe.h"
42 #include "spectrum_ipip.h"
43 #include "spectrum_mr.h"
44 #include "spectrum_mr_tcam.h"
45 #include "spectrum_router.h"
46 #include "spectrum_span.h"

struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev; /* NULL for underlay RIF */
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u8 mac_profile_id;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
	u16 ul_rif_id; /* Reserved for Spectrum. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif,
			 struct netlink_ext_ack *extack);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};

struct mlxsw_sp_rif_mac_profile {
	unsigned char mac_prefix[ETH_ALEN];
	refcount_t ref_count;
	u8 id;
};

struct mlxsw_sp_router_ops {
	int (*init)(struct mlxsw_sp *mlxsw_sp);
	int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
};

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}
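
/* Note on opcodes: the RICNT query above uses MLXSW_REG_RICNT_OPCODE_NOP, so
 * reading a counter here leaves its value intact, while
 * mlxsw_sp_rif_counter_fetch_clear() below uses MLXSW_REG_RICNT_OPCODE_CLEAR,
 * which zeroes the counter as part of the same query.
 */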

struct mlxsw_sp_rif_counter_set_basic {
	u64 good_unicast_packets;
	u64 good_multicast_packets;
	u64 good_broadcast_packets;
	u64 good_unicast_bytes;
	u64 good_multicast_bytes;
	u64 good_broadcast_bytes;
	u64 error_packets;
	u64 discard_packets;
	u64 error_bytes;
	u64 discard_bytes;
};

static int
mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
				 enum mlxsw_sp_rif_counter_dir dir,
				 struct mlxsw_sp_rif_counter_set_basic *set)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	int err;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;

	if (set) {
#define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME) \
		(set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))

		MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
		MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
		MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
		MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
		MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
		MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
		MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
		MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
		MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
		MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);
#undef MLXSW_SP_RIF_COUNTER_EXTRACT
	}

	return 0;
}
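
/* For reference, MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets) above
 * expands to:
 *
 *	set->good_unicast_packets =
 *		mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
 */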

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;
	int err;

	if (mlxsw_sp_rif_counter_valid_get(rif, dir))
		return 0;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
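
/* sizeof(struct in6_addr) * BITS_PER_BYTE + 1 = 16 * 8 + 1 = 129: one bit
 * per possible prefix length /0../128. The bitmap is sized for IPv6 and
 * shared with IPv4, which only uses the first 33 bits.
 */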

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}
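
/* A minimal usage sketch of the bitmap helpers above (hypothetical values,
 * not taken from a real caller in this file):
 *
 *	struct mlxsw_sp_prefix_usage usage = {};
 *	unsigned char prefix;
 *
 *	mlxsw_sp_prefix_usage_set(&usage, 24);
 *	mlxsw_sp_prefix_usage_set(&usage, 32);
 *	mlxsw_sp_prefix_usage_for_each(prefix, &usage)
 *		pr_debug("prefix len in use: %u\n", prefix);
 *	mlxsw_sp_prefix_usage_clear(&usage, 24);
 */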

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};
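
/* Example: for a tunnel whose underlay local address is 192.0.2.1 (an
 * illustrative address), the 192.0.2.1/32 local route in the underlay table
 * is the entry that gets promoted from MLXSW_SP_FIB_ENTRY_TYPE_TRAP to
 * MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP; see
 * mlxsw_sp_ipip_entry_promote_decap() below.
 */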

struct mlxsw_sp_nexthop_group_info;
struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry;

struct mlxsw_sp_fib_node {
	struct mlxsw_sp_fib_entry *fib_entry;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

static struct mlxsw_sp_fib_entry_priv *
mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
{
	struct mlxsw_sp_fib_entry_priv *priv;

	if (!ll_ops->fib_entry_priv_size)
		/* No need to have priv */
		return NULL;

	priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	refcount_set(&priv->refcnt, 1);
	return priv;
}

static void
mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
{
	kfree(priv);
}

static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
{
	refcount_inc(&priv->refcnt);
}

static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
{
	if (!priv || !refcount_dec_and_test(&priv->refcnt))
		return;
	mlxsw_sp_fib_entry_priv_destroy(priv);
}

static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
						struct mlxsw_sp_fib_entry_priv *priv)
{
	if (!priv)
		return;
	mlxsw_sp_fib_entry_priv_hold(priv);
	list_add(&priv->list, &op_ctx->fib_entry_priv_list);
}

static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
{
	struct mlxsw_sp_fib_entry_priv *priv, *tmp;

	list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
		mlxsw_sp_fib_entry_priv_put(priv);
	INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
}

struct mlxsw_sp_fib_entry {
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
	struct mlxsw_sp_fib_entry_priv *priv;
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	struct fib_info *fi;
	u32 tb_id;
	dscp_t dscp;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
	const struct mlxsw_sp_router_ll_ops *ll_ops;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};

static int mlxsw_sp_router_ll_basic_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
					 enum mlxsw_sp_l3proto proto)
{
	return 0;
}

static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
			       xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
}

static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
			       xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
}

static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
			       xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
}

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	err = ll_ops->init(mlxsw_sp, vr->id, proto);
	if (err)
		return ERR_PTR(err);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	fib->ll_ops = ll_ops;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_router_ll_ops *ll_ops,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char xralta_pl[MLXSW_REG_XRALTA_LEN];

	mlxsw_reg_xralta_pack(xralta_pl, true,
			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			      lpm_tree->id);
	return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_router_ll_ops *ll_ops,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char xralta_pl[MLXSW_REG_XRALTA_LEN];

	mlxsw_reg_xralta_pack(xralta_pl, false,
			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			      lpm_tree->id);
	ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}

static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  const struct mlxsw_sp_router_ll_ops *ll_ops,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char xralst_pl[MLXSW_REG_XRALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
					  MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 const struct mlxsw_sp_router_ll_ops *ll_ops,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_sp_router_ll_ops *ll_ops,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops =
				mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];

	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char xraltb_pl[MLXSW_REG_XRALTB_LEN];

	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto,
			      tree_id);
	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char xraltb_pl[MLXSW_REG_XRALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}
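
/* E.g. both RT_TABLE_LOCAL (255) and RT_TABLE_DEFAULT (253) map to
 * RT_TABLE_MAIN (254), so routes from all three kernel tables share a single
 * virtual router.
 */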

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				u16 *vr_id)
{
	struct mlxsw_sp_vr *vr;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr) {
		err = -ESRCH;
		goto out;
	}
	*vr_id = vr->id;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d;
	u32 tb_id;

	rcu_read_lock();
	d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (d)
		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		tb_id = RT_TABLE_MAIN;
	rcu_read_unlock();

	return tb_id;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;
	int err;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;
	ipip_entry->parms = ipip_ops->parms_init(ol_dev);

	err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
	if (err) {
		ret = ERR_PTR(err);
		goto err_rem_ip_addr_set;
	}

	return ipip_entry;

err_rem_ip_addr_set:
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
						 enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* Not all tunnels require to increase the default parsing depth
	 * (96 bytes).
	 */
	if (ipip_ops->inc_parsing_depth)
		return mlxsw_sp_parsing_depth_inc(mlxsw_sp);

	return 0;
}

static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
						  enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipipt];

	if (ipip_ops->inc_parsing_depth)
		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
}

static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
						    ipip_entry->ipipt);
	if (err)
		goto err_parsing_depth_inc;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;

	return 0;

err_parsing_depth_inc:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   tunnel_index);
	return err;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;

	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;

	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				     enum mlxsw_sp_l3proto proto,
				     const union mlxsw_sp_l3addr *addr,
				     enum mlxsw_sp_fib_entry_type type)
{
	struct mlxsw_sp_fib_node *fib_node;
	unsigned char addr_prefix_len;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, proto);

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		addrp = &addr->addr6;
		addr_len = 16;
		addr_prefix_len = 128;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					    addr_prefix_len);
	if (!fib_node || fib_node->fib_entry->type != type)
		return NULL;

	return fib_node->fib_entry;
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		saddrp = &saddr.addr6;
		saddr_len = 16;
		saddr_prefix_len = 128;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node ||
	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_node->fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	struct net_device *ul_dev;

	rcu_read_lock();
	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
	if (!ul_dev)
		goto out_unlock;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			goto out_unlock;

	rcu_read_unlock();

	return NULL;

out_unlock:
	rcu_read_unlock();
	return ipip_entry;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

static bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				       const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		rcu_read_lock();
		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
				       const struct net_device *dev)
{
	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	return ops->can_offload(mlxsw_sp, ol_dev);
}

static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;

	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
	if (decap_fib_entry)
		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
						  decap_fib_entry);
}

static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
			u16 ul_rif_id, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	struct in6_addr *saddr6;
	u32 saddr4;

	ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
						   ipip_options, ul_vr_id,
						   ul_rif_id, saddr4,
						   lb_cf.okey);
		break;

	case MLXSW_SP_L3_PROTO_IPV6:
		saddr6 = &lb_cf.saddr.addr6;
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
						   ipip_options, ul_vr_id,
						   ul_rif_id, saddr6,
						   lb_cf.okey);
		break;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;
	int err = 0;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry) {
		lb_rif = ipip_entry->ol_lb;
		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
					      lb_rif->ul_rif_id, true);
		if (err)
			goto out;
		lb_rif->common.mtu = ol_dev->mtu;
	}

out:
	return err;
}

static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *old_rif,
					 struct mlxsw_sp_rif *new_rif);
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry,
				 bool keep_encap,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;

	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
						     ipip_entry->ipipt,
						     ipip_entry->ol_dev,
						     extack);
	if (IS_ERR(new_lb_rif))
		return PTR_ERR(new_lb_rif);
	ipip_entry->ol_lb = new_lb_rif;

	if (keep_encap)
		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
					     &new_lb_rif->common);

	mlxsw_sp_rif_destroy(&old_lb_rif->common);

	return 0;
}

static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif);

/**
 * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
 * @mlxsw_sp: mlxsw_sp.
 * @ipip_entry: IPIP entry.
 * @recreate_loopback: Recreates the associated loopback RIF.
 * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
 *              relevant when recreate_loopback is true.
 * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
 *                   is only relevant when recreate_loopback is false.
 * @extack: extack.
 *
 * Return: Non-zero value on failure.
 */
int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry,
					bool recreate_loopback,
					bool keep_encap,
					bool update_nexthops,
					struct netlink_ext_ack *extack)
{
	int err;

	/* RIFs can't be edited, so to update loopback, we need to destroy and
	 * recreate it. That creates a window of opportunity where RALUE and
	 * RATR registers end up referencing a RIF that's already gone. RATRs
	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	if (recreate_loopback) {
		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
						       keep_encap, extack);
		if (err)
			return err;
	} else if (update_nexthops) {
		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
					    &ipip_entry->ol_lb->common);
	}

	if (ipip_entry->ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);

	return 0;
}
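
/* For reference, the (recreate_loopback, keep_encap, update_nexthops) flag
 * combinations used by the callers below: an overlay VRF move passes
 * (true, false, false), an underlay VRF move passes (true, true, false), and
 * an underlay up/down event passes (false, false, true).
 */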

static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return 0;

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, false, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_ipip_entry *ipip_entry,
				     struct net_device *ul_dev,
				     bool *demote_this,
				     struct netlink_ext_ack *extack)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;

	/* Moving underlay to a different VRF might cause local address
	 * conflict, and the conflicting tunnels need to be demoted.
	 */
	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
						 saddr, ul_tb_id,
						 ipip_entry)) {
		*demote_this = true;
		return 0;
	}

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, true, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipip_entry *ipip_entry,
				    struct net_device *ul_dev)
{
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_ipip_entry *ipip_entry,
				      struct net_device *ul_dev)
{
	/* A down underlay device causes encapsulated packets to not be
	 * forwarded, but decap still works. So refresh next hops without
	 * touching anything else.
	 */
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
					struct net_device *ol_dev,
					struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		/* A change might make a tunnel eligible for offloading, but
		 * that is currently not implemented. What falls to slow path
		 * stays there.
		 */
		return 0;

	/* A change might make a tunnel not eligible for offloading. */
	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
						 ipip_entry->ipipt)) {
		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
		return 0;
	}

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
	return err;
}

void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct net_device *ol_dev = ipip_entry->ol_dev;

	if (ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

/* The configuration where several tunnels have the same local address in the
 * same underlay table needs special treatment in the HW. That is currently not
 * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in the argument
 * `except'.
 */
bool
mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
				     enum mlxsw_sp_l3proto ul_proto,
				     union mlxsw_sp_l3addr saddr,
				     u32 ul_tb_id,
				     const struct mlxsw_sp_ipip_entry *except)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		if (ipip_entry != except &&
		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
						      ul_tb_id, ipip_entry)) {
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			return true;
		}
	}

	return false;
}

static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *ul_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		rcu_read_lock();
		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();
		if (ipip_ul_dev == ul_dev)
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
	}
}

static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *ol_dev,
					    unsigned long event,
					    struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	switch (event) {
	case NETDEV_REGISTER:
		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_UNREGISTER:
		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_UP:
		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_DOWN:
		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		if (netif_is_l3_master(chup->upper_dev))
			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
								   ol_dev,
								   extack);
		break;
	case NETDEV_CHANGE:
		extack = info->extack;
		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
							      ol_dev, extack);
		break;
	case NETDEV_CHANGEMTU:
		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static int
__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_ipip_entry *ipip_entry,
				   struct net_device *ul_dev,
				   bool *demote_this,
				   unsigned long event,
				   struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		if (netif_is_l3_master(chup->upper_dev))
			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
								    ipip_entry,
								    ul_dev,
								    demote_this,
								    extack);
		break;

	case NETDEV_UP:
		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
							   ul_dev);
	case NETDEV_DOWN:
		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
							     ipip_entry,
							     ul_dev);
	}
	return 0;
}

static int
mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *ul_dev,
				 unsigned long event,
				 struct netdev_notifier_info *info)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
								ul_dev,
								ipip_entry))) {
		struct mlxsw_sp_ipip_entry *prev;
		bool demote_this = false;

		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
							 ul_dev, &demote_this,
							 event, info);
		if (err) {
			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
								 ul_dev);
			break;
		}

		if (demote_this) {
			if (list_is_first(&ipip_entry->ipip_list_node,
					  &mlxsw_sp->router->ipip_list))
				prev = NULL;
			else
				/* This can't be cached from previous iteration,
				 * because that entry could be gone now.
				 */
				prev = list_prev_entry(ipip_entry,
						       ipip_list_node);
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			ipip_entry = prev;
		}
	}
	mutex_unlock(&mlxsw_sp->router->lock);

	return err;
}

int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
				      enum mlxsw_sp_l3proto ul_proto,
				      const union mlxsw_sp_l3addr *ul_sip,
				      u32 tunnel_index)
{
	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);

	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
		err = -EINVAL;
		goto out;
	}

	router->nve_decap_config.ul_tb_id = ul_tb_id;
	router->nve_decap_config.tunnel_index = tunnel_index;
	router->nve_decap_config.ul_proto = ul_proto;
	router->nve_decap_config.ul_sip = *ul_sip;
	router->nve_decap_config.valid = true;

	/* It is valid to create a tunnel with a local IP and only later
	 * assign this IP address to a local interface
	 */
	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		goto out;

	fib_entry->decap.tunnel_index = tunnel_index;
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;

	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
	if (err)
		goto err_fib_entry_update;

	goto out;

err_fib_entry_update:
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
				      enum mlxsw_sp_l3proto ul_proto,
				      const union mlxsw_sp_l3addr *ul_sip)
{
	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_fib_entry *fib_entry;

	mutex_lock(&mlxsw_sp->router->lock);

	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
		goto out;

	router->nve_decap_config.valid = false;

	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		goto out;

	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}

static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
					 u32 tb_id,
					 enum mlxsw_sp_l3proto ul_proto,
					 const union mlxsw_sp_l3addr *ul_sip)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;

	return router->nve_decap_config.valid &&
	       router->nve_decap_config.ul_tb_id == tb_id &&
	       router->nve_decap_config.ul_proto == ul_proto &&
	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
		       sizeof(*ul_sip));
}

struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
	unsigned int counter_index;
	bool counter_valid;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};

struct mlxsw_sp_neigh_entry *
mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
			struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!neigh_entry) {
		if (list_empty(&rif->neigh_list))
			return NULL;
		else
			return list_first_entry(&rif->neigh_list,
						typeof(*neigh_entry),
						rif_list_node);
	}
	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
		return NULL;
	return list_next_entry(neigh_entry, rif_list_node);
}
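
/* Typical iteration over a RIF's neighbour entries using the helper above;
 * a sketch, not a quote from an actual caller:
 *
 *	struct mlxsw_sp_neigh_entry *neigh_entry;
 *
 *	for (neigh_entry = mlxsw_sp_rif_neigh_next(rif, NULL); neigh_entry;
 *	     neigh_entry = mlxsw_sp_rif_neigh_next(rif, neigh_entry))
 *		...;
 */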

int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->key.n->tbl->family;
}

unsigned char *
mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->ha;
}

u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n;

	n = neigh_entry->key.n;
	return ntohl(*((__be32 *) n->primary_key));
}

struct in6_addr *
mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n;

	n = neigh_entry->key.n;
	return (struct in6_addr *) &n->primary_key;
}

int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_neigh_entry *neigh_entry,
			       u64 *p_counter)
{
	if (!neigh_entry->counter_valid)
		return -EINVAL;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
					 p_counter, NULL);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
			   u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;

	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}

static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}

static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}
2287 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2288 struct mlxsw_sp_neigh_entry *neigh_entry)
2290 struct devlink *devlink;
2291 const char *table_name;
2293 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2295 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2298 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2305 devlink = priv_to_devlink(mlxsw_sp->core);
2306 return devlink_dpipe_table_counter_enabled(devlink, table_name);
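/* Host entry counters are thus only allocated when the corresponding
 * dpipe host table (IPv4 or IPv6) has counters enabled by the user,
 * which keeps flow counters free for other consumers otherwise.
 */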
2310 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2311 struct mlxsw_sp_neigh_entry *neigh_entry)
2313 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2316 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2319 neigh_entry->counter_valid = true;
2323 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2324 struct mlxsw_sp_neigh_entry *neigh_entry)
2326 if (!neigh_entry->counter_valid)
2328 mlxsw_sp_flow_counter_free(mlxsw_sp,
2329 neigh_entry->counter_index);
2330 neigh_entry->counter_valid = false;
2333 static struct mlxsw_sp_neigh_entry *
2334 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2336 struct mlxsw_sp_neigh_entry *neigh_entry;
2337 struct mlxsw_sp_rif *rif;
2340 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2342 return ERR_PTR(-EINVAL);
2344 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2346 return ERR_PTR(-ENOMEM);
2348 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2350 goto err_neigh_entry_insert;
2352 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2353 atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count);
2354 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2358 err_neigh_entry_insert:
2359 mlxsw_sp_neigh_entry_free(neigh_entry);
2360 return ERR_PTR(err);
2364 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2365 struct mlxsw_sp_neigh_entry *neigh_entry)
2367 list_del(&neigh_entry->rif_list_node);
2368 atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count);
2369 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2370 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2371 mlxsw_sp_neigh_entry_free(neigh_entry);
2374 static struct mlxsw_sp_neigh_entry *
2375 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2377 struct mlxsw_sp_neigh_key key;
2380 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2381 &key, mlxsw_sp_neigh_ht_params);
2385 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2387 unsigned long interval;
2389 #if IS_ENABLED(CONFIG_IPV6)
2390 interval = min_t(unsigned long,
2391 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2392 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2394 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2396 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
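/* The polling interval tracks the neighbour tables' DELAY_PROBE_TIME so
 * that activity learned from the device is reported back to the kernel
 * before it probes or ages out an entry; when IPv6 is enabled, the
 * smaller of the ARP and ND values is used.
 */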
2399 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2403 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
2404 struct net_device *dev;
2405 struct neighbour *n;
2410 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2412 if (WARN_ON_ONCE(rif >= max_rifs))
2414 if (!mlxsw_sp->router->rifs[rif]) {
2415 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2420 dev = mlxsw_sp->router->rifs[rif]->dev;
2421 n = neigh_lookup(&arp_tbl, &dipn, dev);
2425 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2426 neigh_event_send(n, NULL);
2430 #if IS_ENABLED(CONFIG_IPV6)
2431 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2435 struct net_device *dev;
2436 struct neighbour *n;
2437 struct in6_addr dip;
2440 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2443 if (!mlxsw_sp->router->rifs[rif]) {
2444 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2448 dev = mlxsw_sp->router->rifs[rif]->dev;
2449 n = neigh_lookup(&nd_tbl, &dip, dev);
2453 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2454 neigh_event_send(n, NULL);
2458 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2465 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2472 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2474 /* Hardware starts counting at 0, so add 1. */
2477 /* Each record consists of several neighbour entries. */
2478 for (i = 0; i < num_entries; i++) {
2481 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2482 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2488 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2492 /* One record contains one entry. */
2493 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2497 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2498 char *rauhtd_pl, int rec_index)
2500 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2501 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2502 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2505 case MLXSW_REG_RAUHTD_TYPE_IPV6:
2506 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2512 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2514 u8 num_rec, last_rec_index, num_entries;
2516 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2517 last_rec_index = num_rec - 1;
2519 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2521 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2522 MLXSW_REG_RAUHTD_TYPE_IPV6)
2525 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2527 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
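/* The dump is only considered full when all records are in use and the
 * last record cannot hold more entries: an IPv6 record always carries a
 * single entry, while an IPv4 record is full once it holds
 * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC entries.
 */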
2533 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2535 enum mlxsw_reg_rauhtd_type type)
2540 /* Ensure the RIF we read from the device does not change mid-dump. */
2541 mutex_lock(&mlxsw_sp->router->lock);
2543 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2544 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2547 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2550 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2551 for (i = 0; i < num_rec; i++)
2552 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2554 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2555 mutex_unlock(&mlxsw_sp->router->lock);
2560 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2562 enum mlxsw_reg_rauhtd_type type;
2566 if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count))
2569 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2573 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2574 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2578 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2579 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2585 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2587 struct mlxsw_sp_neigh_entry *neigh_entry;
2589 mutex_lock(&mlxsw_sp->router->lock);
2590 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2591 nexthop_neighs_list_node)
2592 /* If this neigh has nexthops, make the kernel think this neigh
2593 * is active regardless of the traffic.
2594 */
2595 neigh_event_send(neigh_entry->key.n, NULL);
2596 mutex_unlock(&mlxsw_sp->router->lock);
2600 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2602 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2604 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2605 msecs_to_jiffies(interval));
2608 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2610 struct mlxsw_sp_router *router;
2613 router = container_of(work, struct mlxsw_sp_router,
2614 neighs_update.dw.work);
2615 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2617 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
2619 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2621 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2624 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2626 struct mlxsw_sp_neigh_entry *neigh_entry;
2627 struct mlxsw_sp_router *router;
2629 router = container_of(work, struct mlxsw_sp_router,
2630 nexthop_probe_dw.work);
2631 /* Iterate over nexthop neighbours, find those that are unresolved
2632 * and send ARP on them. This solves the chicken-and-egg problem
2633 * where the nexthop wouldn't get offloaded until the neighbour is
2634 * resolved, but it would never get resolved as long as traffic is
2635 * flowing in HW via a different nexthop.
2636 */
2637 mutex_lock(&router->lock);
2638 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2639 nexthop_neighs_list_node)
2640 if (!neigh_entry->connected)
2641 neigh_event_send(neigh_entry->key.n, NULL);
2642 mutex_unlock(&router->lock);
2644 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2645 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2649 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2650 struct mlxsw_sp_neigh_entry *neigh_entry,
2651 bool removing, bool dead);
2653 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2655 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2656 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2660 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2661 struct mlxsw_sp_neigh_entry *neigh_entry,
2662 enum mlxsw_reg_rauht_op op)
2664 struct neighbour *n = neigh_entry->key.n;
2665 u32 dip = ntohl(*((__be32 *) n->primary_key));
2666 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2668 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2670 if (neigh_entry->counter_valid)
2671 mlxsw_reg_rauht_pack_counter(rauht_pl,
2672 neigh_entry->counter_index);
2673 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2677 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2678 struct mlxsw_sp_neigh_entry *neigh_entry,
2679 enum mlxsw_reg_rauht_op op)
2681 struct neighbour *n = neigh_entry->key.n;
2682 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2683 const char *dip = n->primary_key;
2685 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2687 if (neigh_entry->counter_valid)
2688 mlxsw_reg_rauht_pack_counter(rauht_pl,
2689 neigh_entry->counter_index);
2690 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
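/* Both variants above program a single host entry via the RAUHT register:
 * the entry's RIF, resolved MAC and destination IP, plus an optional
 * activity counter when one was allocated for the neighbour.
 */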
2693 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2695 struct neighbour *n = neigh_entry->key.n;
2697 /* Packets with a link-local destination address are trapped
2698 * after LPM lookup and never reach the neighbour table, so
2699 * there is no need to program such neighbours to the device.
2701 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2702 IPV6_ADDR_LINKLOCAL)
2708 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2709 struct mlxsw_sp_neigh_entry *neigh_entry,
2712 enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2715 if (!adding && !neigh_entry->connected)
2717 neigh_entry->connected = adding;
2718 if (neigh_entry->key.n->tbl->family == AF_INET) {
2719 err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2723 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2724 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2726 err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2736 neigh_entry->key.n->flags |= NTF_OFFLOADED;
2738 neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
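/* Mirroring the result into NTF_OFFLOADED lets user space, e.g.
 * 'ip neigh show', observe which neighbours are programmed to the device.
 */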
2742 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2743 struct mlxsw_sp_neigh_entry *neigh_entry,
2747 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2749 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2750 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2753 struct mlxsw_sp_netevent_work {
2754 struct work_struct work;
2755 struct mlxsw_sp *mlxsw_sp;
2756 struct neighbour *n;
2759 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2761 struct mlxsw_sp_netevent_work *net_work =
2762 container_of(work, struct mlxsw_sp_netevent_work, work);
2763 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2764 struct mlxsw_sp_neigh_entry *neigh_entry;
2765 struct neighbour *n = net_work->n;
2766 unsigned char ha[ETH_ALEN];
2767 bool entry_connected;
2770 /* If these parameters are changed after we release the lock,
2771 * then we are guaranteed to receive another event letting us
2772 * know about it.
2773 */
2774 read_lock_bh(&n->lock);
2775 memcpy(ha, n->ha, ETH_ALEN);
2776 nud_state = n->nud_state;
2778 read_unlock_bh(&n->lock);
2780 mutex_lock(&mlxsw_sp->router->lock);
2781 mlxsw_sp_span_respin(mlxsw_sp);
2783 entry_connected = nud_state & NUD_VALID && !dead;
2784 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2785 if (!entry_connected && !neigh_entry)
2788 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2789 if (IS_ERR(neigh_entry))
2793 if (neigh_entry->connected && entry_connected &&
2794 !memcmp(neigh_entry->ha, ha, ETH_ALEN))
2797 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2798 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2799 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2802 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2803 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2806 mutex_unlock(&mlxsw_sp->router->lock);
2811 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2813 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2815 struct mlxsw_sp_netevent_work *net_work =
2816 container_of(work, struct mlxsw_sp_netevent_work, work);
2817 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2819 mlxsw_sp_mp_hash_init(mlxsw_sp);
2823 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2825 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2827 struct mlxsw_sp_netevent_work *net_work =
2828 container_of(work, struct mlxsw_sp_netevent_work, work);
2829 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2831 __mlxsw_sp_router_init(mlxsw_sp);
2835 static int mlxsw_sp_router_schedule_work(struct net *net,
2836 struct notifier_block *nb,
2837 void (*cb)(struct work_struct *))
2839 struct mlxsw_sp_netevent_work *net_work;
2840 struct mlxsw_sp_router *router;
2842 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2843 if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2846 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2850 INIT_WORK(&net_work->work, cb);
2851 net_work->mlxsw_sp = router->mlxsw_sp;
2852 mlxsw_core_schedule_work(&net_work->work);
2856 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2857 unsigned long event, void *ptr)
2859 struct mlxsw_sp_netevent_work *net_work;
2860 struct mlxsw_sp_port *mlxsw_sp_port;
2861 struct mlxsw_sp *mlxsw_sp;
2862 unsigned long interval;
2863 struct neigh_parms *p;
2864 struct neighbour *n;
2867 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2870 /* We don't care about changes in the default table. */
2871 if (!p->dev || (p->tbl->family != AF_INET &&
2872 p->tbl->family != AF_INET6))
2875 /* We are in atomic context and can't take RTNL mutex,
2876 * so use RCU variant to walk the device chain.
2878 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2882 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2883 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2884 mlxsw_sp->router->neighs_update.interval = interval;
2886 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2888 case NETEVENT_NEIGH_UPDATE:
2891 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2894 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2898 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2900 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2904 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2905 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2908 /* Take a reference to ensure the neighbour won't be
2909 * destroyed until we drop the reference in the delayed
2910 * work.
2911 */
2912 neigh_clone(n);
2913 mlxsw_core_schedule_work(&net_work->work);
2914 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2916 case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2917 case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2918 return mlxsw_sp_router_schedule_work(ptr, nb,
2919 mlxsw_sp_router_mp_hash_event_work);
2921 case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2922 return mlxsw_sp_router_schedule_work(ptr, nb,
2923 mlxsw_sp_router_update_priority_work);
2929 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2933 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2934 &mlxsw_sp_neigh_ht_params);
2938 /* Initialize the polling interval according to the default
2939 * table.
2940 */
2941 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2943 /* Create the delayed works for the activity update and nexthop probing. */
2944 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2945 mlxsw_sp_router_neighs_update_work);
2946 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2947 mlxsw_sp_router_probe_unresolved_nexthops);
2948 atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0);
2949 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2950 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2954 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2956 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2957 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2958 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
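/* Note the tear-down order: both delayed works are cancelled
 * synchronously before the hashtable is destroyed, so no work can run
 * against freed router state.
 */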
2961 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2962 struct mlxsw_sp_rif *rif)
2964 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2966 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2968 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2969 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2973 enum mlxsw_sp_nexthop_type {
2974 MLXSW_SP_NEXTHOP_TYPE_ETH,
2975 MLXSW_SP_NEXTHOP_TYPE_IPIP,
2978 enum mlxsw_sp_nexthop_action {
2979 /* Nexthop forwards packets to an egress RIF */
2980 MLXSW_SP_NEXTHOP_ACTION_FORWARD,
2981 /* Nexthop discards packets */
2982 MLXSW_SP_NEXTHOP_ACTION_DISCARD,
2983 /* Nexthop traps packets */
2984 MLXSW_SP_NEXTHOP_ACTION_TRAP,
2987 struct mlxsw_sp_nexthop_key {
2988 struct fib_nh *fib_nh;
2991 struct mlxsw_sp_nexthop {
2992 struct list_head neigh_list_node; /* member of neigh entry list */
2993 struct list_head rif_list_node;
2994 struct list_head router_list_node;
2995 struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
2996 * this nexthop belongs to
2998 struct rhash_head ht_node;
2999 struct neigh_table *neigh_tbl;
3000 struct mlxsw_sp_nexthop_key key;
3001 unsigned char gw_addr[sizeof(struct in6_addr)];
3005 int num_adj_entries;
3006 struct mlxsw_sp_rif *rif;
3007 u8 should_offload:1, /* set indicates this nexthop should be written
3008 * to the adjacency table.
3009 */
3010 offloaded:1, /* set indicates this nexthop was written to the
3011 * adjacency table.
3012 */
3013 update:1; /* set indicates this nexthop should be updated in the
3014 * adjacency table (e.g., its MAC changed).
3015 */
3016 enum mlxsw_sp_nexthop_action action;
3017 enum mlxsw_sp_nexthop_type type;
3019 struct mlxsw_sp_neigh_entry *neigh_entry;
3020 struct mlxsw_sp_ipip_entry *ipip_entry;
3022 unsigned int counter_index;
3026 enum mlxsw_sp_nexthop_group_type {
3027 MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
3028 MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
3029 MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
3032 struct mlxsw_sp_nexthop_group_info {
3033 struct mlxsw_sp_nexthop_group *nh_grp;
3037 int sum_norm_weight;
3038 u8 adj_index_valid:1,
3039 gateway:1, /* routes using the group use a gateway */
3041 struct list_head list; /* member in nh_res_grp_list */
3042 struct mlxsw_sp_nexthop nexthops[0];
3043 #define nh_rif nexthops[0].rif
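/* nexthops[] is a flexible array sized by 'count'; the nh_rif alias
 * provides a representative RIF for the whole group in places that only
 * need one.
 */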
3046 struct mlxsw_sp_nexthop_group_vr_key {
3048 enum mlxsw_sp_l3proto proto;
3051 struct mlxsw_sp_nexthop_group_vr_entry {
3052 struct list_head list; /* member in vr_list */
3053 struct rhash_head ht_node; /* member in vr_ht */
3054 refcount_t ref_count;
3055 struct mlxsw_sp_nexthop_group_vr_key key;
3058 struct mlxsw_sp_nexthop_group {
3059 struct rhash_head ht_node;
3060 struct list_head fib_list; /* list of fib entries that use this group */
3063 struct fib_info *fi;
3069 struct mlxsw_sp_nexthop_group_info *nhgi;
3070 struct list_head vr_list;
3071 struct rhashtable vr_ht;
3072 enum mlxsw_sp_nexthop_group_type type;
3076 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
3077 struct mlxsw_sp_nexthop *nh)
3079 struct devlink *devlink;
3081 devlink = priv_to_devlink(mlxsw_sp->core);
3082 if (!devlink_dpipe_table_counter_enabled(devlink,
3083 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
3086 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
3089 nh->counter_valid = true;
3092 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
3093 struct mlxsw_sp_nexthop *nh)
3095 if (!nh->counter_valid)
3097 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
3098 nh->counter_valid = false;
3101 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
3102 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
3104 if (!nh->counter_valid)
3107 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
3111 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
3112 struct mlxsw_sp_nexthop *nh)
3115 if (list_empty(&router->nexthop_list))
3118 return list_first_entry(&router->nexthop_list,
3119 typeof(*nh), router_list_node);
3121 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
3123 return list_next_entry(nh, router_list_node);
3126 bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
3128 return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3131 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
3133 if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
3134 !mlxsw_sp_nexthop_is_forward(nh))
3136 return nh->neigh_entry->ha;
3139 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3140 u32 *p_adj_size, u32 *p_adj_hash_index)
3142 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3143 u32 adj_hash_index = 0;
3146 if (!nh->offloaded || !nhgi->adj_index_valid)
3149 *p_adj_index = nhgi->adj_index;
3150 *p_adj_size = nhgi->ecmp_size;
3152 for (i = 0; i < nhgi->count; i++) {
3153 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3157 if (nh_iter->offloaded)
3158 adj_hash_index += nh_iter->num_adj_entries;
3161 *p_adj_hash_index = adj_hash_index;
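/* The hash index of a nexthop within its group is the number of
 * adjacency entries occupied by the offloaded nexthops preceding it,
 * since each nexthop may span several consecutive entries according to
 * its weight.
 */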
3165 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3170 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3172 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3175 for (i = 0; i < nhgi->count; i++) {
3176 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3178 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3184 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3185 .key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3186 .head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3187 .key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3188 .automatic_shrinking = true,
3191 static struct mlxsw_sp_nexthop_group_vr_entry *
3192 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3193 const struct mlxsw_sp_fib *fib)
3195 struct mlxsw_sp_nexthop_group_vr_key key;
3197 memset(&key, 0, sizeof(key));
3198 key.vr_id = fib->vr->id;
3199 key.proto = fib->proto;
3200 return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3201 mlxsw_sp_nexthop_group_vr_ht_params);
3205 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3206 const struct mlxsw_sp_fib *fib)
3208 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3211 vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3215 vr_entry->key.vr_id = fib->vr->id;
3216 vr_entry->key.proto = fib->proto;
3217 refcount_set(&vr_entry->ref_count, 1);
3219 err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3220 mlxsw_sp_nexthop_group_vr_ht_params);
3222 goto err_hashtable_insert;
3224 list_add(&vr_entry->list, &nh_grp->vr_list);
3228 err_hashtable_insert:
3234 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3235 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3237 list_del(&vr_entry->list);
3238 rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3239 mlxsw_sp_nexthop_group_vr_ht_params);
3244 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3245 const struct mlxsw_sp_fib *fib)
3247 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3249 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3251 refcount_inc(&vr_entry->ref_count);
3255 return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3259 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3260 const struct mlxsw_sp_fib *fib)
3262 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3264 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3265 if (WARN_ON_ONCE(!vr_entry))
3268 if (!refcount_dec_and_test(&vr_entry->ref_count))
3271 mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3274 struct mlxsw_sp_nexthop_group_cmp_arg {
3275 enum mlxsw_sp_nexthop_group_type type;
3277 struct fib_info *fi;
3278 struct mlxsw_sp_fib6_entry *fib6_entry;
3284 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3285 const struct in6_addr *gw, int ifindex,
3290 for (i = 0; i < nh_grp->nhgi->count; i++) {
3291 const struct mlxsw_sp_nexthop *nh;
3293 nh = &nh_grp->nhgi->nexthops[i];
3294 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3295 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3303 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3304 const struct mlxsw_sp_fib6_entry *fib6_entry)
3306 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3308 if (nh_grp->nhgi->count != fib6_entry->nrt6)
3311 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3312 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3313 struct in6_addr *gw;
3314 int ifindex, weight;
3316 ifindex = fib6_nh->fib_nh_dev->ifindex;
3317 weight = fib6_nh->fib_nh_weight;
3318 gw = &fib6_nh->fib_nh_gw6;
3319 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3328 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3330 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3331 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3333 if (nh_grp->type != cmp_arg->type)
3336 switch (cmp_arg->type) {
3337 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3338 return cmp_arg->fi != nh_grp->ipv4.fi;
3339 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3340 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3341 cmp_arg->fib6_entry);
3342 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3343 return cmp_arg->id != nh_grp->obj.id;
3350 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3352 const struct mlxsw_sp_nexthop_group *nh_grp = data;
3353 const struct mlxsw_sp_nexthop *nh;
3354 struct fib_info *fi;
3358 switch (nh_grp->type) {
3359 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3360 fi = nh_grp->ipv4.fi;
3361 return jhash(&fi, sizeof(fi), seed);
3362 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3363 val = nh_grp->nhgi->count;
3364 for (i = 0; i < nh_grp->nhgi->count; i++) {
3365 nh = &nh_grp->nhgi->nexthops[i];
3366 val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3367 val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3369 return jhash(&val, sizeof(val), seed);
3370 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3371 return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3379 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3381 unsigned int val = fib6_entry->nrt6;
3382 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3384 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3385 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3386 struct net_device *dev = fib6_nh->fib_nh_dev;
3387 struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3389 val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3390 val ^= jhash(gw, sizeof(*gw), seed);
3393 return jhash(&val, sizeof(val), seed);
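/* XOR-ing the per-nexthop hashes makes the group hash independent of the
 * order in which the routes are listed, matching the unordered comparison
 * done by mlxsw_sp_nexthop6_group_cmp().
 */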
3397 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3399 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3401 switch (cmp_arg->type) {
3402 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3403 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3404 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3405 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3406 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3407 return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3414 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3415 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3416 .hashfn = mlxsw_sp_nexthop_group_hash,
3417 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
3418 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
3421 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3422 struct mlxsw_sp_nexthop_group *nh_grp)
3424 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3425 !nh_grp->nhgi->gateway)
3428 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3430 mlxsw_sp_nexthop_group_ht_params);
3433 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3434 struct mlxsw_sp_nexthop_group *nh_grp)
3436 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3437 !nh_grp->nhgi->gateway)
3440 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3442 mlxsw_sp_nexthop_group_ht_params);
3445 static struct mlxsw_sp_nexthop_group *
3446 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3447 struct fib_info *fi)
3449 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3451 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3453 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3455 mlxsw_sp_nexthop_group_ht_params);
3458 static struct mlxsw_sp_nexthop_group *
3459 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3460 struct mlxsw_sp_fib6_entry *fib6_entry)
3462 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3464 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3465 cmp_arg.fib6_entry = fib6_entry;
3466 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3468 mlxsw_sp_nexthop_group_ht_params);
3471 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3472 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3473 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3474 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
3477 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3478 struct mlxsw_sp_nexthop *nh)
3480 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3481 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3484 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3485 struct mlxsw_sp_nexthop *nh)
3487 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3488 mlxsw_sp_nexthop_ht_params);
3491 static struct mlxsw_sp_nexthop *
3492 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3493 struct mlxsw_sp_nexthop_key key)
3495 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3496 mlxsw_sp_nexthop_ht_params);
3499 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3500 enum mlxsw_sp_l3proto proto,
3502 u32 adj_index, u16 ecmp_size,
3506 char raleu_pl[MLXSW_REG_RALEU_LEN];
3508 mlxsw_reg_raleu_pack(raleu_pl,
3509 (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3510 adj_index, ecmp_size, new_adj_index,
3512 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3515 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3516 struct mlxsw_sp_nexthop_group *nh_grp,
3517 u32 old_adj_index, u16 old_ecmp_size)
3519 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3520 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3523 list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3524 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3525 vr_entry->key.proto,
3526 vr_entry->key.vr_id,
3532 goto err_mass_update_vr;
3537 list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3538 mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3539 vr_entry->key.vr_id,
3542 old_adj_index, old_ecmp_size);
3546 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3548 struct mlxsw_sp_nexthop *nh,
3549 bool force, char *ratr_pl)
3551 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3552 enum mlxsw_reg_ratr_op op;
3555 rif_index = nh->rif ? nh->rif->rif_index :
3556 mlxsw_sp->router->lb_rif_index;
3557 op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3558 MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3559 mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3560 adj_index, rif_index);
3561 switch (nh->action) {
3562 case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3563 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3565 case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3566 mlxsw_reg_ratr_trap_action_set(ratr_pl,
3567 MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3569 case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3570 mlxsw_reg_ratr_trap_action_set(ratr_pl,
3571 MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3572 mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3578 if (nh->counter_valid)
3579 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3581 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3583 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3586 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3587 struct mlxsw_sp_nexthop *nh, bool force,
3592 for (i = 0; i < nh->num_adj_entries; i++) {
3595 err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3596 nh, force, ratr_pl);
3604 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3606 struct mlxsw_sp_nexthop *nh,
3607 bool force, char *ratr_pl)
3609 const struct mlxsw_sp_ipip_ops *ipip_ops;
3611 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3612 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3616 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3618 struct mlxsw_sp_nexthop *nh, bool force,
3623 for (i = 0; i < nh->num_adj_entries; i++) {
3626 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3627 nh, force, ratr_pl);
3635 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3636 struct mlxsw_sp_nexthop *nh, bool force,
3639 /* When action is discard or trap, the nexthop must be
3640 * programmed as an Ethernet nexthop.
3642 if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3643 nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3644 nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3645 return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3648 return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3653 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3654 struct mlxsw_sp_nexthop_group_info *nhgi,
3657 char ratr_pl[MLXSW_REG_RATR_LEN];
3658 u32 adj_index = nhgi->adj_index; /* base */
3659 struct mlxsw_sp_nexthop *nh;
3662 for (i = 0; i < nhgi->count; i++) {
3663 nh = &nhgi->nexthops[i];
3665 if (!nh->should_offload) {
3670 if (nh->update || reallocate) {
3673 err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3680 adj_index += nh->num_adj_entries;
3686 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3687 struct mlxsw_sp_nexthop_group *nh_grp)
3689 struct mlxsw_sp_fib_entry *fib_entry;
3692 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3693 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3700 struct mlxsw_sp_adj_grp_size_range {
3701 u16 start; /* Inclusive */
3702 u16 end; /* Inclusive */
3705 /* Ordered by range start value */
3706 static const struct mlxsw_sp_adj_grp_size_range
3707 mlxsw_sp1_adj_grp_size_ranges[] = {
3708 { .start = 1, .end = 64 },
3709 { .start = 512, .end = 512 },
3710 { .start = 1024, .end = 1024 },
3711 { .start = 2048, .end = 2048 },
3712 { .start = 4096, .end = 4096 },
3715 /* Ordered by range start value */
3716 static const struct mlxsw_sp_adj_grp_size_range
3717 mlxsw_sp2_adj_grp_size_ranges[] = {
3718 { .start = 1, .end = 128 },
3719 { .start = 256, .end = 256 },
3720 { .start = 512, .end = 512 },
3721 { .start = 1024, .end = 1024 },
3722 { .start = 2048, .end = 2048 },
3723 { .start = 4096, .end = 4096 },
3726 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3727 u16 *p_adj_grp_size)
3731 for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3732 const struct mlxsw_sp_adj_grp_size_range *size_range;
3734 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3736 if (*p_adj_grp_size >= size_range->start &&
3737 *p_adj_grp_size <= size_range->end)
3740 if (*p_adj_grp_size <= size_range->end) {
3741 *p_adj_grp_size = size_range->end;
3747 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3748 u16 *p_adj_grp_size,
3749 unsigned int alloc_size)
3753 for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3754 const struct mlxsw_sp_adj_grp_size_range *size_range;
3756 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3758 if (alloc_size >= size_range->end) {
3759 *p_adj_grp_size = size_range->end;
3765 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3766 u16 *p_adj_grp_size)
3768 unsigned int alloc_size;
3771 /* Round up the requested group size to the next size supported
3772 * by the device and make sure the request can be satisfied.
3774 mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3775 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3776 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3777 *p_adj_grp_size, &alloc_size);
3780 /* It is possible the allocation results in more allocated
3781 * entries than requested. Try to use as many of them as
3782 * possible.
3783 */
3784 mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
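/* Worked example with the Spectrum-1 ranges above: a request for 90
 * entries is first rounded up to 512, the next supported size; if the
 * allocator then hands back a larger block, say 1024 entries, the size
 * is adjusted to the largest supported size not exceeding the
 * allocation, here 1024, so the extra entries are not wasted.
 */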
3790 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3792 int i, g = 0, sum_norm_weight = 0;
3793 struct mlxsw_sp_nexthop *nh;
3795 for (i = 0; i < nhgi->count; i++) {
3796 nh = &nhgi->nexthops[i];
3798 if (!nh->should_offload)
3801 g = gcd(nh->nh_weight, g);
3806 for (i = 0; i < nhgi->count; i++) {
3807 nh = &nhgi->nexthops[i];
3809 if (!nh->should_offload)
3811 nh->norm_nh_weight = nh->nh_weight / g;
3812 sum_norm_weight += nh->norm_nh_weight;
3815 nhgi->sum_norm_weight = sum_norm_weight;
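/* Example: weights 2 and 4 have gcd 2, so the normalized weights become
 * 1 and 2 and sum_norm_weight is 3, the smallest layout that preserves
 * the 1:2 traffic ratio.
 */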
3819 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3821 int i, weight = 0, lower_bound = 0;
3822 int total = nhgi->sum_norm_weight;
3823 u16 ecmp_size = nhgi->ecmp_size;
3825 for (i = 0; i < nhgi->count; i++) {
3826 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3829 if (!nh->should_offload)
3831 weight += nh->norm_nh_weight;
3832 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3833 nh->num_adj_entries = upper_bound - lower_bound;
3834 lower_bound = upper_bound;
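/* Illustrative arithmetic: for normalized weights 1 and 2 (total 3) and
 * an ecmp_size of 4, the nexthops receive DIV_ROUND_CLOSEST(4 * 1, 3) = 1
 * and DIV_ROUND_CLOSEST(4 * 3, 3) - 1 = 3 adjacency entries respectively,
 * spreading the group size according to the weight ratio.
 */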
3838 static struct mlxsw_sp_nexthop *
3839 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3840 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3843 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3844 struct mlxsw_sp_nexthop_group *nh_grp)
3848 for (i = 0; i < nh_grp->nhgi->count; i++) {
3849 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3852 nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3854 nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3859 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3860 struct mlxsw_sp_fib6_entry *fib6_entry)
3862 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3864 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3865 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3866 struct mlxsw_sp_nexthop *nh;
3868 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3869 if (nh && nh->offloaded)
3870 fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3872 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3877 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3878 struct mlxsw_sp_nexthop_group *nh_grp)
3880 struct mlxsw_sp_fib6_entry *fib6_entry;
3882 /* Unfortunately, in IPv6 the route and the nexthop are described by
3883 * the same struct, so we need to iterate over all the routes using the
3884 * nexthop group and set / clear the offload indication for them.
3886 list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3887 common.nexthop_group_node)
3888 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3892 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3893 const struct mlxsw_sp_nexthop *nh,
3896 struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3897 bool offload = false, trap = false;
3899 if (nh->offloaded) {
3900 if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3905 nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3906 bucket_index, offload, trap);
3910 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3911 struct mlxsw_sp_nexthop_group *nh_grp)
3915 /* Do not update the flags if the nexthop group is being destroyed
3916 * since:
3917 * 1. The nexthop object is being deleted, in which case the flags are
3918 * irrelevant.
3919 * 2. The nexthop group was replaced by a newer group, in which case
3920 * the flags of the nexthop object were already updated based on the
3921 * new group.
3922 */
3923 if (nh_grp->can_destroy)
3926 nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3927 nh_grp->nhgi->adj_index_valid, false);
3929 /* Update flags of individual nexthop buckets in case of a resilient
3930 * nexthop group.
3931 */
3932 if (!nh_grp->nhgi->is_resilient)
3935 for (i = 0; i < nh_grp->nhgi->count; i++) {
3936 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3938 mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
3943 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3944 struct mlxsw_sp_nexthop_group *nh_grp)
3946 switch (nh_grp->type) {
3947 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3948 mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3950 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3951 mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3953 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3954 mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
3960 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3961 struct mlxsw_sp_nexthop_group *nh_grp)
3963 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3964 u16 ecmp_size, old_ecmp_size;
3965 struct mlxsw_sp_nexthop *nh;
3966 bool offload_change = false;
3968 bool old_adj_index_valid;
3973 return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3975 for (i = 0; i < nhgi->count; i++) {
3976 nh = &nhgi->nexthops[i];
3978 if (nh->should_offload != nh->offloaded) {
3979 offload_change = true;
3980 if (nh->should_offload)
3984 if (!offload_change) {
3985 /* Nothing was added or removed, so no need to reallocate. Just
3986 * update MAC on existing adjacency indexes.
3988 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
3990 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3993 /* Flags of individual nexthop buckets might need to be
3994 * updated.
3995 */
3996 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3999 mlxsw_sp_nexthop_group_normalize(nhgi);
4000 if (!nhgi->sum_norm_weight) {
4001 /* No neigh of this group is connected so we just set
4002 * the trap and let everything flow through the kernel.
4003 */
4008 ecmp_size = nhgi->sum_norm_weight;
4009 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
4011 /* No valid allocation size available. */
4014 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4015 ecmp_size, &adj_index);
4017 /* We ran out of KVD linear space, just set the
4018 * trap and let everything flow through the kernel.
4019 */
4020 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
4023 old_adj_index_valid = nhgi->adj_index_valid;
4024 old_adj_index = nhgi->adj_index;
4025 old_ecmp_size = nhgi->ecmp_size;
4026 nhgi->adj_index_valid = 1;
4027 nhgi->adj_index = adj_index;
4028 nhgi->ecmp_size = ecmp_size;
4029 mlxsw_sp_nexthop_group_rebalance(nhgi);
4030 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
4032 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4036 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4038 if (!old_adj_index_valid) {
4039 /* The trap was set for fib entries, so we have to call
4040 * fib entry update to unset it and use adjacency index.
4042 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4044 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
4050 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
4051 old_adj_index, old_ecmp_size);
4052 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4053 old_ecmp_size, old_adj_index);
4055 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
4062 old_adj_index_valid = nhgi->adj_index_valid;
4063 nhgi->adj_index_valid = 0;
4064 for (i = 0; i < nhgi->count; i++) {
4065 nh = &nhgi->nexthops[i];
4068 err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4070 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
4071 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4072 if (old_adj_index_valid)
4073 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4074 nhgi->ecmp_size, nhgi->adj_index);
4078 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
4082 nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
4083 nh->should_offload = 1;
4084 } else if (nh->nhgi->is_resilient) {
4085 nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4086 nh->should_offload = 1;
4088 nh->should_offload = 0;
4094 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
4095 struct mlxsw_sp_neigh_entry *neigh_entry)
4097 struct neighbour *n, *old_n = neigh_entry->key.n;
4098 struct mlxsw_sp_nexthop *nh;
4099 bool entry_connected;
4103 nh = list_first_entry(&neigh_entry->nexthop_list,
4104 struct mlxsw_sp_nexthop, neigh_list_node);
4106 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4108 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4111 neigh_event_send(n, NULL);
4114 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
4115 neigh_entry->key.n = n;
4116 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4118 goto err_neigh_entry_insert;
4120 read_lock_bh(&n->lock);
4121 nud_state = n->nud_state;
4123 read_unlock_bh(&n->lock);
4124 entry_connected = nud_state & NUD_VALID && !dead;
4126 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4128 neigh_release(old_n);
4130 __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4131 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4138 err_neigh_entry_insert:
4139 neigh_entry->key.n = old_n;
4140 mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4146 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4147 struct mlxsw_sp_neigh_entry *neigh_entry,
4148 bool removing, bool dead)
4150 struct mlxsw_sp_nexthop *nh;
4152 if (list_empty(&neigh_entry->nexthop_list))
4158 err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4161 dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4165 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4167 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4168 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4172 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
4173 struct mlxsw_sp_rif *rif)
4179 list_add(&nh->rif_list_node, &rif->nexthop_list);
4182 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
4187 list_del(&nh->rif_list_node);
4191 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4192 struct mlxsw_sp_nexthop *nh)
4194 struct mlxsw_sp_neigh_entry *neigh_entry;
4195 struct neighbour *n;
4199 if (!nh->nhgi->gateway || nh->neigh_entry)
4202 /* Take a reference on the neigh here, ensuring that the neigh
4203 * is not destroyed before the nexthop entry is finished.
4204 * The reference is taken either in neigh_lookup() or
4205 * in neigh_create() in case n is not found.
4206 */
4207 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4209 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4212 neigh_event_send(n, NULL);
4214 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4216 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4217 if (IS_ERR(neigh_entry)) {
4219 goto err_neigh_entry_create;
4223 /* If that is the first nexthop connected to that neigh, add to
4224 * nexthop_neighs_list
4226 if (list_empty(&neigh_entry->nexthop_list))
4227 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4228 &mlxsw_sp->router->nexthop_neighs_list);
4230 nh->neigh_entry = neigh_entry;
4231 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4232 read_lock_bh(&n->lock);
4233 nud_state = n->nud_state;
4235 read_unlock_bh(&n->lock);
4236 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4240 err_neigh_entry_create:
4245 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4246 struct mlxsw_sp_nexthop *nh)
4248 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4249 struct neighbour *n;
4253 n = neigh_entry->key.n;
4255 __mlxsw_sp_nexthop_neigh_update(nh, true);
4256 list_del(&nh->neigh_list_node);
4257 nh->neigh_entry = NULL;
4259 /* If that is the last nexthop connected to that neigh, remove from
4260 * nexthop_neighs_list
4262 if (list_empty(&neigh_entry->nexthop_list))
4263 list_del(&neigh_entry->nexthop_neighs_list_node);
4265 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4266 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4271 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4273 struct net_device *ul_dev;
4277 ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4278 is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4284 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4285 struct mlxsw_sp_nexthop *nh,
4286 struct mlxsw_sp_ipip_entry *ipip_entry)
4290 if (!nh->nhgi->gateway || nh->ipip_entry)
4293 nh->ipip_entry = ipip_entry;
4294 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4295 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4296 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
4299 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4300 struct mlxsw_sp_nexthop *nh)
4302 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4307 __mlxsw_sp_nexthop_neigh_update(nh, true);
4308 nh->ipip_entry = NULL;
4311 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4312 const struct fib_nh *fib_nh,
4313 enum mlxsw_sp_ipip_type *p_ipipt)
4315 struct net_device *dev = fib_nh->fib_nh_dev;
4318 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4319 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4322 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4323 struct mlxsw_sp_nexthop *nh,
4324 const struct net_device *dev)
4326 const struct mlxsw_sp_ipip_ops *ipip_ops;
4327 struct mlxsw_sp_ipip_entry *ipip_entry;
4328 struct mlxsw_sp_rif *rif;
4331 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4333 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4334 if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4335 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4336 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4341 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4342 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4346 mlxsw_sp_nexthop_rif_init(nh, rif);
4347 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4349 goto err_neigh_init;
4354 mlxsw_sp_nexthop_rif_fini(nh);
4358 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4359 struct mlxsw_sp_nexthop *nh)
4362 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4363 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4364 mlxsw_sp_nexthop_rif_fini(nh);
4366 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4367 mlxsw_sp_nexthop_rif_fini(nh);
4368 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4373 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4374 struct mlxsw_sp_nexthop_group *nh_grp,
4375 struct mlxsw_sp_nexthop *nh,
4376 struct fib_nh *fib_nh)
4378 struct net_device *dev = fib_nh->fib_nh_dev;
4379 struct in_device *in_dev;
4382 nh->nhgi = nh_grp->nhgi;
4383 nh->key.fib_nh = fib_nh;
4384 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4385 nh->nh_weight = fib_nh->fib_nh_weight;
4389 memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4390 nh->neigh_tbl = &arp_tbl;
4391 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4395 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4396 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4400 nh->ifindex = dev->ifindex;
4403 in_dev = __in_dev_get_rcu(dev);
4404 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4405 fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4411 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4413 goto err_nexthop_neigh_init;
4417 err_nexthop_neigh_init:
4418 list_del(&nh->router_list_node);
4419 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4420 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4424 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4425 struct mlxsw_sp_nexthop *nh)
4427 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4428 list_del(&nh->router_list_node);
4429 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4430 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4433 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4434 unsigned long event, struct fib_nh *fib_nh)
4436 struct mlxsw_sp_nexthop_key key;
4437 struct mlxsw_sp_nexthop *nh;
4439 key.fib_nh = fib_nh;
4440 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4445 case FIB_EVENT_NH_ADD:
4446 mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4448 case FIB_EVENT_NH_DEL:
4449 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4453 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4456 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4457 struct mlxsw_sp_rif *rif)
4459 struct mlxsw_sp_nexthop *nh;
4462 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
4464 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4467 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4468 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
4475 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4476 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4480 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
4481 struct mlxsw_sp_rif *old_rif,
4482 struct mlxsw_sp_rif *new_rif)
4484 struct mlxsw_sp_nexthop *nh;
4486 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
4487 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
4489 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
4492 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4493 struct mlxsw_sp_rif *rif)
4495 struct mlxsw_sp_nexthop *nh, *tmp;
4497 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
4498 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4499 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4503 static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
4505 enum mlxsw_reg_ratr_trap_action trap_action;
4506 char ratr_pl[MLXSW_REG_RATR_LEN];
4509 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4510 &mlxsw_sp->router->adj_trap_index);
4514 trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
4515 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4516 MLXSW_REG_RATR_TYPE_ETHERNET,
4517 mlxsw_sp->router->adj_trap_index,
4518 mlxsw_sp->router->lb_rif_index);
4519 mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4520 mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
4521 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4523 goto err_ratr_write;
4528 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4529 mlxsw_sp->router->adj_trap_index);
4533 static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
4535 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4536 mlxsw_sp->router->adj_trap_index);
4539 static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
4543 if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
4546 err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
4550 refcount_set(&mlxsw_sp->router->num_groups, 1);
4555 static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
4557 if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
4560 mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
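/* The shared trap adjacency entry is reference counted across nexthop
 * groups: it is created with the first group and freed with the last,
 * giving routes whose group has no valid adjacency index a trap entry to
 * point at.
 */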
4563 static int
4564 mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
4565 const struct mlxsw_sp_nexthop_group *nh_grp,
4566 unsigned long *activity)
4567 {
4568 char *ratrad_pl;
4569 int i, err;
4571 ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
4572 if (!ratrad_pl)
4573 return -ENOMEM;
4575 mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
4576 nh_grp->nhgi->count);
4577 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
4578 if (err)
4579 goto out;
4581 for (i = 0; i < nh_grp->nhgi->count; i++) {
4582 if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
4583 continue;
4584 bitmap_set(activity, i, 1);
4585 }
4587 out:
4588 kfree(ratrad_pl);
4589 return err;
4590 }
4591 #define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
4593 static void
4594 mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
4595 const struct mlxsw_sp_nexthop_group *nh_grp)
4596 {
4597 unsigned long *activity;
4599 activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
4600 if (!activity)
4601 return;
4603 mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
4604 nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4605 nh_grp->nhgi->count, activity);
4607 bitmap_free(activity);
4608 }
4610 static void
4611 mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
4612 {
4613 unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
4615 mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
4616 msecs_to_jiffies(interval));
4617 }
4619 static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
4620 {
4621 struct mlxsw_sp_nexthop_group_info *nhgi;
4622 struct mlxsw_sp_router *router;
4623 bool reschedule = false;
4625 router = container_of(work, struct mlxsw_sp_router,
4626 nh_grp_activity_dw.work);
4628 mutex_lock(&router->lock);
4630 list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
4631 mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
4632 reschedule = true;
4633 }
4635 mutex_unlock(&router->lock);
4637 if (!reschedule)
4638 return;
4639 mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
4640 }
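/* The activity poller above is a self-rescheduling delayed work item:
 * it re-arms itself only while there is something left to poll. The
 * general shape (hypothetical poller type, not driver code):
 *
 *	static void poll_work(struct work_struct *work)
 *	{
 *		struct poller *p = container_of(work, struct poller,
 *						dw.work);
 *		bool reschedule;
 *
 *		mutex_lock(&p->lock);
 *		reschedule = !list_empty(&p->items);
 *		mutex_unlock(&p->lock);
 *
 *		if (reschedule)
 *			schedule_delayed_work(&p->dw,
 *					      msecs_to_jiffies(1000));
 *	}
 *
 * The first arming happens when the list becomes non-empty and the
 * work is cancelled when the list empties, so no timer ticks while
 * no resilient groups exist.
 */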
4642 static int
4643 mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4644 const struct nh_notifier_single_info *nh,
4645 struct netlink_ext_ack *extack)
4646 {
4647 int err = -EINVAL;
4649 if (nh->is_fdb)
4650 NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4651 else if (nh->has_encap)
4652 NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4653 else
4654 err = 0;
4656 return err;
4657 }
4659 static int
4660 mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
4661 const struct nh_notifier_single_info *nh,
4662 struct netlink_ext_ack *extack)
4663 {
4664 int err;
4666 err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
4667 if (err)
4668 return err;
4670 /* Device only nexthops with an IPIP device are programmed as
4671 * encapsulating adjacency entries.
4672 */
4673 if (!nh->gw_family && !nh->is_reject &&
4674 !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
4675 NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
4676 return -EINVAL;
4677 }
4679 return 0;
4680 }
4682 static int
4683 mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
4684 const struct nh_notifier_grp_info *nh_grp,
4685 struct netlink_ext_ack *extack)
4686 {
4687 int i;
4689 if (nh_grp->is_fdb) {
4690 NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
4691 return -EINVAL;
4692 }
4694 for (i = 0; i < nh_grp->num_nh; i++) {
4695 const struct nh_notifier_single_info *nh;
4696 int err;
4698 nh = &nh_grp->nh_entries[i].nh;
4699 err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4700 extack);
4701 if (err)
4702 return err;
4703 }
4705 return 0;
4706 }
4708 static int
4709 mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
4710 const struct nh_notifier_res_table_info *nh_res_table,
4711 struct netlink_ext_ack *extack)
4712 {
4713 unsigned int alloc_size;
4714 bool valid_size = false;
4715 int err, i;
4717 if (nh_res_table->num_nh_buckets < 32) {
4718 NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
4719 return -EINVAL;
4720 }
4722 for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
4723 const struct mlxsw_sp_adj_grp_size_range *size_range;
4725 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
4727 if (nh_res_table->num_nh_buckets >= size_range->start &&
4728 nh_res_table->num_nh_buckets <= size_range->end) {
4729 valid_size = true;
4730 break;
4731 }
4732 }
4734 if (!valid_size) {
4735 NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
4736 return -EINVAL;
4737 }
4739 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
4740 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4741 nh_res_table->num_nh_buckets,
4742 &alloc_size);
4743 if (err || nh_res_table->num_nh_buckets != alloc_size) {
4744 NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
4745 return -EINVAL;
4746 }
4748 return 0;
4749 }
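/* Worked example of the checks above (the concrete ranges are
 * per-ASIC and come from mlxsw_sp->router->adj_grp_size_ranges; the
 * numbers here are only illustrative): with hypothetical valid ranges
 * {32..256} and {512..1024}, a resilient group with 128 buckets
 * passes the range check, one with 300 buckets fails it, and one
 * with 512 buckets must additionally match the KVDL allocator
 * exactly: mlxsw_sp_kvdl_alloc_count_query() reports the size that
 * would really be allocated, and the request is rejected unless that
 * equals the requested bucket count, since a resilient group may
 * never be silently resized.
 */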
4752 mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
4753 const struct nh_notifier_res_table_info *nh_res_table,
4754 struct netlink_ext_ack *extack)
4759 err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
4765 for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
4766 const struct nh_notifier_single_info *nh;
4769 nh = &nh_res_table->nhs[i];
4770 err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4779 static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
4780 unsigned long event,
4781 struct nh_notifier_info *info)
4783 struct nh_notifier_single_info *nh;
4785 if (event != NEXTHOP_EVENT_REPLACE &&
4786 event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
4787 event != NEXTHOP_EVENT_BUCKET_REPLACE)
4790 switch (info->type) {
4791 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4792 return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
4794 case NH_NOTIFIER_INFO_TYPE_GRP:
4795 return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
4798 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4799 return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
4802 case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
4803 nh = &info->nh_res_bucket->new_nh;
4804 return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4807 NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
4812 static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
4813 const struct nh_notifier_info *info)
4815 const struct net_device *dev;
4817 switch (info->type) {
4818 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4819 dev = info->nh->dev;
4820 return info->nh->gw_family || info->nh->is_reject ||
4821 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
4822 case NH_NOTIFIER_INFO_TYPE_GRP:
4823 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4824 /* Already validated earlier. */
4831 static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
4832 struct mlxsw_sp_nexthop *nh)
4834 u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;
4836 nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
4837 nh->should_offload = 1;
4838 /* While nexthops that discard packets do not forward packets
4839 * via an egress RIF, they still need to be programmed using a
4840 * valid RIF, so use the loopback RIF created during init.
4841 */
4842 nh->rif = mlxsw_sp->router->rifs[lb_rif_index];
4843 }
4845 static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
4846 struct mlxsw_sp_nexthop *nh)
4847 {
4848 nh->rif = NULL;
4849 nh->should_offload = 0;
4850 }
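/* Blackhole nexthops reach this code via the nexthop object API; from
 * user space they are created with standard iproute2 syntax, e.g.:
 *
 *	ip nexthop add id 10 blackhole
 *	ip route add 198.51.100.0/24 nhid 10
 *
 * Matching packets are then dropped in hardware; the loopback RIF is
 * only needed because every adjacency entry must name a valid egress
 * RIF, as the comment in the init function above explains.
 */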
4853 mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
4854 struct mlxsw_sp_nexthop_group *nh_grp,
4855 struct mlxsw_sp_nexthop *nh,
4856 struct nh_notifier_single_info *nh_obj, int weight)
4858 struct net_device *dev = nh_obj->dev;
4861 nh->nhgi = nh_grp->nhgi;
4862 nh->nh_weight = weight;
4864 switch (nh_obj->gw_family) {
4866 memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
4867 nh->neigh_tbl = &arp_tbl;
4870 memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
4871 #if IS_ENABLED(CONFIG_IPV6)
4872 nh->neigh_tbl = &nd_tbl;
4877 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4878 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4879 nh->ifindex = dev->ifindex;
4881 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4885 if (nh_obj->is_reject)
4886 mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
4888 /* In a resilient nexthop group, all the nexthops must be written to
4889 * the adjacency table. Even if they do not have a valid neighbour or
4890 * RIF.
4891 */
4892 if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
4893 nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4894 nh->should_offload = 1;
4900 list_del(&nh->router_list_node);
4901 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4905 static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
4906 struct mlxsw_sp_nexthop *nh)
4908 if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
4909 mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
4910 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4911 list_del(&nh->router_list_node);
4912 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4913 nh->should_offload = 0;
4917 mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
4918 struct mlxsw_sp_nexthop_group *nh_grp,
4919 struct nh_notifier_info *info)
4921 struct mlxsw_sp_nexthop_group_info *nhgi;
4922 struct mlxsw_sp_nexthop *nh;
4923 bool is_resilient = false;
4927 switch (info->type) {
4928 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4931 case NH_NOTIFIER_INFO_TYPE_GRP:
4932 nhs = info->nh_grp->num_nh;
4934 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4935 nhs = info->nh_res_table->num_nh_buckets;
4936 is_resilient = true;
4942 nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
4945 nh_grp->nhgi = nhgi;
4946 nhgi->nh_grp = nh_grp;
4947 nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
4948 nhgi->is_resilient = is_resilient;
4950 for (i = 0; i < nhgi->count; i++) {
4951 struct nh_notifier_single_info *nh_obj;
4954 nh = &nhgi->nexthops[i];
4955 switch (info->type) {
4956 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4960 case NH_NOTIFIER_INFO_TYPE_GRP:
4961 nh_obj = &info->nh_grp->nh_entries[i].nh;
4962 weight = info->nh_grp->nh_entries[i].weight;
4964 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4965 nh_obj = &info->nh_res_table->nhs[i];
4970 goto err_nexthop_obj_init;
4972 err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
4975 goto err_nexthop_obj_init;
4977 err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
4980 err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4982 NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
4983 goto err_group_refresh;
4986 /* Add resilient nexthop groups to a list so that the activity of their
4987 * nexthop buckets will be periodically queried and cleared.
4988 */
4989 if (nhgi->is_resilient) {
4990 if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
4991 mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
4992 list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
4993 }
4995 return 0;
4997 err_group_refresh:
4998 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
4999 err_nexthop_group_inc:
5000 i = nhgi->count;
5001 err_nexthop_obj_init:
5002 for (i--; i >= 0; i--) {
5003 nh = &nhgi->nexthops[i];
5004 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5005 }
5006 kfree(nhgi);
5007 return err;
5008 }
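/* The error path above is the standard kernel goto-unwind idiom:
 * every label undoes exactly what was set up before the failing step,
 * in reverse order, and partially initialized nexthops are torn down
 * by walking the array backwards from the last successful index.
 * Generic shape (hypothetical setup_/teardown_ steps, not driver
 * code):
 *
 *	err = setup_a();
 *	if (err)
 *		return err;
 *	err = setup_b();
 *	if (err)
 *		goto err_b;
 *	return 0;
 *
 * err_b:
 *	teardown_a();
 *	return err;
 */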
5011 mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5012 struct mlxsw_sp_nexthop_group *nh_grp)
5014 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5015 struct mlxsw_sp_router *router = mlxsw_sp->router;
5018 if (nhgi->is_resilient) {
5019 list_del(&nhgi->list);
5020 if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
5021 cancel_delayed_work(&router->nh_grp_activity_dw);
5024 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5025 for (i = nhgi->count - 1; i >= 0; i--) {
5026 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5028 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5030 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5031 WARN_ON_ONCE(nhgi->adj_index_valid);
5035 static struct mlxsw_sp_nexthop_group *
5036 mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
5037 struct nh_notifier_info *info)
5039 struct mlxsw_sp_nexthop_group *nh_grp;
5042 nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5044 return ERR_PTR(-ENOMEM);
5045 INIT_LIST_HEAD(&nh_grp->vr_list);
5046 err = rhashtable_init(&nh_grp->vr_ht,
5047 &mlxsw_sp_nexthop_group_vr_ht_params);
5049 goto err_nexthop_group_vr_ht_init;
5050 INIT_LIST_HEAD(&nh_grp->fib_list);
5051 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5052 nh_grp->obj.id = info->id;
5054 err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
5056 goto err_nexthop_group_info_init;
5058 nh_grp->can_destroy = false;
5062 err_nexthop_group_info_init:
5063 rhashtable_destroy(&nh_grp->vr_ht);
5064 err_nexthop_group_vr_ht_init:
5066 return ERR_PTR(err);
5070 mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
5071 struct mlxsw_sp_nexthop_group *nh_grp)
5073 if (!nh_grp->can_destroy)
5075 mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
5076 WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
5077 WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5078 rhashtable_destroy(&nh_grp->vr_ht);
5082 static struct mlxsw_sp_nexthop_group *
5083 mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
5085 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
5087 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5088 cmp_arg.id = id;
5089 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
5090 &cmp_arg,
5091 mlxsw_sp_nexthop_group_ht_params);
5092 }
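/* Nexthop groups of all types share one rhashtable whose key is a
 * composite compare argument rather than a field inside the object,
 * which is why lookups fill a cmp_arg. The same idiom in miniature
 * (hypothetical my_* names, not driver code):
 *
 *	struct my_key {
 *		u32 type;
 *		u32 id;
 *	};
 *
 *	static struct my_obj *my_lookup(struct rhashtable *ht, u32 id)
 *	{
 *		struct my_key key = { .type = MY_TYPE_OBJ, .id = id };
 *
 *		return rhashtable_lookup_fast(ht, &key, my_ht_params);
 *	}
 *
 * The table's obj_cmpfn compares candidates against the key, so
 * objects of different types can never alias one another.
 */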
5094 static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
5095 struct mlxsw_sp_nexthop_group *nh_grp)
5097 return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5101 mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
5102 struct mlxsw_sp_nexthop_group *nh_grp,
5103 struct mlxsw_sp_nexthop_group *old_nh_grp,
5104 struct netlink_ext_ack *extack)
5106 struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
5107 struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
5110 old_nh_grp->nhgi = new_nhgi;
5111 new_nhgi->nh_grp = old_nh_grp;
5112 nh_grp->nhgi = old_nhgi;
5113 old_nhgi->nh_grp = nh_grp;
5115 if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5116 /* Both the old adjacency index and the new one are valid.
5117 * Routes are currently using the old one. Tell the device to
5118 * replace the old adjacency index with the new one.
5119 */
5120 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
5121 old_nhgi->adj_index,
5122 old_nhgi->ecmp_size);
5124 NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
5127 } else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
5128 /* The old adjacency index is valid, while the new one is not.
5129 * Iterate over all the routes using the group and change them
5130 * to trap packets to the CPU.
5131 */
5132 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5134 NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
5137 } else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5138 /* The old adjacency index is invalid, while the new one is.
5139 * Iterate over all the routes using the group and change them
5140 * to forward packets using the new valid index.
5141 */
5142 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5144 NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
5149 /* Make sure the flags are set / cleared based on the new nexthop group
5150 * information.
5151 */
5152 mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
5154 /* At this point 'nh_grp' is just a shell that is not used by anyone
5155 * and its nexthop group info is the old info that was just replaced
5156 * with the new one. Remove it.
5157 */
5158 nh_grp->can_destroy = true;
5159 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5161 return 0;
5163 err_out:
5164 old_nhgi->nh_grp = old_nh_grp;
5165 nh_grp->nhgi = new_nhgi;
5166 new_nhgi->nh_grp = nh_grp;
5167 old_nh_grp->nhgi = old_nhgi;
5168 return err;
5169 }
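/* The replace above is make-before-break: the freshly created group's
 * info is swapped into the existing group object, the hardware is
 * updated, and only then is the now-empty shell destroyed. On failure
 * the pointer swap is simply reversed, roughly:
 *
 *	swap(old->nhgi, new->nhgi);
 *	err = update_hw(old);
 *	if (err)
 *		swap(old->nhgi, new->nhgi);
 *
 * so routes keep pointing at a group whose hardware state is known
 * good at every step, and the caller destroys the unused shell.
 */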
5171 static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
5172 struct nh_notifier_info *info)
5174 struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
5175 struct netlink_ext_ack *extack = info->extack;
5178 nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
5180 return PTR_ERR(nh_grp);
5182 old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5184 err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
5186 err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
5187 old_nh_grp, extack);
5190 nh_grp->can_destroy = true;
5191 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5197 static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
5198 struct nh_notifier_info *info)
5200 struct mlxsw_sp_nexthop_group *nh_grp;
5202 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5206 nh_grp->can_destroy = true;
5207 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5209 /* If the group still has routes using it, then defer the delete
5210 * operation until the last route using it is deleted.
5211 */
5212 if (!list_empty(&nh_grp->fib_list))
5214 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5217 static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
5218 u32 adj_index, char *ratr_pl)
5220 MLXSW_REG_ZERO(ratr, ratr_pl);
5221 mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5222 mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
5223 mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);
5225 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
5228 static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
5230 /* Clear the opcode and activity on both the old and new payload as
5231 * they are irrelevant for the comparison.
5232 */
5233 mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5234 mlxsw_reg_ratr_a_set(ratr_pl, 0);
5235 mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
5236 mlxsw_reg_ratr_a_set(ratr_pl_new, 0);
5238 /* If the contents of the adjacency entry are consistent with the
5239 * replacement request, then replacement was successful.
5240 */
5241 if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
5242 return 0;
5244 return -EINVAL;
5245 }
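/* This is the usual mask-then-memcmp technique for comparing register
 * payloads: fields that may legitimately differ (the opcode and the
 * activity bit) are first forced to the same value in both buffers,
 * after which one memcmp() over the whole payload decides whether the
 * hardware kept the old entry or accepted the new one. Sketch:
 *
 *	clear_volatile_fields(a);
 *	clear_volatile_fields(b);
 *	replaced = !memcmp(a, b, PAYLOAD_LEN);
 */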
5248 mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
5249 struct mlxsw_sp_nexthop *nh,
5250 struct nh_notifier_info *info)
5252 u16 bucket_index = info->nh_res_bucket->bucket_index;
5253 struct netlink_ext_ack *extack = info->extack;
5254 bool force = info->nh_res_bucket->force;
5255 char ratr_pl_new[MLXSW_REG_RATR_LEN];
5256 char ratr_pl[MLXSW_REG_RATR_LEN];
5260 /* No point in trying an atomic replacement if the idle timer interval
5261 * is smaller than the interval in which we query and clear activity.
5262 */
5263 if (!force && info->nh_res_bucket->idle_timer_ms <
5264 MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
5267 adj_index = nh->nhgi->adj_index + bucket_index;
5268 err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
5270 NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
5275 err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
5278 NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
5282 err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
5284 NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
5291 mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);
5296 static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
5297 struct nh_notifier_info *info)
5299 u16 bucket_index = info->nh_res_bucket->bucket_index;
5300 struct netlink_ext_ack *extack = info->extack;
5301 struct mlxsw_sp_nexthop_group_info *nhgi;
5302 struct nh_notifier_single_info *nh_obj;
5303 struct mlxsw_sp_nexthop_group *nh_grp;
5304 struct mlxsw_sp_nexthop *nh;
5307 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5309 NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
5313 nhgi = nh_grp->nhgi;
5315 if (bucket_index >= nhgi->count) {
5316 NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
5320 nh = &nhgi->nexthops[bucket_index];
5321 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5323 nh_obj = &info->nh_res_bucket->new_nh;
5324 err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5326 NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
5327 goto err_nexthop_obj_init;
5330 err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
5332 goto err_nexthop_obj_bucket_adj_update;
5336 err_nexthop_obj_bucket_adj_update:
5337 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5338 err_nexthop_obj_init:
5339 nh_obj = &info->nh_res_bucket->old_nh;
5340 mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5341 /* The old adjacency entry was not overwritten */
5347 static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
5348 unsigned long event, void *ptr)
5350 struct nh_notifier_info *info = ptr;
5351 struct mlxsw_sp_router *router;
5354 router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
5355 err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
5359 mutex_lock(&router->lock);
5362 case NEXTHOP_EVENT_REPLACE:
5363 err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
5365 case NEXTHOP_EVENT_DEL:
5366 mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
5368 case NEXTHOP_EVENT_BUCKET_REPLACE:
5369 err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
5376 mutex_unlock(&router->lock);
5379 return notifier_from_errno(err);
5382 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5383 struct fib_info *fi)
5385 const struct fib_nh *nh = fib_info_nh(fi, 0);
5387 return nh->fib_nh_scope == RT_SCOPE_LINK ||
5388 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
5392 mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
5393 struct mlxsw_sp_nexthop_group *nh_grp)
5395 unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
5396 struct mlxsw_sp_nexthop_group_info *nhgi;
5397 struct mlxsw_sp_nexthop *nh;
5400 nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5403 nh_grp->nhgi = nhgi;
5404 nhgi->nh_grp = nh_grp;
5405 nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
5407 for (i = 0; i < nhgi->count; i++) {
5408 struct fib_nh *fib_nh;
5410 nh = &nhgi->nexthops[i];
5411 fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
5412 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
5414 goto err_nexthop4_init;
5416 err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5419 err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5421 goto err_group_refresh;
5426 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5430 for (i--; i >= 0; i--) {
5431 nh = &nhgi->nexthops[i];
5432 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5439 mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5440 struct mlxsw_sp_nexthop_group *nh_grp)
5442 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5445 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5446 for (i = nhgi->count - 1; i >= 0; i--) {
5447 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5449 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5451 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5452 WARN_ON_ONCE(nhgi->adj_index_valid);
5456 static struct mlxsw_sp_nexthop_group *
5457 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
5459 struct mlxsw_sp_nexthop_group *nh_grp;
5462 nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5464 return ERR_PTR(-ENOMEM);
5465 INIT_LIST_HEAD(&nh_grp->vr_list);
5466 err = rhashtable_init(&nh_grp->vr_ht,
5467 &mlxsw_sp_nexthop_group_vr_ht_params);
5469 goto err_nexthop_group_vr_ht_init;
5470 INIT_LIST_HEAD(&nh_grp->fib_list);
5471 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
5472 nh_grp->ipv4.fi = fi;
5475 err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
5477 goto err_nexthop_group_info_init;
5479 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5481 goto err_nexthop_group_insert;
5483 nh_grp->can_destroy = true;
5487 err_nexthop_group_insert:
5488 mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5489 err_nexthop_group_info_init:
5491 rhashtable_destroy(&nh_grp->vr_ht);
5492 err_nexthop_group_vr_ht_init:
5494 return ERR_PTR(err);
5498 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
5499 struct mlxsw_sp_nexthop_group *nh_grp)
5501 if (!nh_grp->can_destroy)
5503 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5504 mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5505 fib_info_put(nh_grp->ipv4.fi);
5506 WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5507 rhashtable_destroy(&nh_grp->vr_ht);
5511 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
5512 struct mlxsw_sp_fib_entry *fib_entry,
5513 struct fib_info *fi)
5515 struct mlxsw_sp_nexthop_group *nh_grp;
5518 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
5520 if (WARN_ON_ONCE(!nh_grp))
5525 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
5527 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
5529 return PTR_ERR(nh_grp);
5532 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
5533 fib_entry->nh_group = nh_grp;
5537 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
5538 struct mlxsw_sp_fib_entry *fib_entry)
5540 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5542 list_del(&fib_entry->nexthop_group_node);
5543 if (!list_empty(&nh_grp->fib_list))
5546 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
5547 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5551 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
5555 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5557 struct mlxsw_sp_fib4_entry *fib4_entry;
5559 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5561 return !fib4_entry->dscp;
5565 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5567 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5569 switch (fib_entry->fib_node->fib->proto) {
5570 case MLXSW_SP_L3_PROTO_IPV4:
5571 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
5574 case MLXSW_SP_L3_PROTO_IPV6:
5578 switch (fib_entry->type) {
5579 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
5580 return !!nh_group->nhgi->adj_index_valid;
5581 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
5582 return !!nh_group->nhgi->nh_rif;
5583 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
5584 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5585 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
5592 static struct mlxsw_sp_nexthop *
5593 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
5594 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5595 {
5596 int i;
5598 for (i = 0; i < nh_grp->nhgi->count; i++) {
5599 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
5600 struct fib6_info *rt = mlxsw_sp_rt6->rt;
5602 if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
5603 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
5604 &rt->fib6_nh->fib_nh_gw6))
5605 return nh;
5606 }
5608 return NULL;
5609 }
5611 static void
5612 mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5613 struct fib_entry_notifier_info *fen_info)
5615 u32 *p_dst = (u32 *) &fen_info->dst;
5616 struct fib_rt_info fri;
5618 fri.fi = fen_info->fi;
5619 fri.tb_id = fen_info->tb_id;
5620 fri.dst = cpu_to_be32(*p_dst);
5621 fri.dst_len = fen_info->dst_len;
5622 fri.dscp = fen_info->dscp;
5623 fri.type = fen_info->type;
5624 fri.offload = false;
5626 fri.offload_failed = true;
5627 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5631 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5632 struct mlxsw_sp_fib_entry *fib_entry)
5634 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5635 int dst_len = fib_entry->fib_node->key.prefix_len;
5636 struct mlxsw_sp_fib4_entry *fib4_entry;
5637 struct fib_rt_info fri;
5638 bool should_offload;
5640 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5641 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5643 fri.fi = fib4_entry->fi;
5644 fri.tb_id = fib4_entry->tb_id;
5645 fri.dst = cpu_to_be32(*p_dst);
5646 fri.dst_len = dst_len;
5647 fri.dscp = fib4_entry->dscp;
5648 fri.type = fib4_entry->type;
5649 fri.offload = should_offload;
5650 fri.trap = !should_offload;
5651 fri.offload_failed = false;
5652 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5656 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5657 struct mlxsw_sp_fib_entry *fib_entry)
5659 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5660 int dst_len = fib_entry->fib_node->key.prefix_len;
5661 struct mlxsw_sp_fib4_entry *fib4_entry;
5662 struct fib_rt_info fri;
5664 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5666 fri.fi = fib4_entry->fi;
5667 fri.tb_id = fib4_entry->tb_id;
5668 fri.dst = cpu_to_be32(*p_dst);
5669 fri.dst_len = dst_len;
5670 fri.dscp = fib4_entry->dscp;
5671 fri.type = fib4_entry->type;
5672 fri.offload = false;
5674 fri.offload_failed = false;
5675 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5678 #if IS_ENABLED(CONFIG_IPV6)
5680 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5681 struct fib6_info **rt_arr,
5686 /* In IPv6 a multipath route is represented using multiple routes, so
5687 * we need to set the flags on all of them.
5688 */
5689 for (i = 0; i < nrt6; i++)
5690 fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
5691 false, false, true);
5695 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5696 struct fib6_info **rt_arr,
5702 #if IS_ENABLED(CONFIG_IPV6)
5704 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5705 struct mlxsw_sp_fib_entry *fib_entry)
5707 struct mlxsw_sp_fib6_entry *fib6_entry;
5708 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5709 bool should_offload;
5711 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5713 /* In IPv6 a multipath route is represented using multiple routes, so
5714 * we need to set the flags on all of them.
5715 */
5716 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5718 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5719 fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5720 should_offload, !should_offload, false);
5724 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5725 struct mlxsw_sp_fib_entry *fib_entry)
5730 #if IS_ENABLED(CONFIG_IPV6)
5732 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5733 struct mlxsw_sp_fib_entry *fib_entry)
5735 struct mlxsw_sp_fib6_entry *fib6_entry;
5736 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5738 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5740 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5741 fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5742 false, false, false);
5746 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5747 struct mlxsw_sp_fib_entry *fib_entry)
5753 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5754 struct mlxsw_sp_fib_entry *fib_entry)
5756 switch (fib_entry->fib_node->fib->proto) {
5757 case MLXSW_SP_L3_PROTO_IPV4:
5758 mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
5760 case MLXSW_SP_L3_PROTO_IPV6:
5761 mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
5767 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5768 struct mlxsw_sp_fib_entry *fib_entry)
5770 switch (fib_entry->fib_node->fib->proto) {
5771 case MLXSW_SP_L3_PROTO_IPV4:
5772 mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5774 case MLXSW_SP_L3_PROTO_IPV6:
5775 mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5781 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
5782 struct mlxsw_sp_fib_entry *fib_entry,
5783 enum mlxsw_sp_fib_entry_op op)
5786 case MLXSW_SP_FIB_ENTRY_OP_WRITE:
5787 case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
5788 mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
5790 case MLXSW_SP_FIB_ENTRY_OP_DELETE:
5791 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5798 struct mlxsw_sp_fib_entry_op_ctx_basic {
5799 char ralue_pl[MLXSW_REG_RALUE_LEN];
5803 mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5804 enum mlxsw_sp_l3proto proto,
5805 enum mlxsw_sp_fib_entry_op op,
5806 u16 virtual_router, u8 prefix_len,
5807 unsigned char *addr,
5808 struct mlxsw_sp_fib_entry_priv *priv)
5810 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5811 enum mlxsw_reg_ralxx_protocol ralxx_proto;
5812 char *ralue_pl = op_ctx_basic->ralue_pl;
5813 enum mlxsw_reg_ralue_op ralue_op;
5815 ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
5818 case MLXSW_SP_FIB_ENTRY_OP_WRITE:
5819 case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
5820 ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
5822 case MLXSW_SP_FIB_ENTRY_OP_DELETE:
5823 ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
5831 case MLXSW_SP_L3_PROTO_IPV4:
5832 mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
5833 virtual_router, prefix_len, (u32 *) addr);
5835 case MLXSW_SP_L3_PROTO_IPV6:
5836 mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
5837 virtual_router, prefix_len, addr);
5843 mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5844 enum mlxsw_reg_ralue_trap_action trap_action,
5845 u16 trap_id, u32 adjacency_index, u16 ecmp_size)
5847 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5849 mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
5850 trap_id, adjacency_index, ecmp_size);
5854 mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5855 enum mlxsw_reg_ralue_trap_action trap_action,
5856 u16 trap_id, u16 local_erif)
5858 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5860 mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
5861 trap_id, local_erif);
5865 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
5867 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5869 mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
5873 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5876 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5878 mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
5882 mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
5883 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5884 bool *postponed_for_bulk)
5886 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5888 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5889 op_ctx_basic->ralue_pl);
5893 mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
5898 static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5899 struct mlxsw_sp_fib_entry *fib_entry,
5900 enum mlxsw_sp_fib_entry_op op)
5902 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
5904 mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
5905 fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
5906 fib_entry->fib_node->key.prefix_len,
5907 fib_entry->fib_node->key.addr,
5911 static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
5912 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5913 const struct mlxsw_sp_router_ll_ops *ll_ops)
5914 {
5915 bool postponed_for_bulk = false;
5916 int err;
5918 err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
5919 if (!postponed_for_bulk)
5920 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
5921 return err;
5922 }
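/* FIB programming is deliberately split into "pack" and "commit"
 * phases so a low-level implementation can batch several packed
 * operations into one device transaction. The basic RALUE backend
 * above commits immediately, but the contract lets a backend set
 * *postponed_for_bulk and flush later. Caller-side shape:
 *
 *	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
 *	ll_ops->fib_entry_act_remote_pack(op_ctx, ...);
 *	err = mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
 *
 * The priv reference taken during pack is dropped on commit unless
 * the entry was queued for bulking.
 */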
5924 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
5925 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5926 struct mlxsw_sp_fib_entry *fib_entry,
5927 enum mlxsw_sp_fib_entry_op op)
5929 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5930 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5931 struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
5932 enum mlxsw_reg_ralue_trap_action trap_action;
5934 u32 adjacency_index = 0;
5937 /* In case the nexthop group adjacency index is valid, use it
5938 * with provided ECMP size. Otherwise, setup trap and pass
5939 * traffic to kernel.
5940 */
5941 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5942 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5943 adjacency_index = nhgi->adj_index;
5944 ecmp_size = nhgi->ecmp_size;
5945 } else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
5946 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5947 adjacency_index = mlxsw_sp->router->adj_trap_index;
5950 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5951 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5954 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5955 ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
5956 adjacency_index, ecmp_size);
5957 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5960 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
5961 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5962 struct mlxsw_sp_fib_entry *fib_entry,
5963 enum mlxsw_sp_fib_entry_op op)
5965 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5966 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
5967 enum mlxsw_reg_ralue_trap_action trap_action;
5971 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5972 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5973 rif_index = rif->rif_index;
5975 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5976 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5979 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5980 ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
5981 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5984 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
5985 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5986 struct mlxsw_sp_fib_entry *fib_entry,
5987 enum mlxsw_sp_fib_entry_op op)
5989 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5991 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5992 ll_ops->fib_entry_act_ip2me_pack(op_ctx);
5993 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5996 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
5997 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5998 struct mlxsw_sp_fib_entry *fib_entry,
5999 enum mlxsw_sp_fib_entry_op op)
6001 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6002 enum mlxsw_reg_ralue_trap_action trap_action;
6004 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
6005 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
6006 ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
6007 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
6011 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
6012 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6013 struct mlxsw_sp_fib_entry *fib_entry,
6014 enum mlxsw_sp_fib_entry_op op)
6016 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6017 enum mlxsw_reg_ralue_trap_action trap_action;
6020 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
6021 trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
6023 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
6024 ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
6025 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
6029 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
6030 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6031 struct mlxsw_sp_fib_entry *fib_entry,
6032 enum mlxsw_sp_fib_entry_op op)
6034 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6035 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
6036 const struct mlxsw_sp_ipip_ops *ipip_ops;
6039 if (WARN_ON(!ipip_entry))
6042 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
6043 err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
6044 fib_entry->decap.tunnel_index);
6048 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
6049 ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
6050 fib_entry->decap.tunnel_index);
6051 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
6054 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
6055 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6056 struct mlxsw_sp_fib_entry *fib_entry,
6057 enum mlxsw_sp_fib_entry_op op)
6059 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6061 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
6062 ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
6063 fib_entry->decap.tunnel_index);
6064 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
6067 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6068 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6069 struct mlxsw_sp_fib_entry *fib_entry,
6070 enum mlxsw_sp_fib_entry_op op)
6072 switch (fib_entry->type) {
6073 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
6074 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
6075 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
6076 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
6077 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
6078 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
6079 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
6080 return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
6081 case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
6082 return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
6083 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6084 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
6085 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
6086 return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
6091 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6092 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6093 struct mlxsw_sp_fib_entry *fib_entry,
6094 enum mlxsw_sp_fib_entry_op op)
6096 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
6101 mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
6106 static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6107 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6108 struct mlxsw_sp_fib_entry *fib_entry,
6111 return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
6112 is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
6113 MLXSW_SP_FIB_ENTRY_OP_UPDATE);
6116 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6117 struct mlxsw_sp_fib_entry *fib_entry)
6119 struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
6121 mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
6122 return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
6125 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
6126 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6127 struct mlxsw_sp_fib_entry *fib_entry)
6129 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6131 if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
6133 return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
6134 MLXSW_SP_FIB_ENTRY_OP_DELETE);
6138 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6139 const struct fib_entry_notifier_info *fen_info,
6140 struct mlxsw_sp_fib_entry *fib_entry)
6142 struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
6143 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
6144 struct mlxsw_sp_router *router = mlxsw_sp->router;
6145 u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
6146 int ifindex = nhgi->nexthops[0].ifindex;
6147 struct mlxsw_sp_ipip_entry *ipip_entry;
6149 switch (fen_info->type) {
6151 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
6152 MLXSW_SP_L3_PROTO_IPV4, dip);
6153 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
6154 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
6155 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
6159 if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
6160 MLXSW_SP_L3_PROTO_IPV4,
6164 tunnel_index = router->nve_decap_config.tunnel_index;
6165 fib_entry->decap.tunnel_index = tunnel_index;
6166 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
6171 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6174 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6176 case RTN_UNREACHABLE:
6178 /* Packets hitting these routes need to be trapped, but
6179 * can do so with a lower priority than packets directed
6180 * at the host, so use action type local instead of trap.
6181 */
6182 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6186 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6188 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6196 mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6197 struct mlxsw_sp_fib_entry *fib_entry)
6199 switch (fib_entry->type) {
6200 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6201 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
6209 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6210 struct mlxsw_sp_fib4_entry *fib4_entry)
6212 mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
6215 static struct mlxsw_sp_fib4_entry *
6216 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
6217 struct mlxsw_sp_fib_node *fib_node,
6218 const struct fib_entry_notifier_info *fen_info)
6220 struct mlxsw_sp_fib4_entry *fib4_entry;
6221 struct mlxsw_sp_fib_entry *fib_entry;
6224 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
6226 return ERR_PTR(-ENOMEM);
6227 fib_entry = &fib4_entry->common;
6229 fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
6230 if (IS_ERR(fib_entry->priv)) {
6231 err = PTR_ERR(fib_entry->priv);
6232 goto err_fib_entry_priv_create;
6235 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
6237 goto err_nexthop4_group_get;
6239 err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6242 goto err_nexthop_group_vr_link;
6244 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
6246 goto err_fib4_entry_type_set;
6248 fib4_entry->fi = fen_info->fi;
6249 fib_info_hold(fib4_entry->fi);
6250 fib4_entry->tb_id = fen_info->tb_id;
6251 fib4_entry->type = fen_info->type;
6252 fib4_entry->dscp = fen_info->dscp;
6254 fib_entry->fib_node = fib_node;
6258 err_fib4_entry_type_set:
6259 mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
6260 err_nexthop_group_vr_link:
6261 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6262 err_nexthop4_group_get:
6263 mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
6264 err_fib_entry_priv_create:
6266 return ERR_PTR(err);
6269 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6270 struct mlxsw_sp_fib4_entry *fib4_entry)
6272 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6274 fib_info_put(fib4_entry->fi);
6275 mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
6276 mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
6278 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6279 mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
6283 static struct mlxsw_sp_fib4_entry *
6284 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6285 const struct fib_entry_notifier_info *fen_info)
6287 struct mlxsw_sp_fib4_entry *fib4_entry;
6288 struct mlxsw_sp_fib_node *fib_node;
6289 struct mlxsw_sp_fib *fib;
6290 struct mlxsw_sp_vr *vr;
6292 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
6295 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
6297 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
6298 sizeof(fen_info->dst),
6303 fib4_entry = container_of(fib_node->fib_entry,
6304 struct mlxsw_sp_fib4_entry, common);
6305 if (fib4_entry->tb_id == fen_info->tb_id &&
6306 fib4_entry->dscp == fen_info->dscp &&
6307 fib4_entry->type == fen_info->type &&
6308 fib4_entry->fi == fen_info->fi)
6314 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
6315 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
6316 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
6317 .key_len = sizeof(struct mlxsw_sp_fib_key),
6318 .automatic_shrinking = true,
6321 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
6322 struct mlxsw_sp_fib_node *fib_node)
6324 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
6325 mlxsw_sp_fib_ht_params);
6328 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
6329 struct mlxsw_sp_fib_node *fib_node)
6331 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
6332 mlxsw_sp_fib_ht_params);
6335 static struct mlxsw_sp_fib_node *
6336 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
6337 size_t addr_len, unsigned char prefix_len)
6339 struct mlxsw_sp_fib_key key;
6341 memset(&key, 0, sizeof(key));
6342 memcpy(key.addr, addr, addr_len);
6343 key.prefix_len = prefix_len;
6344 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
6345 }
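/* The memset() before the memcpy() above is load-bearing: rhashtable
 * hashes struct mlxsw_sp_fib_key as raw bytes, so padding and the
 * unused tail of addr[] (addr_len is 4 for IPv4, 16 for IPv6) must be
 * zeroed, or two logically equal keys could hash differently. Any
 * struct used as a byte-wise hash key needs the same treatment:
 *
 *	struct mlxsw_sp_fib_key key;
 *
 *	memset(&key, 0, sizeof(key));
 *	memcpy(key.addr, addr, addr_len);
 *	key.prefix_len = prefix_len;
 */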
6347 static struct mlxsw_sp_fib_node *
6348 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
6349 size_t addr_len, unsigned char prefix_len)
6351 struct mlxsw_sp_fib_node *fib_node;
6353 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
6357 list_add(&fib_node->list, &fib->node_list);
6358 memcpy(fib_node->key.addr, addr, addr_len);
6359 fib_node->key.prefix_len = prefix_len;
6364 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
6366 list_del(&fib_node->list);
6370 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
6371 struct mlxsw_sp_fib_node *fib_node)
6373 struct mlxsw_sp_prefix_usage req_prefix_usage;
6374 struct mlxsw_sp_fib *fib = fib_node->fib;
6375 struct mlxsw_sp_lpm_tree *lpm_tree;
6378 lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
6379 if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6382 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6383 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
6384 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6386 if (IS_ERR(lpm_tree))
6387 return PTR_ERR(lpm_tree);
6389 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6391 goto err_lpm_tree_replace;
6394 lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
6397 err_lpm_tree_replace:
6398 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6402 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
6403 struct mlxsw_sp_fib_node *fib_node)
6405 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
6406 struct mlxsw_sp_prefix_usage req_prefix_usage;
6407 struct mlxsw_sp_fib *fib = fib_node->fib;
6410 if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6412 /* Try to construct a new LPM tree from the current prefix usage
6413 * minus the unused one. If we fail, continue using the old one.
6414 */
6415 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6416 mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
6417 fib_node->key.prefix_len);
6418 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6420 if (IS_ERR(lpm_tree))
6423 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6425 goto err_lpm_tree_replace;
6429 err_lpm_tree_replace:
6430 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6433 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
6434 struct mlxsw_sp_fib_node *fib_node,
6435 struct mlxsw_sp_fib *fib)
6439 err = mlxsw_sp_fib_node_insert(fib, fib_node);
6442 fib_node->fib = fib;
6444 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
6446 goto err_fib_lpm_tree_link;
6450 err_fib_lpm_tree_link:
6451 fib_node->fib = NULL;
6452 mlxsw_sp_fib_node_remove(fib, fib_node);
6456 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
6457 struct mlxsw_sp_fib_node *fib_node)
6459 struct mlxsw_sp_fib *fib = fib_node->fib;
6461 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
6462 fib_node->fib = NULL;
6463 mlxsw_sp_fib_node_remove(fib, fib_node);
6466 static struct mlxsw_sp_fib_node *
6467 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
6468 size_t addr_len, unsigned char prefix_len,
6469 enum mlxsw_sp_l3proto proto)
6471 struct mlxsw_sp_fib_node *fib_node;
6472 struct mlxsw_sp_fib *fib;
6473 struct mlxsw_sp_vr *vr;
6476 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
6478 return ERR_CAST(vr);
6479 fib = mlxsw_sp_vr_fib(vr, proto);
6481 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
6485 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
6488 goto err_fib_node_create;
6491 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
6493 goto err_fib_node_init;
6498 mlxsw_sp_fib_node_destroy(fib_node);
6499 err_fib_node_create:
6500 mlxsw_sp_vr_put(mlxsw_sp, vr);
6501 return ERR_PTR(err);
6504 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
6505 struct mlxsw_sp_fib_node *fib_node)
6507 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
6509 if (fib_node->fib_entry)
6511 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
6512 mlxsw_sp_fib_node_destroy(fib_node);
6513 mlxsw_sp_vr_put(mlxsw_sp, vr);
6516 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
6517 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6518 struct mlxsw_sp_fib_entry *fib_entry)
6520 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6521 bool is_new = !fib_node->fib_entry;
6524 fib_node->fib_entry = fib_entry;
6526 err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
6528 goto err_fib_entry_update;
6532 err_fib_entry_update:
6533 fib_node->fib_entry = NULL;
6537 static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6538 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6539 struct mlxsw_sp_fib_entry *fib_entry)
6541 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6544 err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
6545 fib_node->fib_entry = NULL;
6549 static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6550 struct mlxsw_sp_fib_entry *fib_entry)
6552 struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
6554 mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
6555 __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
6558 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
6560 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6561 struct mlxsw_sp_fib4_entry *fib4_replaced;
6563 if (!fib_node->fib_entry)
6566 fib4_replaced = container_of(fib_node->fib_entry,
6567 struct mlxsw_sp_fib4_entry, common);
6568 if (fib4_entry->tb_id == RT_TABLE_MAIN &&
6569 fib4_replaced->tb_id == RT_TABLE_LOCAL)
6570 return false;
6572 return true;
6573 }
6575 static int
6576 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
6577 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6578 const struct fib_entry_notifier_info *fen_info)
6580 struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
6581 struct mlxsw_sp_fib_entry *replaced;
6582 struct mlxsw_sp_fib_node *fib_node;
6585 if (fen_info->fi->nh &&
6586 !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
6589 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
6590 &fen_info->dst, sizeof(fen_info->dst),
6592 MLXSW_SP_L3_PROTO_IPV4);
6593 if (IS_ERR(fib_node)) {
6594 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
6595 return PTR_ERR(fib_node);
6598 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
6599 if (IS_ERR(fib4_entry)) {
6600 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
6601 err = PTR_ERR(fib4_entry);
6602 goto err_fib4_entry_create;
6605 if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
6606 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6607 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6611 replaced = fib_node->fib_entry;
6612 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
6614 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
6615 goto err_fib_node_entry_link;
6618 /* Nothing to replace */
6622 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
6623 fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
6625 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
6629 err_fib_node_entry_link:
6630 fib_node->fib_entry = replaced;
6631 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6632 err_fib4_entry_create:
6633 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6637 static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
6638 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6639 struct fib_entry_notifier_info *fen_info)
6641 struct mlxsw_sp_fib4_entry *fib4_entry;
6642 struct mlxsw_sp_fib_node *fib_node;
6645 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
6648 fib_node = fib4_entry->common.fib_node;
6650 err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
6651 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6652 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6656 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
6658 /* Multicast routes aren't supported, so ignore them. Neighbour
6659 * Discovery packets are specifically trapped.
6660 */
6661 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
6664 /* Cloned routes are irrelevant in the forwarding path. */
6665 if (rt->fib6_flags & RTF_CACHE)
6671 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
6672 {
6673 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6675 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
6676 if (!mlxsw_sp_rt6)
6677 return ERR_PTR(-ENOMEM);
6679 /* In case of route replace, replaced route is deleted with
6680 * no notification. Take reference to prevent accessing freed
6681 * memory.
6682 */
6683 mlxsw_sp_rt6->rt = rt;
6684 fib6_info_hold(rt);
6686 return mlxsw_sp_rt6;
6687 }
6689 #if IS_ENABLED(CONFIG_IPV6)
6690 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6692 fib6_info_release(rt);
6695 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6700 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
6702 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
6704 if (!mlxsw_sp_rt6->rt->nh)
6705 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
6706 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
6707 kfree(mlxsw_sp_rt6);
6708 }
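/* mlxsw_sp_rt6_create()/mlxsw_sp_rt6_destroy() bracket the lifetime
 * of the fib6_info reference: create takes a hold because the IPv6
 * stack frees a replaced route without a deletion notification, and
 * destroy clears the offload flag and drops the hold (a no-op stub
 * when IPv6 is compiled out). Usage shape:
 *
 *	mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
 *	...
 *	mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
 */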
static struct fib6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				list)->rt;
}

static struct mlxsw_sp_rt6 *
mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
			    const struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		if (mlxsw_sp_rt6->rt == rt)
			return mlxsw_sp_rt6;
	}

	return NULL;
}

static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
					const struct fib6_info *rt,
					enum mlxsw_sp_ipip_type *ret)
{
	return rt->fib6_nh->fib_nh_dev &&
	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
}
static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  const struct fib6_info *rt)
{
	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
	int err;

	nh->nhgi = nh_grp->nhgi;
	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
#if IS_ENABLED(CONFIG_IPV6)
	nh->neigh_tbl = &nd_tbl;
#endif
	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);

	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);

	if (!dev)
		return 0;
	nh->ifindex = dev->ifindex;

	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
	if (err)
		goto err_nexthop_type_init;

	return 0;

err_nexthop_type_init:
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
	return err;
}

static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}

static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
				    const struct fib6_info *rt)
{
	return rt->fib6_nh->fib_nh_gw_family ||
	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}
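
/* The nexthop group info is allocated with a flexible array of nexthops,
 * one per fib6_info in the entry. After all nexthops are initialized the
 * group is refreshed, which writes the corresponding adjacency entries
 * to the device.
 */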
static int
mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	struct mlxsw_sp_nexthop *nh;
	int err, i;

	nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
		       GFP_KERNEL);
	if (!nhgi)
		return -ENOMEM;
	nh_grp->nhgi = nhgi;
	nhgi->nh_grp = nh_grp;
	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
					struct mlxsw_sp_rt6, list);
	nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
	nhgi->count = fib6_entry->nrt6;
	for (i = 0; i < nhgi->count; i++) {
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		nh = &nhgi->nexthops[i];
		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
		if (err)
			goto err_nexthop6_init;
		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
	}
	nh_grp->nhgi = nhgi;
	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
	if (err)
		goto err_group_inc;
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err)
		goto err_group_refresh;

	return 0;

err_group_refresh:
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
err_group_inc:
	i = nhgi->count;
err_nexthop6_init:
	for (i--; i >= 0; i--) {
		nh = &nhgi->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	kfree(nhgi);
	return err;
}

static void
mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	int i;

	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];

		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nhgi->adj_index_valid);
	kfree(nhgi);
}
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;

	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->vr_list);
	err = rhashtable_init(&nh_grp->vr_ht,
			      &mlxsw_sp_nexthop_group_vr_ht_params);
	if (err)
		goto err_nexthop_group_vr_ht_init;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;

	err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
	if (err)
		goto err_nexthop_group_info_init;

	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;

	nh_grp->can_destroy = true;

	return nh_grp;

err_nexthop_group_insert:
	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
err_nexthop_group_info_init:
	rhashtable_destroy(&nh_grp->vr_ht);
err_nexthop_group_vr_ht_init:
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (!nh_grp->can_destroy)
		return;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
	rhashtable_destroy(&nh_grp->vr_ht);
	kfree(nh_grp);
}
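
/* Routes that use a nexthop object (rt->nh) attach to the group that was
 * created when the object itself was notified; such groups are only
 * looked up by ID here, never created. Legacy routes share a group with
 * other routes that have an identical set of nexthops, and a new group
 * is created only when no match exists.
 */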
static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	struct mlxsw_sp_nexthop_group *nh_grp;

	if (rt->nh) {
		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
							   rt->nh->id);
		if (WARN_ON_ONCE(!nh_grp))
			return -EINVAL;
		goto out;
	}

	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}

	/* The route and the nexthop are described by the same struct, so we
	 * need to update the nexthop offload indication for the new route.
	 */
	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);

out:
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &nh_grp->fib_list);
	fib6_entry->common.nh_group = nh_grp;

	return 0;
}

static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;

	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
		return;
	}

	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}
static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					  struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
	int err;

	mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
	fib6_entry->common.nh_group = NULL;
	list_del(&fib6_entry->common.nexthop_group_node);

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
					     fib_node->fib);
	if (err)
		goto err_nexthop_group_vr_link;

	/* In case this entry is offloaded, then the adjacency index
	 * currently associated with it in the device's table is that
	 * of the old group. Start using the new one instead.
	 */
	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
					  &fib6_entry->common, false);
	if (err)
		goto err_fib_entry_update;

	if (list_empty(&old_nh_grp->fib_list))
		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);

	return 0;

err_fib_entry_update:
	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
					 fib_node->fib);
err_nexthop_group_vr_link:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &old_nh_grp->fib_list);
	fib6_entry->common.nh_group = old_nh_grp;
	mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
	return err;
}
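
/* Appending nexthops to an existing entry: wrap the new fib6_infos, link
 * them at the tail of the entry's rt6_list and then update the nexthop
 * group, rolling the list back if the update fails.
 */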
static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err, i;

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
		if (IS_ERR(mlxsw_sp_rt6)) {
			err = PTR_ERR(mlxsw_sp_rt6);
			goto err_rt6_unwind;
		}

		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
		fib6_entry->nrt6++;
	}

	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
	if (err)
		goto err_rt6_unwind;

	return 0;

err_rt6_unwind:
	for (; i > 0; i--) {
		fib6_entry->nrt6--;
		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
					       struct mlxsw_sp_rt6, list);
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
	return err;
}

static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int i;

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
							   rt_arr[i]);
		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
			continue;

		fib6_entry->nrt6--;
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}

	mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
}
static int
mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry,
				   const struct fib6_info *rt)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
	union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
	u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	int ifindex = nhgi->nexthops[0].ifindex;
	struct mlxsw_sp_ipip_entry *ipip_entry;

	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
						       MLXSW_SP_L3_PROTO_IPV6,
						       dip);

	if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
		return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
						     ipip_entry);
	}
	if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
					 MLXSW_SP_L3_PROTO_IPV6, &dip)) {
		u32 tunnel_index;

		tunnel_index = router->nve_decap_config.tunnel_index;
		fib_entry->decap.tunnel_index = tunnel_index;
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
	}

	return 0;
}

static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					const struct fib6_info *rt)
{
	if (rt->fib6_flags & RTF_LOCAL)
		return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
							  rt);
	if (rt->fib6_flags & RTF_ANYCAST)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	else if (rt->fib6_type == RTN_BLACKHOLE)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
	else if (rt->fib6_flags & RTF_REJECT)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
	else if (fib_entry->nh_group->nhgi->gateway)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
	else
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;

	return 0;
}
static void
mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;

	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
				 list) {
		fib6_entry->nrt6--;
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
}
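
/* Creation of an IPv6 entry: the per-low-level-ops private area is
 * allocated first, then one mlxsw_sp_rt6 wrapper is created per route in
 * rt_arr, and finally the nexthop group is taken and the entry type is
 * derived from the first route.
 */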
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err, i;

	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
	if (!fib6_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib6_entry->common;

	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
	if (IS_ERR(fib_entry->priv)) {
		err = PTR_ERR(fib_entry->priv);
		goto err_fib_entry_priv_create;
	}

	INIT_LIST_HEAD(&fib6_entry->rt6_list);

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
		if (IS_ERR(mlxsw_sp_rt6)) {
			err = PTR_ERR(mlxsw_sp_rt6);
			goto err_rt6_unwind;
		}
		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
		fib6_entry->nrt6++;
	}

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_rt6_unwind;

	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
					     fib_node->fib);
	if (err)
		goto err_nexthop_group_vr_link;

	err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
	if (err)
		goto err_fib6_entry_type_set;

	fib_entry->fib_node = fib_node;

	return fib6_entry;

err_fib6_entry_type_set:
	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
err_rt6_unwind:
	for (; i > 0; i--) {
		fib6_entry->nrt6--;
		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
					       struct mlxsw_sp_rt6, list);
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
err_fib_entry_priv_create:
	kfree(fib6_entry);
	return ERR_PTR(err);
}
static void
mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
}

static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;

	mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
					 fib_node->fib);
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
	WARN_ON(fib6_entry->nrt6);
	mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
	kfree(fib6_entry);
}
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct fib6_info *cmp_rt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
					    sizeof(rt->fib6_dst.addr),
					    rt->fib6_dst.plen);
	if (!fib_node)
		return NULL;

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
	    rt->fib6_metric == cmp_rt->fib6_metric &&
	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
		return fib6_entry;

	return NULL;
}
static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
	struct mlxsw_sp_fib6_entry *fib6_replaced;
	struct fib6_info *rt, *rt_replaced;

	if (!fib_node->fib_entry)
		return true;

	fib6_replaced = container_of(fib_node->fib_entry,
				     struct mlxsw_sp_fib6_entry,
				     common);
	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
		return false;

	return true;
}
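
/* Replace of an IPv6 route: a new entry is created and linked to the FIB
 * node, and only then is the previous entry (if any) destroyed, so that
 * the prefix is never left without an entry. Ignored routes and routes
 * using an unknown (previously vetoed) nexthop object are skipped;
 * source-specific routes are rejected.
 */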
static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
	struct mlxsw_sp_fib_entry *replaced;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	if (rt->fib6_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
					 &rt->fib6_dst.addr,
					 sizeof(rt->fib6_dst.addr),
					 rt->fib6_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
						nrt6);
	if (IS_ERR(fib6_entry)) {
		err = PTR_ERR(fib6_entry);
		goto err_fib6_entry_create;
	}

	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return 0;
	}

	replaced = fib_node->fib_entry;
	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
	if (err)
		goto err_fib_node_entry_link;

	/* Nothing to replace */
	if (!replaced)
		return 0;

	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
				     common);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);

	return 0;

err_fib_node_entry_link:
	fib_node->fib_entry = replaced;
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
err_fib6_entry_create:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}
static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				       struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	if (rt->fib6_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
					 &rt->fib6_dst.addr,
					 sizeof(rt->fib6_dst.addr),
					 rt->fib6_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return -EINVAL;
	}

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
	if (err)
		goto err_fib6_entry_nexthop_add;

	return 0;

err_fib6_entry_nexthop_add:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}
static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				    struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	/* Multipath routes are first added to the FIB trie and only then
	 * notified. If we vetoed the addition, we will get a delete
	 * notification for a route we do not have. Therefore, do not warn if
	 * route was not found.
	 */
	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
	if (!fib6_entry)
		return 0;

	/* If not all the nexthops are deleted, then only reduce the nexthop
	 * group.
	 */
	if (nrt6 != fib6_entry->nrt6) {
		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
		return 0;
	}

	fib_node = fib6_entry->common.fib_node;

	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}
static struct mlxsw_sp_mr_table *
mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
{
	if (family == RTNL_FAMILY_IPMR)
		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
	else
		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
				     struct mfc_entry_notifier_info *men_info,
				     bool replace)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
}

static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
				      struct mfc_entry_notifier_info *men_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

static int
mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
				   ven_info->vif_index,
				   ven_info->vif_flags, rif);
}

static void
mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib4_entry, common);
	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_node *fib_node)
{
	switch (fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
		break;
	}
}

static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vr *vr,
				  enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	struct mlxsw_sp_fib_node *fib_node, *tmp;

	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
		bool do_break = &tmp->list == &fib->node_list;

		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	int i, j;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);

		/* If virtual router was only used for IPv4, then it's no
		 * longer used.
		 */
		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	}
}
struct mlxsw_sp_fib6_event {
	struct fib6_info **rt_arr;
	unsigned int nrt6;
};

struct mlxsw_sp_fib_event {
	struct list_head list; /* node in fib queue */
	union {
		struct mlxsw_sp_fib6_event fib6_event;
		struct fib_entry_notifier_info fen_info;
		struct fib_rule_notifier_info fr_info;
		struct fib_nh_notifier_info fnh_info;
		struct mfc_entry_notifier_info men_info;
		struct vif_entry_notifier_info ven_info;
	};
	struct mlxsw_sp *mlxsw_sp;
	unsigned long event;
	int family;
};
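
/* An IPv6 notification only carries the first route of a multipath
 * route. Flatten it and its siblings into an array (rt_arr), taking a
 * reference on each fib6_info, so the routes stay valid while the event
 * waits in the queue.
 */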
static int
mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
				struct fib6_entry_notifier_info *fen6_info)
{
	struct fib6_info *rt = fen6_info->rt;
	struct fib6_info **rt_arr;
	struct fib6_info *iter;
	unsigned int nrt6;
	int i = 0;

	nrt6 = fen6_info->nsiblings + 1;

	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
	if (!rt_arr)
		return -ENOMEM;

	fib6_event->rt_arr = rt_arr;
	fib6_event->nrt6 = nrt6;

	rt_arr[0] = rt;
	fib6_info_hold(rt);

	if (!fen6_info->nsiblings)
		return 0;

	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
		if (i == fen6_info->nsiblings)
			break;

		rt_arr[i + 1] = iter;
		fib6_info_hold(iter);
		i++;
	}
	WARN_ON_ONCE(i != fen6_info->nsiblings);

	return 0;
}

static void
mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
{
	int i;

	for (i = 0; i < fib6_event->nrt6; i++)
		mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
	kfree(fib6_event->rt_arr);
}
static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					       struct mlxsw_sp_fib_event *fib_event)
{
	int err;

	mlxsw_sp_span_respin(mlxsw_sp);

	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
		err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
		if (err) {
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
			mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
							      &fib_event->fen_info);
		}
		fib_info_put(fib_event->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
		if (err)
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
		fib_info_put(fib_event->fen_info.fi);
		break;
	case FIB_EVENT_NH_ADD:
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
		fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
		break;
	}
}
static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					       struct mlxsw_sp_fib_event *fib_event)
{
	struct mlxsw_sp_fib6_event *fib6_event = &fib_event->fib6_event;
	int err;

	mlxsw_sp_span_respin(mlxsw_sp);

	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
		err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
						   fib_event->fib6_event.nrt6);
		if (err) {
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
							      fib6_event->rt_arr,
							      fib6_event->nrt6);
		}
		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
		break;
	case FIB_EVENT_ENTRY_APPEND:
		err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
						  fib_event->fib6_event.nrt6);
		if (err) {
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
			dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
							      fib6_event->rt_arr,
							      fib6_event->nrt6);
		}
		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
		break;
	case FIB_EVENT_ENTRY_DEL:
		err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
					       fib_event->fib6_event.nrt6);
		if (err)
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
		break;
	}
}
static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_fib_event *fib_event)
{
	bool replace;
	int err;

	mutex_lock(&mlxsw_sp->router->lock);
	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;

		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
		if (err)
			dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
		mr_cache_put(fib_event->men_info.mfc);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
		mr_cache_put(fib_event->men_info.mfc);
		break;
	case FIB_EVENT_VIF_ADD:
		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
						    &fib_event->ven_info);
		if (err)
			dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
		dev_put(fib_event->ven_info.dev);
		break;
	case FIB_EVENT_VIF_DEL:
		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
		dev_put(fib_event->ven_info.dev);
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);
}
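
/* FIB events are processed from a work queue, outside the atomic
 * notifier context in which they were generated. Consecutive events of
 * the same family and type may be bulked into a single register write
 * via the shared operation context (op_ctx).
 */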
static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
	struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
	struct mlxsw_sp_fib_event *next_fib_event;
	struct mlxsw_sp_fib_event *fib_event;
	int last_family = AF_UNSPEC;
	LIST_HEAD(fib_event_queue);

	spin_lock_bh(&router->fib_event_queue_lock);
	list_splice_init(&router->fib_event_queue, &fib_event_queue);
	spin_unlock_bh(&router->fib_event_queue_lock);

	/* Router lock is held here to make sure per-instance
	 * operation context is not used in between FIB4/6 events
	 * processing.
	 */
	mutex_lock(&router->lock);
	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
	list_for_each_entry_safe(fib_event, next_fib_event,
				 &fib_event_queue, list) {
		/* Check if the next entry in the queue exists and it is
		 * of the same type (family and event) as the current one.
		 * In that case it is permitted to do the bulking
		 * of multiple FIB entries to a single register write.
		 */
		op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
				  fib_event->family == next_fib_event->family &&
				  fib_event->event == next_fib_event->event;
		op_ctx->event = fib_event->event;

		/* In case family of this and the previous entry are different, context
		 * reinitialization is going to be needed now, indicate that.
		 * Note that since last_family is initialized to AF_UNSPEC, this is always
		 * going to happen for the first entry processed in the work.
		 */
		if (fib_event->family != last_family)
			op_ctx->initialized = false;

		switch (fib_event->family) {
		case AF_INET:
			mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
							   fib_event);
			break;
		case AF_INET6:
			mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
							   fib_event);
			break;
		case RTNL_FAMILY_IP6MR:
		case RTNL_FAMILY_IPMR:
			/* Unlock here as inside FIBMR the lock is taken again
			 * under RTNL. The per-instance operation context
			 * is not used by FIBMR.
			 */
			mutex_unlock(&router->lock);
			mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
							    fib_event);
			mutex_lock(&router->lock);
			break;
		default:
			WARN_ON_ONCE(1);
		}
		last_family = fib_event->family;
		kfree(fib_event);
		cond_resched();
	}
	WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
	mutex_unlock(&router->lock);
}
static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
				       struct fib_notifier_info *info)
{
	struct fib_entry_notifier_info *fen_info;
	struct fib_nh_notifier_info *fnh_info;

	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_DEL:
		fen_info = container_of(info, struct fib_entry_notifier_info,
					info);
		fib_event->fen_info = *fen_info;
		/* Take reference on fib_info to prevent it from being
		 * freed while event is queued. Release it afterwards.
		 */
		fib_info_hold(fib_event->fen_info.fi);
		break;
	case FIB_EVENT_NH_ADD:
	case FIB_EVENT_NH_DEL:
		fnh_info = container_of(info, struct fib_nh_notifier_info,
					info);
		fib_event->fnh_info = *fnh_info;
		fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
		break;
	}
}

static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
				      struct fib_notifier_info *info)
{
	struct fib6_entry_notifier_info *fen6_info;
	int err;

	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_APPEND:
	case FIB_EVENT_ENTRY_DEL:
		fen6_info = container_of(info, struct fib6_entry_notifier_info,
					 info);
		err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
						      fen6_info);
		if (err)
			return err;
		break;
	}

	return 0;
}

static void
mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
			    struct fib_notifier_info *info)
{
	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
		mr_cache_hold(fib_event->men_info.mfc);
		break;
	case FIB_EVENT_VIF_ADD:
	case FIB_EVENT_VIF_DEL:
		memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
		dev_hold(fib_event->ven_info.dev);
		break;
	}
}
static int mlxsw_sp_router_fib_rule_event(unsigned long event,
					  struct fib_notifier_info *info,
					  struct mlxsw_sp *mlxsw_sp)
{
	struct netlink_ext_ack *extack = info->extack;
	struct fib_rule_notifier_info *fr_info;
	struct fib_rule *rule;
	int err = 0;

	/* nothing to do at the moment */
	if (event == FIB_EVENT_RULE_DEL)
		return 0;

	fr_info = container_of(info, struct fib_rule_notifier_info, info);
	rule = fr_info->rule;

	/* Rule only affects locally generated traffic */
	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
		return 0;

	switch (info->family) {
	case AF_INET:
		if (!fib4_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	case AF_INET6:
		if (!fib6_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	case RTNL_FAMILY_IPMR:
		if (!ipmr_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	case RTNL_FAMILY_IP6MR:
		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	}

	if (err < 0)
		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");

	return err;
}
/* Called with rcu_read_lock() */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp_fib_event *fib_event;
	struct fib_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;
	int err;

	if ((info->family != AF_INET && info->family != AF_INET6 &&
	     info->family != RTNL_FAMILY_IPMR &&
	     info->family != RTNL_FAMILY_IP6MR))
		return NOTIFY_DONE;

	router = container_of(nb, struct mlxsw_sp_router, fib_nb);

	switch (event) {
	case FIB_EVENT_RULE_ADD:
	case FIB_EVENT_RULE_DEL:
		err = mlxsw_sp_router_fib_rule_event(event, info,
						     router->mlxsw_sp);
		return notifier_from_errno(err);
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_APPEND:
		if (info->family == AF_INET) {
			struct fib_entry_notifier_info *fen_info = ptr;

			if (fen_info->fi->fib_nh_is_v6) {
				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
				return notifier_from_errno(-EINVAL);
			}
		}
		break;
	}

	fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
	if (!fib_event)
		return NOTIFY_BAD;

	fib_event->mlxsw_sp = router->mlxsw_sp;
	fib_event->event = event;
	fib_event->family = info->family;

	switch (info->family) {
	case AF_INET:
		mlxsw_sp_router_fib4_event(fib_event, info);
		break;
	case AF_INET6:
		err = mlxsw_sp_router_fib6_event(fib_event, info);
		if (err)
			goto err_fib_event;
		break;
	case RTNL_FAMILY_IP6MR:
	case RTNL_FAMILY_IPMR:
		mlxsw_sp_router_fibmr_event(fib_event, info);
		break;
	}

	/* Enqueue the event and trigger the work */
	spin_lock_bh(&router->fib_event_queue_lock);
	list_add_tail(&fib_event->list, &router->fib_event_queue);
	spin_unlock_bh(&router->fib_event_queue_lock);
	mlxsw_core_schedule_work(&router->fib_event_work);

	return NOTIFY_DONE;

err_fib_event:
	kfree(fib_event);
	return NOTIFY_BAD;
}
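
/* RIF (router interface) handling below. RIFs are kept in a flat array
 * indexed by RIF index; lookup by netdev is a linear scan over this
 * array and is therefore done under the router lock by all callers.
 */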
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		if (mlxsw_sp->router->rifs[i] &&
		    mlxsw_sp->router->rifs[i]->dev == dev)
			return mlxsw_sp->router->rifs[i];

	return NULL;
}

bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	struct mlxsw_sp_rif *rif;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	mutex_unlock(&mlxsw_sp->router->lock);

	return rif;
}

u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
{
	struct mlxsw_sp_rif *rif;
	u16 vid = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		goto out;

	/* We only return the VID for VLAN RIFs. Otherwise we return an
	 * invalid value (0).
	 */
	if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
		goto out;

	vid = mlxsw_sp_fid_8021q_vid(rif->fid);

out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return vid;
}
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
}
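
/* Decide whether a netdev event should result in RIF creation or
 * destruction. On NETDEV_UP a RIF is needed only if one does not already
 * exist; on NETDEV_DOWN it is removed only once the device has neither
 * IPv4 nor IPv6 addresses left.
 */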
static bool
mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
			   unsigned long event)
{
	struct inet6_dev *inet6_dev;
	bool addr_list_empty = true;
	struct in_device *idev;

	switch (event) {
	case NETDEV_UP:
		return rif == NULL;
	case NETDEV_DOWN:
		rcu_read_lock();
		idev = __in_dev_get_rcu(dev);
		if (idev && idev->ifa_list)
			addr_list_empty = false;

		inet6_dev = __in6_dev_get(dev);
		if (addr_list_empty && inet6_dev &&
		    !list_empty(&inet6_dev->addr_list))
			addr_list_empty = false;
		rcu_read_unlock();

		/* macvlans do not have a RIF, but rather piggy back on the
		 * RIF of their lower device.
		 */
		if (netif_is_macvlan(dev) && addr_list_empty)
			return true;

		if (rif && addr_list_empty &&
		    !netif_is_l3_slave(rif->dev))
			return true;
		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or LAG slave.
		 */
		return false;
	}

	return false;
}
static enum mlxsw_sp_rif_type
mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *dev)
{
	enum mlxsw_sp_fid_type type;

	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
		return MLXSW_SP_RIF_TYPE_IPIP_LB;

	/* Otherwise RIF type is derived from the type of the underlying FID. */
	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
		type = MLXSW_SP_FID_TYPE_8021Q;
	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
		type = MLXSW_SP_FID_TYPE_8021Q;
	else if (netif_is_bridge_master(dev))
		type = MLXSW_SP_FID_TYPE_8021D;
	else
		type = MLXSW_SP_FID_TYPE_RFID;

	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
}
static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
		if (!mlxsw_sp->router->rifs[i]) {
			*p_rif_index = i;
			return 0;
		}
	}

	return -ENOBUFS;
}

static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
					       u16 vr_id,
					       struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = kzalloc(rif_size, GFP_KERNEL);
	if (!rif)
		return NULL;

	INIT_LIST_HEAD(&rif->nexthop_list);
	INIT_LIST_HEAD(&rif->neigh_list);
	if (l3_dev) {
		ether_addr_copy(rif->addr, l3_dev->dev_addr);
		rif->mtu = l3_dev->mtu;
		rif->dev = l3_dev;
	}
	rif->vr_id = vr_id;
	rif->rif_index = rif_index;

	return rif;
}

struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
					   u16 rif_index)
{
	return mlxsw_sp->router->rifs[rif_index];
}

u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
	return rif->rif_index;
}

u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return lb_rif->common.rif_index;
}
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
	if (WARN_ON(IS_ERR(ul_vr)))
		return 0;

	return ul_vr->id;
}

u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return lb_rif->ul_rif_id;
}

static bool
mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_rif_counter_valid_get(rif,
					      MLXSW_SP_RIF_COUNTER_EGRESS) &&
	       mlxsw_sp_rif_counter_valid_get(rif,
					      MLXSW_SP_RIF_COUNTER_INGRESS);
}
static int
mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
{
	int err;

	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
	if (err)
		return err;

	/* Clear stale data. */
	err = mlxsw_sp_rif_counter_fetch_clear(rif,
					       MLXSW_SP_RIF_COUNTER_INGRESS,
					       NULL);
	if (err)
		goto err_clear_ingress;

	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
	if (err)
		goto err_alloc_egress;

	/* Clear stale data. */
	err = mlxsw_sp_rif_counter_fetch_clear(rif,
					       MLXSW_SP_RIF_COUNTER_EGRESS,
					       NULL);
	if (err)
		goto err_clear_egress;

	return 0;

err_clear_egress:
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
err_alloc_egress:
err_clear_ingress:
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
	return err;
}

static void
mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
}

static void
mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
					  struct netdev_notifier_offload_xstats_info *info)
{
	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
		return;
	netdev_offload_xstats_report_used(info->report_used);
}
static int
mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
				    struct rtnl_hw_stats64 *p_stats)
{
	struct mlxsw_sp_rif_counter_set_basic ingress;
	struct mlxsw_sp_rif_counter_set_basic egress;
	int err;

	err = mlxsw_sp_rif_counter_fetch_clear(rif,
					       MLXSW_SP_RIF_COUNTER_INGRESS,
					       &ingress);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_fetch_clear(rif,
					       MLXSW_SP_RIF_COUNTER_EGRESS,
					       &egress);
	if (err)
		return err;

#define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX)		\
		((SET.good_unicast_ ## SFX) +		\
		 (SET.good_multicast_ ## SFX) +		\
		 (SET.good_broadcast_ ## SFX))

	p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
	p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
	p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
	p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
	p_stats->rx_errors = ingress.error_packets;
	p_stats->tx_errors = egress.error_packets;
	p_stats->rx_dropped = ingress.discard_packets;
	p_stats->tx_dropped = egress.discard_packets;
	p_stats->multicast = ingress.good_multicast_packets +
			     ingress.good_broadcast_packets;

#undef MLXSW_SP_ROUTER_ALL_GOOD

	return 0;
}

static int
mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
					   struct netdev_notifier_offload_xstats_info *info)
{
	struct rtnl_hw_stats64 stats = {};
	int err;

	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
		return 0;

	err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
	if (err)
		return err;

	netdev_offload_xstats_report_delta(info->report_delta, &stats);
	return 0;
}
struct mlxsw_sp_router_hwstats_notify_work {
	struct work_struct work;
	struct net_device *dev;
};

static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_router_hwstats_notify_work *hws_work =
		container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
			     work);

	rtnl_lock();
	rtnl_offload_xstats_notify(hws_work->dev);
	rtnl_unlock();
	dev_put(hws_work->dev);
	kfree(hws_work);
}

static void
mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
{
	struct mlxsw_sp_router_hwstats_notify_work *hws_work;

	/* To collect notification payload, the core ends up sending another
	 * notifier block message, which would deadlock on the attempt to
	 * acquire the router lock again. Just postpone the notification until
	 * later.
	 */

	hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
	if (!hws_work)
		return;

	INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
	dev_hold(dev);
	hws_work->dev = dev;
	mlxsw_core_schedule_work(&hws_work->work);
}

int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
	return rif->dev->ifindex;
}

const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
{
	return rif->dev;
}

static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
{
	struct rtnl_hw_stats64 stats = {};

	if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
		netdev_offload_xstats_push_delta(rif->dev,
						 NETDEV_OFFLOAD_XSTATS_TYPE_L3,
						 &stats);
}
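
/* RIF creation sequence: resolve the RIF type and ops from the netdev,
 * bind the virtual router, allocate a free RIF index, take the FID (if
 * the RIF type has one), configure the device and finally register the
 * RIF with the multicast routing tables and counters.
 */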
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack)
{
	u32 tb_id = l3mdev_fib_table(params->dev);
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp_fid *fid = NULL;
	enum mlxsw_sp_rif_type type;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;
	u16 rif_index;
	int i, err;

	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
	ops = mlxsw_sp->router->rif_ops_arr[type];

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	vr->rif_count++;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
		goto err_rif_index_alloc;
	}

	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}
	dev_hold(rif->dev);
	mlxsw_sp->router->rifs[rif_index] = rif;
	rif->mlxsw_sp = mlxsw_sp;
	rif->ops = ops;

	if (ops->fid_get) {
		fid = ops->fid_get(rif, extack);
		if (IS_ERR(fid)) {
			err = PTR_ERR(fid);
			goto err_fid_get;
		}
		rif->fid = fid;
	}

	if (ops->setup)
		ops->setup(rif, params);

	err = ops->configure(rif, extack);
	if (err)
		goto err_configure;

	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
		if (err)
			goto err_mr_rif_add;
	}

	if (netdev_offload_xstats_enabled(rif->dev,
					  NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
		err = mlxsw_sp_router_port_l3_stats_enable(rif);
		if (err)
			goto err_stats_enable;
		mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
	} else {
		mlxsw_sp_rif_counters_alloc(rif);
	}

	return rif;

err_stats_enable:
err_mr_rif_add:
	for (i--; i >= 0; i--)
		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
	ops->deconfigure(rif);
err_configure:
	if (fid)
		/* Loopback RIFs are not associated with a FID. */
		mlxsw_sp_fid_put(fid);
err_fid_get:
	mlxsw_sp->router->rifs[rif_index] = NULL;
	dev_put(rif->dev);
	kfree(rif);
err_rif_alloc:
err_rif_index_alloc:
	vr->rif_count--;
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
	const struct mlxsw_sp_rif_ops *ops = rif->ops;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;
	struct mlxsw_sp_vr *vr;
	int i;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
	vr = &mlxsw_sp->router->vrs[rif->vr_id];

	if (netdev_offload_xstats_enabled(rif->dev,
					  NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
		mlxsw_sp_rif_push_l3_stats(rif);
		mlxsw_sp_router_port_l3_stats_disable(rif);
		mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
	} else {
		mlxsw_sp_rif_counters_free(rif);
	}

	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
	ops->deconfigure(rif);
	if (fid)
		/* Loopback RIFs are not associated with a FID. */
		mlxsw_sp_fid_put(fid);
	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
	dev_put(rif->dev);
	kfree(rif);
	vr->rif_count--;
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *dev)
{
	struct mlxsw_sp_rif *rif;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		goto out;
	mlxsw_sp_rif_destroy(rif);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}
static void
mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;

	params->vid = mlxsw_sp_port_vlan->vid;
	params->lag = mlxsw_sp_port->lagged;
	if (params->lag)
		params->lag_id = mlxsw_sp_port->lag_id;
	else
		params->system_port = mlxsw_sp_port->local_port;
}

static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_subport, common);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
			 const struct mlxsw_sp_rif_params *params,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_subport *rif_subport;
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
	if (!rif)
		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	refcount_inc(&rif_subport->ref_count);
	return rif;
}

static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_subport *rif_subport;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	if (!refcount_dec_and_test(&rif_subport->ref_count))
		return;

	mlxsw_sp_rif_destroy(rif);
}
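
/* RIF MAC profiles. The device supports a limited number of MAC prefixes
 * for router interfaces, so RIFs whose MAC addresses share a prefix (as
 * defined by mlxsw_sp->mac_mask) reference a common, refcounted profile
 * allocated from an IDR.
 */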
static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_rif_mac_profile *profile,
						struct netlink_ext_ack *extack)
{
	u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	int id;

	id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
		       max_rif_mac_profiles, GFP_KERNEL);

	if (id >= 0) {
		profile->id = id;
		return 0;
	}

	if (id == -ENOSPC)
		NL_SET_ERR_MSG_MOD(extack,
				   "Exceeded number of supported router interface MAC profiles");

	return id;
}

static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
{
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
			     mac_profile);
	WARN_ON(!profile);
	return profile;
}

static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_alloc(const char *mac)
{
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
	if (!profile)
		return NULL;

	ether_addr_copy(profile->mac_prefix, mac);
	refcount_set(&profile->ref_count, 1);
	return profile;
}

static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_rif_mac_profile *profile;
	int id;

	idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
		if (ether_addr_equal_masked(profile->mac_prefix, mac,
					    mlxsw_sp->mac_mask))
			return profile;
	}

	return NULL;
}

static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
}
static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_mac_profile *profile;
	int err;

	profile = mlxsw_sp_rif_mac_profile_alloc(mac);
	if (!profile)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
	if (err)
		goto profile_index_alloc_err;

	atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
	return profile;

profile_index_alloc_err:
	kfree(profile);
	return ERR_PTR(err);
}

static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
					     u8 mac_profile)
{
	struct mlxsw_sp_rif_mac_profile *profile;

	atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
	profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
	kfree(profile);
}

static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
					const char *mac, u8 *p_mac_profile,
					struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
	if (profile) {
		refcount_inc(&profile->ref_count);
		goto out;
	}

	profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
	if (IS_ERR(profile))
		return PTR_ERR(profile);

out:
	*p_mac_profile = profile->id;
	return 0;
}

static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
					 u8 mac_profile)
{
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
			   mac_profile);
	if (WARN_ON(!profile))
		return;

	if (!refcount_dec_and_test(&profile->ref_count))
		return;

	mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
}

static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
			   rif->mac_profile_id);
	if (WARN_ON(!profile))
		return false;

	return refcount_read(&profile->ref_count) > 1;
}
static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
					 const char *new_mac)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
			   rif->mac_profile_id);
	if (WARN_ON(!profile))
		return -EINVAL;

	ether_addr_copy(profile->mac_prefix, new_mac);
	return 0;
}

static int
mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *rif,
				 const char *new_mac,
				 struct netlink_ext_ack *extack)
{
	u8 mac_profile;
	int err;

	if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
	    !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
		return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);

	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
					   &mac_profile, extack);
	if (err)
		return err;

	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
	rif->mac_profile_id = mac_profile;
	return 0;
}
static int
__mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
				 struct net_device *l3_dev,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	int err;

	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
	if (IS_ERR(rif))
		return PTR_ERR(rif);

	/* FID was already created, just take a reference */
	fid = rif->ops->fid_get(rif, extack);
	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					BR_STATE_FORWARDING);
	if (err)
		goto err_port_vid_stp_set;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
err_fid_port_vid_map:
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_rif_subport_put(rif);
	return err;
}

static void
__mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
	u16 vid = mlxsw_sp_port_vlan->vid;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
		return;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_rif_subport_put(rif);
}
int
mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct net_device *l3_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif)
		goto out;

	err = __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
					       extack);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;

	mutex_lock(&mlxsw_sp->router->lock);
	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
	mutex_unlock(&mlxsw_sp->router->lock);
}

static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
					     struct net_device *port_dev,
					     unsigned long event, u16 vid,
					     struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
							l3_dev, extack);
	case NETDEV_DOWN:
		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
		break;
	}

	return 0;
}
static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event,
					struct netlink_ext_ack *extack)
{
	if (netif_is_bridge_port(port_dev) ||
	    netif_is_lag_port(port_dev) ||
	    netif_is_ovs_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
						 MLXSW_SP_DEFAULT_VID, extack);
}

static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid,
					 struct netlink_ext_ack *extack)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
								port_dev,
								event, vid,
								extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event,
				       struct netlink_ext_ack *extack)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
					     MLXSW_SP_DEFAULT_VID, extack);
}
static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *l3_dev,
					  unsigned long event,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	struct mlxsw_sp_rif *rif;

	switch (event) {
	case NETDEV_UP:
		if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
			u16 proto;

			br_vlan_get_proto(l3_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
		break;
	case NETDEV_DOWN:
		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
		mlxsw_sp_rif_destroy(rif);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
					struct net_device *vlan_dev,
					unsigned long event,
					struct netlink_ext_ack *extack)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (netif_is_bridge_port(vlan_dev))
		return 0;

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
							 event, vid, extack);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid, extack);
	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
						      extack);

	return 0;
}
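
/* VRRP virtual MAC addresses have the fixed prefix 00:00:5e:00:01 for
 * IPv4 and 00:00:5e:00:02 for IPv6, with the last byte holding the VRID.
 * The helpers below match on the five-byte prefix and program the VRID
 * into the RITR register accordingly.
 */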
static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
{
	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

	return ether_addr_equal_masked(mac, vrrp4, mask);
}

static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
{
	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

	return ether_addr_equal_masked(mac, vrrp6, mask);
}

static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				const u8 *mac, bool adding)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u8 vrrp_id = adding ? mac[5] : 0;
	int err;

	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
		return 0;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
	else
		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
9042 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
9043 const struct net_device *macvlan_dev,
9044 struct netlink_ext_ack *extack)
9046 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9047 struct mlxsw_sp_rif *rif;
9050 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9052 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
9056 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9057 mlxsw_sp_fid_index(rif->fid), true);
9061 err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
9062 macvlan_dev->dev_addr, true);
9064 goto err_rif_vrrp_add;
9066 /* Make sure the bridge driver does not have this MAC pointing at
9069 if (rif->ops->fdb_del)
9070 rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
9075 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9076 mlxsw_sp_fid_index(rif->fid), false);
9080 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9081 const struct net_device *macvlan_dev)
9083 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9084 struct mlxsw_sp_rif *rif;
9086 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9087 /* If we do not have a RIF, then we already took care of
9088 * removing the macvlan's MAC during RIF deletion.
9092 mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
9094 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9095 mlxsw_sp_fid_index(rif->fid), false);
9098 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9099 const struct net_device *macvlan_dev)
9101 mutex_lock(&mlxsw_sp->router->lock);
9102 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9103 mutex_unlock(&mlxsw_sp->router->lock);
9106 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
9107 struct net_device *macvlan_dev,
9108 unsigned long event,
9109 struct netlink_ext_ack *extack)
9113 return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
9115 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9122 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
9123 struct net_device *dev,
9124 unsigned long event,
9125 struct netlink_ext_ack *extack)
9127 if (mlxsw_sp_port_dev_check(dev))
9128 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
9129 else if (netif_is_lag_master(dev))
9130 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
9131 else if (netif_is_bridge_master(dev))
9132 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
9134 else if (is_vlan_dev(dev))
9135 return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
9137 else if (netif_is_macvlan(dev))
9138 return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
9144 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
9145 unsigned long event, void *ptr)
9147 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
9148 struct net_device *dev = ifa->ifa_dev->dev;
9149 struct mlxsw_sp_router *router;
9150 struct mlxsw_sp_rif *rif;
9153 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
9154 if (event == NETDEV_UP)
9157 router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
9158 mutex_lock(&router->lock);
9159 rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
9160 if (!mlxsw_sp_rif_should_config(rif, dev, event))
9163 err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
9165 mutex_unlock(&router->lock);
9166 return notifier_from_errno(err);
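
/* Called at address validation time, before the address is installed.
 * Returning an error here (wrapped by notifier_from_errno()) vetoes the
 * new address, with the reason reported to user space via the extack.
 */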
int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
	struct net_device *dev = ivi->ivi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return NOTIFY_DONE;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return notifier_from_errno(err);
}

struct mlxsw_sp_inet6addr_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *dev;
	unsigned long event;
};
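
/* inet6addr notifiers run in an atomic context in which the router mutex
 * cannot be taken, so IPv6 address events are queued on a work item and
 * processed in process context by the work function below.
 */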
static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
{
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
	struct net_device *dev = inet6addr_work->dev;
	unsigned long event = inet6addr_work->event;
	struct mlxsw_sp_rif *rif;

	rtnl_lock();
	mutex_lock(&mlxsw_sp->router->lock);

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	rtnl_unlock();
	dev_put(dev);
	kfree(inet6addr_work);
}

/* Called with rcu_read_lock() */
static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
	struct net_device *dev = if6->idev->dev;
	struct mlxsw_sp_router *router;

	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
	if (!inet6addr_work)
		return NOTIFY_BAD;

	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
	inet6addr_work->dev = dev;
	inet6addr_work->event = event;
	dev_hold(dev);
	mlxsw_core_schedule_work(&inet6addr_work->work);

	return NOTIFY_DONE;
}

int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
	struct net_device *dev = i6vi->i6vi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return NOTIFY_DONE;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return notifier_from_errno(err);
}
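
/* Edit an existing RIF in a read-modify-write manner: the current RITR
 * register state is queried first, then the MAC, MTU and MAC profile are
 * overwritten and the register is written back.
 */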
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
			     const char *mac, int mtu, u8 mac_profile)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
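
/* Reflect a change of the netdevice's MAC or MTU in the corresponding RIF.
 * The old FDB entry is removed first; on failure, the unwind path restores
 * the previous MAC, MTU, MAC profile and FDB entry.
 */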
static int
mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_rif *rif,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = rif->dev;
	u8 old_mac_profile;
	u16 fid_index;
	int err;

	fid_index = mlxsw_sp_fid_index(rif->fid);

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
	if (err)
		return err;

	old_mac_profile = rif->mac_profile_id;
	err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
					       extack);
	if (err)
		goto err_rif_mac_profile_replace;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu, rif->mac_profile_id);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
	if (err)
		goto err_rif_fdb_op;

	if (rif->mtu != dev->mtu) {
		struct mlxsw_sp_vr *vr;
		int i;

		/* The RIF is relevant only to its mr_table instance, as unlike
		 * unicast routing, in multicast routing a RIF cannot be shared
		 * between several multicast routing tables.
		 */
		vr = &mlxsw_sp->router->vrs[rif->vr_id];
		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
						   rif, dev->mtu);
	}

	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
			  old_mac_profile);
err_rif_edit:
	mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
err_rif_mac_profile_replace:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
	return err;
}

static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
						     struct netdev_notifier_pre_changeaddr_info *info)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_mac_profile *profile;
	struct netlink_ext_ack *extack;
	u8 max_rif_mac_profiles;
	u64 occ;

	extack = netdev_notifier_info_to_extack(&info->info);

	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
	if (profile)
		return 0;

	max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
	occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
	if (occ < max_rif_mac_profiles)
		return 0;

	if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
		return 0;

	NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
	return -ENOBUFS;
}
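
/* Hardware offload of netdevice xstats: only L3 statistics
 * (NETDEV_OFFLOAD_XSTATS_TYPE_L3) are supported, and only for netdevices
 * that are backed by a RIF.
 */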
static bool mlxsw_sp_is_offload_xstats_event(unsigned long event)
{
	switch (event) {
	case NETDEV_OFFLOAD_XSTATS_ENABLE:
	case NETDEV_OFFLOAD_XSTATS_DISABLE:
	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
		return true;
	}

	return false;
}

static int
mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
					unsigned long event,
					struct netdev_notifier_offload_xstats_info *info)
{
	switch (info->type) {
	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
		break;
	default:
		return 0;
	}

	switch (event) {
	case NETDEV_OFFLOAD_XSTATS_ENABLE:
		return mlxsw_sp_router_port_l3_stats_enable(rif);
	case NETDEV_OFFLOAD_XSTATS_DISABLE:
		mlxsw_sp_router_port_l3_stats_disable(rif);
		return 0;
	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
		mlxsw_sp_router_port_l3_stats_report_used(rif, info);
		return 0;
	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
		return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
	}

	WARN_ON_ONCE(1);
	return 0;
}

static int
mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *dev,
				      unsigned long event,
				      struct netdev_notifier_offload_xstats_info *info)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;

	return mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info);
}

static bool mlxsw_sp_is_router_event(unsigned long event)
{
	switch (event) {
	case NETDEV_PRE_CHANGEADDR:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGEMTU:
		return true;
	default:
		return false;
	}
}

static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
						unsigned long event, void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;

	switch (event) {
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
		return mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
	case NETDEV_PRE_CHANGEADDR:
		return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}

static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *rif;

	/* If netdev is already associated with a RIF, then we need to
	 * destroy it and create a new one with the new virtual router ID.
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (rif)
		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
					  extack);

	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
}

static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
				    struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif)
		return;
	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
}

static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

static int
mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
			     struct netdev_notifier_changeupper_info *info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	int err = 0;

	/* We do not create a RIF for a macvlan, but only use it to
	 * direct more MAC addresses to the router.
	 */
	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		break;
	case NETDEV_CHANGEUPPER:
		if (info->linking) {
			struct netlink_ext_ack *extack;

			extack = netdev_notifier_info_to_extack(&info->info);
			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
		} else {
			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
					   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_router *router;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	router = container_of(nb, struct mlxsw_sp_router, netdevice_nb);
	mlxsw_sp = router->mlxsw_sp;

	mutex_lock(&mlxsw_sp->router->lock);

	if (mlxsw_sp_is_offload_xstats_event(event))
		err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev,
							    event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_is_router_event(event))
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);

	mutex_unlock(&mlxsw_sp->router->lock);

	return notifier_from_errno(err);
}
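
/* When a RIF is destroyed while macvlan uppers still sit on top of its
 * netdevice, their MAC addresses must be flushed from the router FDB, as
 * no RIF is left to direct them to the router. The walk below does that.
 */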
static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
					struct netdev_nested_priv *priv)
{
	struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;

	if (!netif_is_macvlan(dev))
		return 0;

	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
				   mlxsw_sp_fid_index(rif->fid), false);
}

static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
{
	struct netdev_nested_priv priv = {
		.data = (void *)rif,
	};

	if (!netif_is_macvlan_port(rif->dev))
		return 0;

	netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
	return netdev_walk_all_upper_dev_rcu(rif->dev,
					     __mlxsw_sp_rif_macvlan_flush, &priv);
}

static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
				       const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_subport *rif_subport;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	refcount_set(&rif_subport->ref_count, 1);
	rif_subport->vid = params->vid;
	rif_subport->lag = params->lag;
	if (params->lag)
		rif_subport->lag_id = params->lag_id;
	else
		rif_subport->system_port = params->system_port;
}

static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_subport *rif_subport;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
			    rif->rif_index, rif->vr_id, rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
				  rif_subport->lag ? rif_subport->lag_id :
						     rif_subport->system_port,
				  rif_subport->vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
					  struct netlink_ext_ack *extack)
{
	u8 mac_profile;
	int err;

	err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
					   &mac_profile, extack);
	if (err)
		return err;
	rif->mac_profile_id = mac_profile;

	err = mlxsw_sp_rif_subport_op(rif, true);
	if (err)
		goto err_rif_subport_op;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_subport_op(rif, false);
err_rif_subport_op:
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
	return err;
}

static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_rif_subport_op(rif, false);
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
			     struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
	.setup			= mlxsw_sp_rif_subport_setup,
	.configure		= mlxsw_sp_rif_subport_configure,
	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
	.fid_get		= mlxsw_sp_rif_subport_fid_get,
};

static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
				    enum mlxsw_reg_ritr_if_type type,
				    u16 vid_fid, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
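
/* The "router port" is a virtual port one past the highest switch port.
 * It is used as the flood destination that makes multicast and broadcast
 * traffic in a FID reach the router.
 */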
u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}

static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	u8 mac_profile;
	int err;

	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
					   &mac_profile, extack);
	if (err)
		return err;
	rif->mac_profile_id = mac_profile;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
				       true);
	if (err)
		goto err_rif_vlan_fid_op;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
err_rif_vlan_fid_op:
	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
	return err;
}

static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
			 struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}

static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
	struct switchdev_notifier_fdb_info info = {};
	struct net_device *dev;

	dev = br_fdb_find_port(rif->dev, mac, 0);
	if (!dev)
		return;

	info.addr = mac;
	info.vid = 0;
	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
				 NULL);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type			= MLXSW_SP_RIF_TYPE_FID,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_fid_configure,
	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
	.fid_get		= mlxsw_sp_rif_fid_fid_get,
	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
};

static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
			  struct netlink_ext_ack *extack)
{
	struct net_device *br_dev;
	u16 vid;
	int err;

	if (is_vlan_dev(rif->dev)) {
		vid = vlan_dev_vlan_id(rif->dev);
		br_dev = vlan_dev_real_dev(rif->dev);
		if (WARN_ON(!netif_is_bridge_master(br_dev)))
			return ERR_PTR(-EINVAL);
	} else {
		err = br_vlan_get_pvid(rif->dev, &vid);
		if (err < 0 || !vid) {
			NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
			return ERR_PTR(-EINVAL);
		}
	}

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}

static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
	struct switchdev_notifier_fdb_info info = {};
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct net_device *br_dev;
	struct net_device *dev;

	br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
	dev = br_fdb_find_port(br_dev, mac, vid);
	if (!dev)
		return;

	info.addr = mac;
	info.vid = vid;
	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
				 NULL);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
	.type			= MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_fid_configure,
	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
};
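
/* VLAN RIFs are currently emulated on top of FID RIFs: only fid_get and
 * fdb_del differ, while configure/deconfigure are shared with the FID RIF
 * ops above.
 */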
static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static void
mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
			   const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
	struct mlxsw_sp_rif_ipip_lb *rif_lb;

	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
				 common);
	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
	rif_lb->lb_config = params_lb->lb_config;
}

static int
mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;
	int err;

	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_vr))
		return PTR_ERR(ul_vr);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = ul_vr->id;
	lb_rif->ul_rif_id = 0;
	++ul_vr->rif_count;
	return 0;

err_loopback_op:
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
	return err;
}

static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);

	--ul_vr->rif_count;
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup			= mlxsw_sp_rif_ipip_lb_setup,
	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
};

static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
};
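
/* On Spectrum-2 and later, IP-in-IP loopback RIFs reference a dedicated
 * underlay RIF ("ul_rif") that is created once per virtual router and
 * reference-counted, instead of binding the underlay VR directly as on
 * Spectrum-1.
 */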
static int
mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
					     MLXSW_REG_RITR_LOOPBACK_GENERIC);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
		       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *ul_rif;
	u16 rif_index;
	int err;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
		return ERR_PTR(err);
	}

	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
	if (!ul_rif)
		return ERR_PTR(-ENOMEM);

	mlxsw_sp->router->rifs[rif_index] = ul_rif;
	ul_rif->mlxsw_sp = mlxsw_sp;
	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
	if (err)
		goto ul_rif_op_err;

	return ul_rif;

ul_rif_op_err:
	mlxsw_sp->router->rifs[rif_index] = NULL;
	kfree(ul_rif);
	return ERR_PTR(err);
}

static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;

	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
	kfree(ul_rif);
}

static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
		    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
		return vr->ul_rif;

	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
	if (IS_ERR(vr->ul_rif)) {
		err = PTR_ERR(vr->ul_rif);
		goto err_ul_rif_create;
	}

	vr->rif_count++;
	refcount_set(&vr->ul_rif_refcnt, 1);

	return vr->ul_rif;

err_ul_rif_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	struct mlxsw_sp_vr *vr;

	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];

	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
		return;

	vr->rif_count--;
	mlxsw_sp_ul_rif_destroy(ul_rif);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
			       u16 *ul_rif_index)
{
	struct mlxsw_sp_rif *ul_rif;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_rif)) {
		err = PTR_ERR(ul_rif);
		goto out;
	}
	*ul_rif_index = ul_rif->rif_index;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
{
	struct mlxsw_sp_rif *ul_rif;

	mutex_lock(&mlxsw_sp->router->lock);
	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
	if (WARN_ON(!ul_rif))
		goto out;

	mlxsw_sp_ul_rif_put(ul_rif);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}

static int
mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;
	int err;

	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_rif))
		return PTR_ERR(ul_rif);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = 0;
	lb_rif->ul_rif_id = ul_rif->rif_index;

	return 0;

err_loopback_op:
	mlxsw_sp_ul_rif_put(ul_rif);
	return err;
}

static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;

	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
	mlxsw_sp_ul_rif_put(ul_rif);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup			= mlxsw_sp_rif_ipip_lb_setup,
	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
};

static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
};

static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_core *core = mlxsw_sp->core;

	if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
		return -EIO;
	mlxsw_sp->router->max_rif_mac_profile =
		MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
	atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
	devlink_resource_occ_get_register(devlink,
					  MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
					  mlxsw_sp_rif_mac_profiles_occ_get,
					  mlxsw_sp);

	return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	devlink_resource_occ_get_unregister(devlink,
					    MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
	WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
	idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
	kfree(mlxsw_sp->router->rifs);
}

static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
	char tigcr_pl[MLXSW_REG_TIGCR_LEN];

	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}

static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);

	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
	if (err)
		return err;
	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
	if (err)
		return err;

	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}

static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
	return mlxsw_sp_ipips_init(mlxsw_sp);
}

static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
	return mlxsw_sp_ipips_init(mlxsw_sp);
}

static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}
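
/* Multipath hash configuration. The bitmaps below accumulate the RECR2
 * header and field enables for the outer and, for policies that use them,
 * inner packet headers, as dictated by the kernel's fib_multipath_hash_*
 * settings.
 */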
#ifdef CONFIG_IP_ROUTE_MULTIPATH
struct mlxsw_sp_mp_hash_config {
	DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
	DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
	DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
	DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
	bool inc_parsing_depth;
};

#define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
	bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)

#define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)

#define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)

static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
{
	unsigned long *inner_headers = config->inner_headers;
	unsigned long *inner_fields = config->inner_fields;

	/* IPv4 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
	/* IPv6 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
}

static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
{
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
}

static void
mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
			      u32 hash_fields)
{
	unsigned long *inner_headers = config->inner_headers;
	unsigned long *inner_fields = config->inner_fields;

	/* IPv4 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
	/* IPv6 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
	}
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
	}
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
	/* L4 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
}

static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_mp_hash_config *config)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;
	u32 hash_fields;

	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
	case 0:
		mlxsw_sp_mp4_hash_outer_addr(config);
		break;
	case 1:
		mlxsw_sp_mp4_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		break;
	case 2:
		/* Outer */
		mlxsw_sp_mp4_hash_outer_addr(config);
		/* Inner */
		mlxsw_sp_mp_hash_inner_l3(config);
		break;
	case 3:
		hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
		/* Outer */
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		/* Inner */
		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
		break;
	}
}

static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
{
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
}

static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_mp_hash_config *config)
{
	u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
	case 0:
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		break;
	case 1:
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		break;
	case 2:
		/* Outer */
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		/* Inner */
		mlxsw_sp_mp_hash_inner_l3(config);
		config->inc_parsing_depth = true;
		break;
	case 3:
		/* Outer */
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
		}
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
		}
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		/* Inner */
		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
			config->inc_parsing_depth = true;
		break;
	}
}

static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
						 bool old_inc_parsing_depth,
						 bool new_inc_parsing_depth)
{
	int err;

	if (!old_inc_parsing_depth && new_inc_parsing_depth) {
		err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
		if (err)
			return err;
		mlxsw_sp->router->inc_parsing_depth = true;
	} else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
		mlxsw_sp->router->inc_parsing_depth = false;
	}

	return 0;
}
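
/* The ECMP hash seed is derived from the switch's base MAC, so it is
 * stable across reloads of a given unit but differs between units, which
 * presumably avoids hash polarization between cascaded switches.
 */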
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	bool old_inc_parsing_depth, new_inc_parsing_depth;
	struct mlxsw_sp_mp_hash_config config = {};
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	unsigned long bit;
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
	mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);

	old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
	new_inc_parsing_depth = config.inc_parsing_depth;
	err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
						    old_inc_parsing_depth,
						    new_inc_parsing_depth);
	if (err)
		return err;

	for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
		mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
		mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
		mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
		mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
	if (err)
		goto err_reg_write;

	return 0;

err_reg_write:
	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
					      old_inc_parsing_depth);
	return err;
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}
#endif

static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
{
	char rdpm_pl[MLXSW_REG_RDPM_LEN];
	unsigned int i;

	MLXSW_REG_ZERO(rdpm, rdpm_pl);

	/* HW is determining switch priority based on DSCP-bits, but the
	 * kernel is still doing that based on the ToS. Since there's a
	 * mismatch in bits we need to make sure to translate the right
	 * value ToS would observe, skipping the 2 least-significant ECN bits.
	 */
	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	bool usp;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
	.init = mlxsw_sp_router_ll_basic_init,
	.ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
	.ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
	.raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
	.fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
	.fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
	.fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
	.fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
	.fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
	.fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
	.fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
	.fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
};

static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
{
	size_t max_size = 0;
	int i;

	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
		size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;

		if (size > max_size)
			max_size = size;
	}
	router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
				    GFP_KERNEL);
	if (!router->ll_op_ctx)
		return -ENOMEM;
	INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
	return 0;
}

static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
{
	WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
	kfree(router->ll_op_ctx);
}

static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
{
	u16 lb_rif_index;
	int err;

	/* Create a generic loopback RIF associated with the main table
	 * (default VRF). Any table can be used, but the main table exists
	 * anyway, so we do not waste resources.
	 */
	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN,
					 &lb_rif_index);
	if (err)
		return err;

	mlxsw_sp->router->lb_rif_index = lb_rif_index;

	return 0;
}

static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
}

static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;

	return 0;
}

const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
	.init = mlxsw_sp1_router_init,
	.ipips_init = mlxsw_sp1_ipips_init,
};

static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;

	return 0;
}

const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
	.init = mlxsw_sp2_router_init,
	.ipips_init = mlxsw_sp2_ipips_init,
};
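
/* Router initialization. Note the notifier registration order at the end:
 * address notifiers first, then netevent, nexthop and FIB notifiers, and
 * the netdevice notifier last; mlxsw_sp_router_fini() unregisters in the
 * exact reverse order.
 */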
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mutex_init(&router->lock);
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	err = mlxsw_sp->router_ops->init(mlxsw_sp);
	if (err)
		goto err_router_ops_init;

	err = mlxsw_sp_router_xm_init(mlxsw_sp);
	if (err)
		goto err_xm_init;

	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = mlxsw_sp_router_xm_ipv4_is_supported(mlxsw_sp) ?
						       &mlxsw_sp_router_ll_xm_ops :
						       &mlxsw_sp_router_ll_basic_ops;
	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;

	err = mlxsw_sp_router_ll_op_ctx_init(router);
	if (err)
		goto err_ll_op_ctx_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
			  mlxsw_sp_nh_grp_activity_work);
	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_lb_rif_init(mlxsw_sp);
	if (err)
		goto err_lb_rif_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	err = mlxsw_sp_dscp_init(mlxsw_sp);
	if (err)
		goto err_dscp_init;

	INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
	INIT_LIST_HEAD(&router->fib_event_queue);
	spin_lock_init(&router->fib_event_queue_lock);

	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
	err = register_inetaddr_notifier(&router->inetaddr_nb);
	if (err)
		goto err_register_inetaddr_notifier;

	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
	err = register_inet6addr_notifier(&router->inet6addr_nb);
	if (err)
		goto err_register_inet6addr_notifier;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	mlxsw_sp->router->nexthop_nb.notifier_call =
		mlxsw_sp_nexthop_obj_event;
	err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
					&mlxsw_sp->router->nexthop_nb,
					extack);
	if (err)
		goto err_register_nexthop_notifier;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush, extack);
	if (err)
		goto err_register_fib_notifier;

	mlxsw_sp->router->netdevice_nb.notifier_call =
		mlxsw_sp_router_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->router->netdevice_nb);
	if (err)
		goto err_register_netdev_notifier;

	return 0;

err_register_netdev_notifier:
	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				&mlxsw_sp->router->fib_nb);
err_register_fib_notifier:
	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->nexthop_nb);
err_register_nexthop_notifier:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	unregister_inet6addr_notifier(&router->inet6addr_nb);
err_register_inet6addr_notifier:
	unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
	mlxsw_core_flush_owq();
	WARN_ON(!list_empty(&router->fib_event_queue));
err_dscp_init:
err_mp_hash_init:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_lb_rif_fini(mlxsw_sp);
err_lb_rif_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
	mlxsw_sp_router_ll_op_ctx_fini(router);
err_ll_op_ctx_init:
	mlxsw_sp_router_xm_fini(mlxsw_sp);
err_xm_init:
err_router_ops_init:
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->router->netdevice_nb);
	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				&mlxsw_sp->router->fib_nb);
	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->nexthop_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
	mlxsw_core_flush_owq();
	WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_lb_rif_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
	mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
	mlxsw_sp_router_xm_fini(mlxsw_sp);
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
}