1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/rhashtable.h>
7 #include <linux/bitops.h>
9 #include <linux/notifier.h>
10 #include <linux/inetdevice.h>
11 #include <linux/netdevice.h>
12 #include <linux/if_bridge.h>
13 #include <linux/socket.h>
14 #include <linux/route.h>
15 #include <linux/gcd.h>
16 #include <linux/if_macvlan.h>
17 #include <linux/refcount.h>
18 #include <linux/jhash.h>
19 #include <linux/net_namespace.h>
20 #include <linux/mutex.h>
21 #include <linux/genalloc.h>
22 #include <net/netevent.h>
23 #include <net/neighbour.h>
25 #include <net/inet_dscp.h>
26 #include <net/ip_fib.h>
27 #include <net/ip6_fib.h>
28 #include <net/nexthop.h>
29 #include <net/fib_rules.h>
30 #include <net/ip_tunnels.h>
31 #include <net/l3mdev.h>
32 #include <net/addrconf.h>
33 #include <net/ndisc.h>
35 #include <net/fib_notifier.h>
36 #include <net/switchdev.h>
41 #include "spectrum_cnt.h"
42 #include "spectrum_dpipe.h"
43 #include "spectrum_ipip.h"
44 #include "spectrum_mr.h"
45 #include "spectrum_mr_tcam.h"
46 #include "spectrum_router.h"
47 #include "spectrum_span.h"
51 struct mlxsw_sp_lpm_tree;
52 struct mlxsw_sp_rif_ops;
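/* A CRIF appears to be a "candidate RIF": it tracks a netdevice that may or
 * may not currently have a RIF, is keyed by that netdevice in
 * router->crif_ht, and keeps the list of nexthops that reference the
 * device, so that binding or unbinding a RIF (the crif->rif pointer below)
 * can update those nexthops.
 */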
54 struct mlxsw_sp_crif_key {
55 struct net_device *dev;
58 struct mlxsw_sp_crif {
59 struct mlxsw_sp_crif_key key;
60 struct rhash_head ht_node;
62 struct list_head nexthop_list;
63 struct mlxsw_sp_rif *rif;
66 static const struct rhashtable_params mlxsw_sp_crif_ht_params = {
67 .key_offset = offsetof(struct mlxsw_sp_crif, key),
68 .key_len = sizeof_field(struct mlxsw_sp_crif, key),
69 .head_offset = offsetof(struct mlxsw_sp_crif, ht_node),
73 struct mlxsw_sp_crif *crif; /* NULL for underlay RIF */
74 struct list_head neigh_list;
75 struct mlxsw_sp_fid *fid;
76 unsigned char addr[ETH_ALEN];
82 const struct mlxsw_sp_rif_ops *ops;
83 struct mlxsw_sp *mlxsw_sp;
85 unsigned int counter_ingress;
86 bool counter_ingress_valid;
87 unsigned int counter_egress;
88 bool counter_egress_valid;
91 static struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
95 return rif->crif->key.dev;
98 struct mlxsw_sp_rif_params {
99 struct net_device *dev;
109 struct mlxsw_sp_rif_subport {
110 struct mlxsw_sp_rif common;
111 refcount_t ref_count;
120 struct mlxsw_sp_rif_ipip_lb {
121 struct mlxsw_sp_rif common;
122 struct mlxsw_sp_rif_ipip_lb_config lb_config;
123 u16 ul_vr_id; /* Spectrum-1. */
124 u16 ul_rif_id; /* Spectrum-2+. */
127 struct mlxsw_sp_rif_params_ipip_lb {
128 struct mlxsw_sp_rif_params common;
129 struct mlxsw_sp_rif_ipip_lb_config lb_config;
132 struct mlxsw_sp_rif_ops {
133 enum mlxsw_sp_rif_type type;
136 void (*setup)(struct mlxsw_sp_rif *rif,
137 const struct mlxsw_sp_rif_params *params);
138 int (*configure)(struct mlxsw_sp_rif *rif,
139 struct netlink_ext_ack *extack);
140 void (*deconfigure)(struct mlxsw_sp_rif *rif);
141 struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
142 const struct mlxsw_sp_rif_params *params,
143 struct netlink_ext_ack *extack);
144 void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
147 struct mlxsw_sp_rif_mac_profile {
148 unsigned char mac_prefix[ETH_ALEN];
149 refcount_t ref_count;
153 struct mlxsw_sp_router_ops {
154 int (*init)(struct mlxsw_sp *mlxsw_sp);
155 int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
158 static struct mlxsw_sp_rif *
159 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
160 const struct net_device *dev);
161 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
162 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
163 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
164 struct mlxsw_sp_lpm_tree *lpm_tree);
165 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
166 const struct mlxsw_sp_fib *fib,
168 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
169 const struct mlxsw_sp_fib *fib);
171 static unsigned int *
172 mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
173 enum mlxsw_sp_rif_counter_dir dir)
176 case MLXSW_SP_RIF_COUNTER_EGRESS:
177 return &rif->counter_egress;
178 case MLXSW_SP_RIF_COUNTER_INGRESS:
179 return &rif->counter_ingress;
185 mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
186 enum mlxsw_sp_rif_counter_dir dir)
189 case MLXSW_SP_RIF_COUNTER_EGRESS:
190 return rif->counter_egress_valid;
191 case MLXSW_SP_RIF_COUNTER_INGRESS:
192 return rif->counter_ingress_valid;
198 mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
199 enum mlxsw_sp_rif_counter_dir dir,
203 case MLXSW_SP_RIF_COUNTER_EGRESS:
204 rif->counter_egress_valid = valid;
206 case MLXSW_SP_RIF_COUNTER_INGRESS:
207 rif->counter_ingress_valid = valid;
212 static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
213 unsigned int counter_index, bool enable,
214 enum mlxsw_sp_rif_counter_dir dir)
216 char ritr_pl[MLXSW_REG_RITR_LEN];
217 bool is_egress = false;
220 if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
222 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
223 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
227 mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
229 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
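/* Read back a RIF counter. The counter must have been allocated for the
 * given direction beforehand; the value reported through @cnt is the
 * good-unicast packet count queried from the RICNT register.
 */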
232 int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
233 struct mlxsw_sp_rif *rif,
234 enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
236 char ricnt_pl[MLXSW_REG_RICNT_LEN];
237 unsigned int *p_counter_index;
241 valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
245 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
246 if (!p_counter_index)
248 mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
249 MLXSW_REG_RICNT_OPCODE_NOP);
250 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
253 *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
257 struct mlxsw_sp_rif_counter_set_basic {
258 u64 good_unicast_packets;
259 u64 good_multicast_packets;
260 u64 good_broadcast_packets;
261 u64 good_unicast_bytes;
262 u64 good_multicast_bytes;
263 u64 good_broadcast_bytes;
271 mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
272 enum mlxsw_sp_rif_counter_dir dir,
273 struct mlxsw_sp_rif_counter_set_basic *set)
275 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
276 char ricnt_pl[MLXSW_REG_RICNT_LEN];
277 unsigned int *p_counter_index;
280 if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
283 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
284 if (!p_counter_index)
287 mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
288 MLXSW_REG_RICNT_OPCODE_CLEAR);
289 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
296 #define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME) \
297 (set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))
299 MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
300 MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
301 MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
302 MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
303 MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
304 MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
305 MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
306 MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
307 MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
308 MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);
310 #undef MLXSW_SP_RIF_COUNTER_EXTRACT
315 static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
316 unsigned int counter_index)
318 char ricnt_pl[MLXSW_REG_RICNT_LEN];
320 mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
321 MLXSW_REG_RICNT_OPCODE_CLEAR);
322 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
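/* Allocate and activate a RIF counter: take a counter from the RIF counter
 * sub-pool, clear it via RICNT, bind it to the RIF in the requested
 * direction via RITR, and only then mark it valid. The error labels unwind
 * these steps in reverse order.
 */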
325 int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
326 enum mlxsw_sp_rif_counter_dir dir)
328 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
329 unsigned int *p_counter_index;
332 if (mlxsw_sp_rif_counter_valid_get(rif, dir))
335 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
336 if (!p_counter_index)
339 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
344 err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
346 goto err_counter_clear;
348 err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
349 *p_counter_index, true, dir);
351 goto err_counter_edit;
352 mlxsw_sp_rif_counter_valid_set(rif, dir, true);
357 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
362 void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
363 enum mlxsw_sp_rif_counter_dir dir)
365 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
366 unsigned int *p_counter_index;
368 if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
371 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
372 if (WARN_ON(!p_counter_index))
374 mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
375 *p_counter_index, false, dir);
376 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
378 mlxsw_sp_rif_counter_valid_set(rif, dir, false);
381 static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
383 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
384 struct devlink *devlink;
386 devlink = priv_to_devlink(mlxsw_sp->core);
387 if (!devlink_dpipe_table_counter_enabled(devlink,
388 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
390 mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
393 static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
395 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
398 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
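/* sizeof(struct in6_addr) is 16 bytes, so this evaluates to 16 * 8 + 1 = 129:
 * one slot for every possible prefix length from /0 through /128. IPv4
 * prefixes presumably just use the low 33 slots of the same bitmap.
 */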
400 struct mlxsw_sp_prefix_usage {
401 DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
404 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
405 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
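/* Illustrative use of the iterator (a sketch, not actual driver code):
 *
 *	unsigned char prefix;
 *
 *	mlxsw_sp_prefix_usage_for_each(prefix, &usage)
 *		pr_debug("prefix length /%u is in use\n", prefix);
 *
 * mlxsw_sp_lpm_tree_left_struct_set() below iterates the same way to turn
 * the set of used prefix lengths into RALST bins.
 */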
408 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
409 struct mlxsw_sp_prefix_usage *prefix_usage2)
411 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
415 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
416 struct mlxsw_sp_prefix_usage *prefix_usage2)
418 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
422 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
423 unsigned char prefix_len)
425 set_bit(prefix_len, prefix_usage->b);
429 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
430 unsigned char prefix_len)
432 clear_bit(prefix_len, prefix_usage->b);
435 struct mlxsw_sp_fib_key {
436 unsigned char addr[sizeof(struct in6_addr)];
437 unsigned char prefix_len;
440 enum mlxsw_sp_fib_entry_type {
441 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
442 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
443 MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
444 MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
445 MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
447 /* This is a special case of local delivery, where a packet should be
448 * decapsulated on reception. Note that there is no corresponding ENCAP,
449 * because that's a type of next hop, not of FIB entry. (There can be
450 * several next hops in a REMOTE entry, and some of them may be
451 * encapsulating entries.)
453 MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
454 MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
457 struct mlxsw_sp_nexthop_group_info;
458 struct mlxsw_sp_nexthop_group;
459 struct mlxsw_sp_fib_entry;
461 struct mlxsw_sp_fib_node {
462 struct mlxsw_sp_fib_entry *fib_entry;
463 struct list_head list;
464 struct rhash_head ht_node;
465 struct mlxsw_sp_fib *fib;
466 struct mlxsw_sp_fib_key key;
469 struct mlxsw_sp_fib_entry_decap {
470 struct mlxsw_sp_ipip_entry *ipip_entry;
474 struct mlxsw_sp_fib_entry {
475 struct mlxsw_sp_fib_node *fib_node;
476 enum mlxsw_sp_fib_entry_type type;
477 struct list_head nexthop_group_node;
478 struct mlxsw_sp_nexthop_group *nh_group;
479 struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
482 struct mlxsw_sp_fib4_entry {
483 struct mlxsw_sp_fib_entry common;
490 struct mlxsw_sp_fib6_entry {
491 struct mlxsw_sp_fib_entry common;
492 struct list_head rt6_list;
496 struct mlxsw_sp_rt6 {
497 struct list_head list;
498 struct fib6_info *rt;
501 struct mlxsw_sp_lpm_tree {
503 unsigned int ref_count;
504 enum mlxsw_sp_l3proto proto;
505 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
506 struct mlxsw_sp_prefix_usage prefix_usage;
509 struct mlxsw_sp_fib {
510 struct rhashtable ht;
511 struct list_head node_list;
512 struct mlxsw_sp_vr *vr;
513 struct mlxsw_sp_lpm_tree *lpm_tree;
514 enum mlxsw_sp_l3proto proto;
518 u16 id; /* virtual router ID */
519 u32 tb_id; /* kernel fib table id */
520 unsigned int rif_count;
521 struct mlxsw_sp_fib *fib4;
522 struct mlxsw_sp_fib *fib6;
523 struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
524 struct mlxsw_sp_rif *ul_rif;
525 refcount_t ul_rif_refcnt;
528 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
530 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
531 struct mlxsw_sp_vr *vr,
532 enum mlxsw_sp_l3proto proto)
534 struct mlxsw_sp_lpm_tree *lpm_tree;
535 struct mlxsw_sp_fib *fib;
538 lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
539 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
541 return ERR_PTR(-ENOMEM);
542 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
544 goto err_rhashtable_init;
545 INIT_LIST_HEAD(&fib->node_list);
548 fib->lpm_tree = lpm_tree;
549 mlxsw_sp_lpm_tree_hold(lpm_tree);
550 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
552 goto err_lpm_tree_bind;
556 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
562 static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
563 struct mlxsw_sp_fib *fib)
565 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
566 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
567 WARN_ON(!list_empty(&fib->node_list));
568 rhashtable_destroy(&fib->ht);
572 static struct mlxsw_sp_lpm_tree *
573 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
575 struct mlxsw_sp_lpm_tree *lpm_tree;
578 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
579 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
580 if (lpm_tree->ref_count == 0)
586 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
587 struct mlxsw_sp_lpm_tree *lpm_tree)
589 char ralta_pl[MLXSW_REG_RALTA_LEN];
591 mlxsw_reg_ralta_pack(ralta_pl, true,
592 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
594 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
597 static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
598 struct mlxsw_sp_lpm_tree *lpm_tree)
600 char ralta_pl[MLXSW_REG_RALTA_LEN];
602 mlxsw_reg_ralta_pack(ralta_pl, false,
603 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
605 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
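/* The "left struct" (RALST) register describes the shape of an LPM tree.
 * Judging by the loop below, every used prefix length becomes a bin whose
 * left child is the previously packed, shorter bin (tracked in last_prefix),
 * so the hardware can walk from longer prefixes towards shorter ones.
 */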
609 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
610 struct mlxsw_sp_prefix_usage *prefix_usage,
611 struct mlxsw_sp_lpm_tree *lpm_tree)
613 char ralst_pl[MLXSW_REG_RALST_LEN];
616 u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
618 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
621 mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
622 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
625 mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
626 MLXSW_REG_RALST_BIN_NO_CHILD);
627 last_prefix = prefix;
629 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
632 static struct mlxsw_sp_lpm_tree *
633 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
634 struct mlxsw_sp_prefix_usage *prefix_usage,
635 enum mlxsw_sp_l3proto proto)
637 struct mlxsw_sp_lpm_tree *lpm_tree;
640 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
642 return ERR_PTR(-EBUSY);
643 lpm_tree->proto = proto;
644 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
648 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
651 goto err_left_struct_set;
652 memcpy(&lpm_tree->prefix_usage, prefix_usage,
653 sizeof(lpm_tree->prefix_usage));
654 memset(&lpm_tree->prefix_ref_count, 0,
655 sizeof(lpm_tree->prefix_ref_count));
656 lpm_tree->ref_count = 1;
660 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
664 static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
665 struct mlxsw_sp_lpm_tree *lpm_tree)
667 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
670 static struct mlxsw_sp_lpm_tree *
671 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
672 struct mlxsw_sp_prefix_usage *prefix_usage,
673 enum mlxsw_sp_l3proto proto)
675 struct mlxsw_sp_lpm_tree *lpm_tree;
678 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
679 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
680 if (lpm_tree->ref_count != 0 &&
681 lpm_tree->proto == proto &&
682 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
684 mlxsw_sp_lpm_tree_hold(lpm_tree);
688 return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
691 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
693 lpm_tree->ref_count++;
696 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
697 struct mlxsw_sp_lpm_tree *lpm_tree)
699 if (--lpm_tree->ref_count == 0)
700 mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
703 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
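/* Tree 0 is the tree that mlxsw_sp_vr_lpm_tree_unbind() rebinds a virtual
 * router to by default, which is why the trees managed here are numbered
 * starting from 1.
 */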
705 static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
707 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
708 struct mlxsw_sp_lpm_tree *lpm_tree;
712 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
715 max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
716 mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
717 mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
718 sizeof(struct mlxsw_sp_lpm_tree),
720 if (!mlxsw_sp->router->lpm.trees)
723 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
724 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
725 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
728 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
729 MLXSW_SP_L3_PROTO_IPV4);
730 if (IS_ERR(lpm_tree)) {
731 err = PTR_ERR(lpm_tree);
732 goto err_ipv4_tree_get;
734 mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
736 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
737 MLXSW_SP_L3_PROTO_IPV6);
738 if (IS_ERR(lpm_tree)) {
739 err = PTR_ERR(lpm_tree);
740 goto err_ipv6_tree_get;
742 mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
747 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
748 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
750 kfree(mlxsw_sp->router->lpm.trees);
754 static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
756 struct mlxsw_sp_lpm_tree *lpm_tree;
758 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
759 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
761 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
762 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
764 kfree(mlxsw_sp->router->lpm.trees);
767 static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
769 return !!vr->fib4 || !!vr->fib6 ||
770 !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
771 !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
774 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
776 int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
777 struct mlxsw_sp_vr *vr;
780 for (i = 0; i < max_vrs; i++) {
781 vr = &mlxsw_sp->router->vrs[i];
782 if (!mlxsw_sp_vr_is_used(vr))
788 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
789 const struct mlxsw_sp_fib *fib, u8 tree_id)
791 char raltb_pl[MLXSW_REG_RALTB_LEN];
793 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
794 (enum mlxsw_reg_ralxx_protocol) fib->proto,
796 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
799 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
800 const struct mlxsw_sp_fib *fib)
802 char raltb_pl[MLXSW_REG_RALTB_LEN];
804 /* Bind to tree 0, which is the default */
805 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
806 (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
807 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
810 static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
812 /* For our purposes, squash the main, default and local tables into one */
813 if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
814 tb_id = RT_TABLE_MAIN;
818 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
821 int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
822 struct mlxsw_sp_vr *vr;
825 tb_id = mlxsw_sp_fix_tb_id(tb_id);
827 for (i = 0; i < max_vrs; i++) {
828 vr = &mlxsw_sp->router->vrs[i];
829 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
835 int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
838 struct mlxsw_sp_vr *vr;
841 mutex_lock(&mlxsw_sp->router->lock);
842 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
849 mutex_unlock(&mlxsw_sp->router->lock);
853 static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
854 enum mlxsw_sp_l3proto proto)
857 case MLXSW_SP_L3_PROTO_IPV4:
859 case MLXSW_SP_L3_PROTO_IPV6:
865 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
867 struct netlink_ext_ack *extack)
869 struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
870 struct mlxsw_sp_fib *fib4;
871 struct mlxsw_sp_fib *fib6;
872 struct mlxsw_sp_vr *vr;
875 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
877 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
878 return ERR_PTR(-EBUSY);
880 fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
882 return ERR_CAST(fib4);
883 fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
886 goto err_fib6_create;
888 mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
889 MLXSW_SP_L3_PROTO_IPV4);
890 if (IS_ERR(mr4_table)) {
891 err = PTR_ERR(mr4_table);
892 goto err_mr4_table_create;
894 mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
895 MLXSW_SP_L3_PROTO_IPV6);
896 if (IS_ERR(mr6_table)) {
897 err = PTR_ERR(mr6_table);
898 goto err_mr6_table_create;
903 vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
904 vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
908 err_mr6_table_create:
909 mlxsw_sp_mr_table_destroy(mr4_table);
910 err_mr4_table_create:
911 mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
913 mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
917 static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
918 struct mlxsw_sp_vr *vr)
920 mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
921 vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
922 mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
923 vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
924 mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
926 mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
930 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
931 struct netlink_ext_ack *extack)
933 struct mlxsw_sp_vr *vr;
935 tb_id = mlxsw_sp_fix_tb_id(tb_id);
936 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
938 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
942 static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
944 if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
945 list_empty(&vr->fib6->node_list) &&
946 mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
947 mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
948 mlxsw_sp_vr_destroy(mlxsw_sp, vr);
952 mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
953 enum mlxsw_sp_l3proto proto, u8 tree_id)
955 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
957 if (!mlxsw_sp_vr_is_used(vr))
959 if (fib->lpm_tree->id == tree_id)
964 static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
965 struct mlxsw_sp_fib *fib,
966 struct mlxsw_sp_lpm_tree *new_tree)
968 struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
971 fib->lpm_tree = new_tree;
972 mlxsw_sp_lpm_tree_hold(new_tree);
973 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
976 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
980 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
981 fib->lpm_tree = old_tree;
985 static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
986 struct mlxsw_sp_fib *fib,
987 struct mlxsw_sp_lpm_tree *new_tree)
989 int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
990 enum mlxsw_sp_l3proto proto = fib->proto;
991 struct mlxsw_sp_lpm_tree *old_tree;
992 u8 old_id, new_id = new_tree->id;
993 struct mlxsw_sp_vr *vr;
996 old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
997 old_id = old_tree->id;
999 for (i = 0; i < max_vrs; i++) {
1000 vr = &mlxsw_sp->router->vrs[i];
1001 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
1003 err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
1004 mlxsw_sp_vr_fib(vr, proto),
1007 goto err_tree_replace;
1010 memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
1011 sizeof(new_tree->prefix_ref_count));
1012 mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
1013 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
1018 for (i--; i >= 0; i--) {
1019 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
1021 mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
1022 mlxsw_sp_vr_fib(vr, proto),
1028 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
1030 struct mlxsw_sp_vr *vr;
1034 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
1037 max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
1038 mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
1040 if (!mlxsw_sp->router->vrs)
1043 for (i = 0; i < max_vrs; i++) {
1044 vr = &mlxsw_sp->router->vrs[i];
1051 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
1053 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
1055 /* At this stage we're guaranteed not to have new incoming
1056 * FIB notifications and the work queue is free from FIBs
1057 * sitting on top of mlxsw netdevs. However, we can still
1058 * have other FIBs queued. Flush the queue before flushing
1059 * the device's tables. No need for locks, as we're the only
1062 mlxsw_core_flush_owq();
1063 mlxsw_sp_router_fib_flush(mlxsw_sp);
1064 kfree(mlxsw_sp->router->vrs);
1067 u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
1069 struct net_device *d;
1073 d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1075 tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
1077 tb_id = RT_TABLE_MAIN;
1084 mlxsw_sp_crif_init(struct mlxsw_sp_crif *crif, struct net_device *dev)
1086 crif->key.dev = dev;
1087 INIT_LIST_HEAD(&crif->nexthop_list);
1090 static struct mlxsw_sp_crif *
1091 mlxsw_sp_crif_alloc(struct net_device *dev)
1093 struct mlxsw_sp_crif *crif;
1095 crif = kzalloc(sizeof(*crif), GFP_KERNEL);
1099 mlxsw_sp_crif_init(crif, dev);
1103 static void mlxsw_sp_crif_free(struct mlxsw_sp_crif *crif)
1105 if (WARN_ON(crif->rif))
1108 WARN_ON(!list_empty(&crif->nexthop_list));
1112 static int mlxsw_sp_crif_insert(struct mlxsw_sp_router *router,
1113 struct mlxsw_sp_crif *crif)
1115 return rhashtable_insert_fast(&router->crif_ht, &crif->ht_node,
1116 mlxsw_sp_crif_ht_params);
1119 static void mlxsw_sp_crif_remove(struct mlxsw_sp_router *router,
1120 struct mlxsw_sp_crif *crif)
1122 rhashtable_remove_fast(&router->crif_ht, &crif->ht_node,
1123 mlxsw_sp_crif_ht_params);
1126 static struct mlxsw_sp_crif *
1127 mlxsw_sp_crif_lookup(struct mlxsw_sp_router *router,
1128 const struct net_device *dev)
1130 struct mlxsw_sp_crif_key key = {
1131 .dev = (struct net_device *)dev,
1134 return rhashtable_lookup_fast(&router->crif_ht, &key,
1135 mlxsw_sp_crif_ht_params);
1138 static struct mlxsw_sp_rif *
1139 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
1140 const struct mlxsw_sp_rif_params *params,
1141 struct netlink_ext_ack *extack);
1143 static struct mlxsw_sp_rif_ipip_lb *
1144 mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
1145 enum mlxsw_sp_ipip_type ipipt,
1146 struct net_device *ol_dev,
1147 struct netlink_ext_ack *extack)
1149 struct mlxsw_sp_rif_params_ipip_lb lb_params;
1150 const struct mlxsw_sp_ipip_ops *ipip_ops;
1151 struct mlxsw_sp_rif *rif;
1153 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1154 lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1155 .common.dev = ol_dev,
1156 .common.lag = false,
1157 .common.double_entry = ipip_ops->double_rif_entry,
1158 .lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1161 rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
1163 return ERR_CAST(rif);
1164 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1167 static struct mlxsw_sp_ipip_entry *
1168 mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
1169 enum mlxsw_sp_ipip_type ipipt,
1170 struct net_device *ol_dev)
1172 const struct mlxsw_sp_ipip_ops *ipip_ops;
1173 struct mlxsw_sp_ipip_entry *ipip_entry;
1174 struct mlxsw_sp_ipip_entry *ret = NULL;
1177 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1178 ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
1180 return ERR_PTR(-ENOMEM);
1182 ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
1184 if (IS_ERR(ipip_entry->ol_lb)) {
1185 ret = ERR_CAST(ipip_entry->ol_lb);
1186 goto err_ol_ipip_lb_create;
1189 ipip_entry->ipipt = ipipt;
1190 ipip_entry->ol_dev = ol_dev;
1191 ipip_entry->parms = ipip_ops->parms_init(ol_dev);
1193 err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
1196 goto err_rem_ip_addr_set;
1201 err_rem_ip_addr_set:
1202 mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1203 err_ol_ipip_lb_create:
1208 static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
1209 struct mlxsw_sp_ipip_entry *ipip_entry)
1211 const struct mlxsw_sp_ipip_ops *ipip_ops =
1212 mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1214 ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
1215 mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1220 mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1221 const enum mlxsw_sp_l3proto ul_proto,
1222 union mlxsw_sp_l3addr saddr,
1224 struct mlxsw_sp_ipip_entry *ipip_entry)
1226 u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1227 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1228 union mlxsw_sp_l3addr tun_saddr;
1230 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1233 tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1234 return tun_ul_tb_id == ul_tb_id &&
1235 mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1238 static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
1239 enum mlxsw_sp_ipip_type ipipt)
1241 const struct mlxsw_sp_ipip_ops *ipip_ops;
1243 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1245 /* Not all tunnels need to increase the default parsing depth
1248 if (ipip_ops->inc_parsing_depth)
1249 return mlxsw_sp_parsing_depth_inc(mlxsw_sp);
1254 static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
1255 enum mlxsw_sp_ipip_type ipipt)
1257 const struct mlxsw_sp_ipip_ops *ipip_ops =
1258 mlxsw_sp->router->ipip_ops_arr[ipipt];
1260 if (ipip_ops->inc_parsing_depth)
1261 mlxsw_sp_parsing_depth_dec(mlxsw_sp);
1265 mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1266 struct mlxsw_sp_fib_entry *fib_entry,
1267 struct mlxsw_sp_ipip_entry *ipip_entry)
1272 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1277 err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
1280 goto err_parsing_depth_inc;
1282 ipip_entry->decap_fib_entry = fib_entry;
1283 fib_entry->decap.ipip_entry = ipip_entry;
1284 fib_entry->decap.tunnel_index = tunnel_index;
1288 err_parsing_depth_inc:
1289 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
1290 fib_entry->decap.tunnel_index);
1294 static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1295 struct mlxsw_sp_fib_entry *fib_entry)
1297 enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;
1299 /* Unlink this node from the IPIP entry that it's the decap entry of. */
1300 fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1301 fib_entry->decap.ipip_entry = NULL;
1302 mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
1303 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1304 1, fib_entry->decap.tunnel_index);
1307 static struct mlxsw_sp_fib_node *
1308 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1309 size_t addr_len, unsigned char prefix_len);
1310 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1311 struct mlxsw_sp_fib_entry *fib_entry);
1314 mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1315 struct mlxsw_sp_ipip_entry *ipip_entry)
1317 struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1319 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1320 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1322 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1326 mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1327 struct mlxsw_sp_ipip_entry *ipip_entry,
1328 struct mlxsw_sp_fib_entry *decap_fib_entry)
1330 if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1333 decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1335 if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1336 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1339 static struct mlxsw_sp_fib_entry *
1340 mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
1341 enum mlxsw_sp_l3proto proto,
1342 const union mlxsw_sp_l3addr *addr,
1343 enum mlxsw_sp_fib_entry_type type)
1345 struct mlxsw_sp_fib_node *fib_node;
1346 unsigned char addr_prefix_len;
1347 struct mlxsw_sp_fib *fib;
1348 struct mlxsw_sp_vr *vr;
1353 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
1356 fib = mlxsw_sp_vr_fib(vr, proto);
1359 case MLXSW_SP_L3_PROTO_IPV4:
1360 addr4 = be32_to_cpu(addr->addr4);
1363 addr_prefix_len = 32;
1365 case MLXSW_SP_L3_PROTO_IPV6:
1366 addrp = &addr->addr6;
1368 addr_prefix_len = 128;
1375 fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
1377 if (!fib_node || fib_node->fib_entry->type != type)
1380 return fib_node->fib_entry;
1383 /* Given an IPIP entry, find the corresponding decap route. */
1384 static struct mlxsw_sp_fib_entry *
1385 mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1386 struct mlxsw_sp_ipip_entry *ipip_entry)
1388 struct mlxsw_sp_fib_node *fib_node;
1389 const struct mlxsw_sp_ipip_ops *ipip_ops;
1390 unsigned char saddr_prefix_len;
1391 union mlxsw_sp_l3addr saddr;
1392 struct mlxsw_sp_fib *ul_fib;
1393 struct mlxsw_sp_vr *ul_vr;
1399 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1401 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1402 ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1406 ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1407 saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1408 ipip_entry->ol_dev);
1410 switch (ipip_ops->ul_proto) {
1411 case MLXSW_SP_L3_PROTO_IPV4:
1412 saddr4 = be32_to_cpu(saddr.addr4);
1415 saddr_prefix_len = 32;
1417 case MLXSW_SP_L3_PROTO_IPV6:
1418 saddrp = &saddr.addr6;
1420 saddr_prefix_len = 128;
1427 fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1430 fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1433 return fib_node->fib_entry;
1436 static struct mlxsw_sp_ipip_entry *
1437 mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1438 enum mlxsw_sp_ipip_type ipipt,
1439 struct net_device *ol_dev)
1441 struct mlxsw_sp_ipip_entry *ipip_entry;
1443 ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1444 if (IS_ERR(ipip_entry))
1447 list_add_tail(&ipip_entry->ipip_list_node,
1448 &mlxsw_sp->router->ipip_list);
1454 mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1455 struct mlxsw_sp_ipip_entry *ipip_entry)
1457 list_del(&ipip_entry->ipip_list_node);
1458 mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
1462 mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1463 const struct net_device *ul_dev,
1464 enum mlxsw_sp_l3proto ul_proto,
1465 union mlxsw_sp_l3addr ul_dip,
1466 struct mlxsw_sp_ipip_entry *ipip_entry)
1468 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1469 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1471 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1474 return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1475 ul_tb_id, ipip_entry);
1478 /* Given decap parameters, find the corresponding IPIP entry. */
1479 static struct mlxsw_sp_ipip_entry *
1480 mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
1481 enum mlxsw_sp_l3proto ul_proto,
1482 union mlxsw_sp_l3addr ul_dip)
1484 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1485 struct net_device *ul_dev;
1489 ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
1493 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1495 if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1509 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1510 const struct net_device *dev,
1511 enum mlxsw_sp_ipip_type *p_type)
1513 struct mlxsw_sp_router *router = mlxsw_sp->router;
1514 const struct mlxsw_sp_ipip_ops *ipip_ops;
1515 enum mlxsw_sp_ipip_type ipipt;
1517 for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1518 ipip_ops = router->ipip_ops_arr[ipipt];
1519 if (dev->type == ipip_ops->dev_type) {
1528 static bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1529 const struct net_device *dev)
1531 return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1534 static struct mlxsw_sp_ipip_entry *
1535 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1536 const struct net_device *ol_dev)
1538 struct mlxsw_sp_ipip_entry *ipip_entry;
1540 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1542 if (ipip_entry->ol_dev == ol_dev)
1548 static struct mlxsw_sp_ipip_entry *
1549 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1550 const struct net_device *ul_dev,
1551 struct mlxsw_sp_ipip_entry *start)
1553 struct mlxsw_sp_ipip_entry *ipip_entry;
1555 ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1557 list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1559 struct net_device *ol_dev = ipip_entry->ol_dev;
1560 struct net_device *ipip_ul_dev;
1563 ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1566 if (ipip_ul_dev == ul_dev)
1573 static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
1574 const struct net_device *dev)
1576 return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1579 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1580 const struct net_device *ol_dev,
1581 enum mlxsw_sp_ipip_type ipipt)
1583 const struct mlxsw_sp_ipip_ops *ops
1584 = mlxsw_sp->router->ipip_ops_arr[ipipt];
1586 return ops->can_offload(mlxsw_sp, ol_dev);
1589 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1590 struct net_device *ol_dev)
1592 enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
1593 struct mlxsw_sp_ipip_entry *ipip_entry;
1594 enum mlxsw_sp_l3proto ul_proto;
1595 union mlxsw_sp_l3addr saddr;
1598 mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1599 if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1600 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1601 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1602 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1603 if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1606 ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1608 if (IS_ERR(ipip_entry))
1609 return PTR_ERR(ipip_entry);
1616 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1617 struct net_device *ol_dev)
1619 struct mlxsw_sp_ipip_entry *ipip_entry;
1621 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1623 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1627 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1628 struct mlxsw_sp_ipip_entry *ipip_entry)
1630 struct mlxsw_sp_fib_entry *decap_fib_entry;
1632 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1633 if (decap_fib_entry)
1634 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1639 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1640 u16 ul_rif_id, bool enable)
1642 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1643 struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
1644 enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
1645 struct mlxsw_sp_rif *rif = &lb_rif->common;
1646 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1647 char ritr_pl[MLXSW_REG_RITR_LEN];
1648 struct in6_addr *saddr6;
1651 ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
1652 switch (lb_cf.ul_protocol) {
1653 case MLXSW_SP_L3_PROTO_IPV4:
1654 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1655 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1656 rif->rif_index, rif->vr_id, dev->mtu);
1657 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1658 ipip_options, ul_vr_id,
1663 case MLXSW_SP_L3_PROTO_IPV6:
1664 saddr6 = &lb_cf.saddr.addr6;
1665 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1666 rif->rif_index, rif->vr_id, dev->mtu);
1667 mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
1668 ipip_options, ul_vr_id,
1674 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
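/* When the MTU of the overlay (tunnel) netdevice changes, re-issue the
 * loopback RIF configuration so that the new MTU reaches the hardware, and
 * cache it in the RIF afterwards.
 */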
1677 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1678 struct net_device *ol_dev)
1680 struct mlxsw_sp_ipip_entry *ipip_entry;
1681 struct mlxsw_sp_rif_ipip_lb *lb_rif;
1684 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1686 lb_rif = ipip_entry->ol_lb;
1687 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1688 lb_rif->ul_rif_id, true);
1691 lb_rif->common.mtu = ol_dev->mtu;
1698 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1699 struct net_device *ol_dev)
1701 struct mlxsw_sp_ipip_entry *ipip_entry;
1703 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1705 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1709 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1710 struct mlxsw_sp_ipip_entry *ipip_entry)
1712 if (ipip_entry->decap_fib_entry)
1713 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1716 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1717 struct net_device *ol_dev)
1719 struct mlxsw_sp_ipip_entry *ipip_entry;
1721 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1723 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1726 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1727 struct mlxsw_sp_rif *rif);
1729 static void mlxsw_sp_rif_migrate_destroy(struct mlxsw_sp *mlxsw_sp,
1730 struct mlxsw_sp_rif *old_rif,
1731 struct mlxsw_sp_rif *new_rif,
1734 struct mlxsw_sp_crif *crif = old_rif->crif;
1735 struct mlxsw_sp_crif mock_crif = {};
1738 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
1740 /* Plant a mock CRIF so that destroying the old RIF doesn't unoffload
1741 * our nexthops and IPIP tunnels, and doesn't sever the crif->rif link.
1743 mlxsw_sp_crif_init(&mock_crif, crif->key.dev);
1744 old_rif->crif = &mock_crif;
1745 mock_crif.rif = old_rif;
1746 mlxsw_sp_rif_destroy(old_rif);
1750 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1751 struct mlxsw_sp_ipip_entry *ipip_entry,
1753 struct netlink_ext_ack *extack)
1755 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1756 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1758 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1762 if (IS_ERR(new_lb_rif))
1763 return PTR_ERR(new_lb_rif);
1764 ipip_entry->ol_lb = new_lb_rif;
1766 mlxsw_sp_rif_migrate_destroy(mlxsw_sp, &old_lb_rif->common,
1767 &new_lb_rif->common, keep_encap);
1772 * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
1773 * @mlxsw_sp: mlxsw_sp.
1774 * @ipip_entry: IPIP entry.
1775 * @recreate_loopback: Recreates the associated loopback RIF.
1776 * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
1777 * relevant when recreate_loopback is true.
1778 * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
1779 * is only relevant when recreate_loopback is false.
1782 * Return: Non-zero value on failure.
1784 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1785 struct mlxsw_sp_ipip_entry *ipip_entry,
1786 bool recreate_loopback,
1788 bool update_nexthops,
1789 struct netlink_ext_ack *extack)
1793 /* RIFs can't be edited, so to update loopback, we need to destroy and
1794 * recreate it. That creates a window of opportunity where RALUE and
1795 * RATR registers end up referencing a RIF that's already gone. RATRs
1796 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1797 * of RALUE, demote the decap route back.
1799 if (ipip_entry->decap_fib_entry)
1800 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1802 if (recreate_loopback) {
1803 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1804 keep_encap, extack);
1807 } else if (update_nexthops) {
1808 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1809 &ipip_entry->ol_lb->common);
1812 if (ipip_entry->ol_dev->flags & IFF_UP)
1813 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1818 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1819 struct net_device *ol_dev,
1820 struct netlink_ext_ack *extack)
1822 struct mlxsw_sp_ipip_entry *ipip_entry =
1823 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1828 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1829 true, false, false, extack);
1833 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1834 struct mlxsw_sp_ipip_entry *ipip_entry,
1835 struct net_device *ul_dev,
1837 struct netlink_ext_ack *extack)
1839 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1840 enum mlxsw_sp_l3proto ul_proto;
1841 union mlxsw_sp_l3addr saddr;
1843 /* Moving underlay to a different VRF might cause local address
1844 * conflict, and the conflicting tunnels need to be demoted.
1846 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1847 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1848 if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1851 *demote_this = true;
1855 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1856 true, true, false, extack);
1860 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1861 struct mlxsw_sp_ipip_entry *ipip_entry,
1862 struct net_device *ul_dev)
1864 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1865 false, false, true, NULL);
1869 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1870 struct mlxsw_sp_ipip_entry *ipip_entry,
1871 struct net_device *ul_dev)
1873 /* A down underlay device means that encapsulated packets are no longer
1874 * forwarded, but decap still works. So refresh next hops without
1875 * touching anything else.
1877 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1878 false, false, true, NULL);
1882 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1883 struct net_device *ol_dev,
1884 struct netlink_ext_ack *extack)
1886 const struct mlxsw_sp_ipip_ops *ipip_ops;
1887 struct mlxsw_sp_ipip_entry *ipip_entry;
1890 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1892 /* A change might make a tunnel eligible for offloading, but
1893 * that is currently not implemented. What falls to slow path stays there.
1898 /* A change might make a tunnel not eligible for offloading. */
1899 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1900 ipip_entry->ipipt)) {
1901 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1905 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1906 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1910 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1911 struct mlxsw_sp_ipip_entry *ipip_entry)
1913 struct net_device *ol_dev = ipip_entry->ol_dev;
1915 if (ol_dev->flags & IFF_UP)
1916 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1917 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1920 /* The configuration where several tunnels have the same local address in the
1921 * same underlay table needs special treatment in the HW. That is currently not
1922 * implemented in the driver. This function finds and demotes the first tunnel
1923 * with a given source address, except the one passed in the argument `except'.
1927 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1928 enum mlxsw_sp_l3proto ul_proto,
1929 union mlxsw_sp_l3addr saddr,
1931 const struct mlxsw_sp_ipip_entry *except)
1933 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1935 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1937 if (ipip_entry != except &&
1938 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1939 ul_tb_id, ipip_entry)) {
1940 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1948 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1949 struct net_device *ul_dev)
1951 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1953 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1955 struct net_device *ol_dev = ipip_entry->ol_dev;
1956 struct net_device *ipip_ul_dev;
1959 ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1961 if (ipip_ul_dev == ul_dev)
1962 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1966 static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1967 struct net_device *ol_dev,
1968 unsigned long event,
1969 struct netdev_notifier_info *info)
1971 struct netdev_notifier_changeupper_info *chup;
1972 struct netlink_ext_ack *extack;
1976 case NETDEV_REGISTER:
1977 err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1979 case NETDEV_UNREGISTER:
1980 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1983 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1986 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1988 case NETDEV_CHANGEUPPER:
1989 chup = container_of(info, typeof(*chup), info);
1990 extack = info->extack;
1991 if (netif_is_l3_master(chup->upper_dev))
1992 err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1997 extack = info->extack;
1998 err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
2001 case NETDEV_CHANGEMTU:
2002 err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
2009 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
2010 struct mlxsw_sp_ipip_entry *ipip_entry,
2011 struct net_device *ul_dev,
2013 unsigned long event,
2014 struct netdev_notifier_info *info)
2016 struct netdev_notifier_changeupper_info *chup;
2017 struct netlink_ext_ack *extack;
2020 case NETDEV_CHANGEUPPER:
2021 chup = container_of(info, typeof(*chup), info);
2022 extack = info->extack;
2023 if (netif_is_l3_master(chup->upper_dev))
2024 return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
2032 return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
2035 return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
2043 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
2044 struct net_device *ul_dev,
2045 unsigned long event,
2046 struct netdev_notifier_info *info)
2048 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
2051 while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
2054 struct mlxsw_sp_ipip_entry *prev;
2055 bool demote_this = false;
2057 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
2058 ul_dev, &demote_this,
2061 mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
2067 if (list_is_first(&ipip_entry->ipip_list_node,
2068 &mlxsw_sp->router->ipip_list))
2071 /* This can't be cached from previous iteration,
2072 * because that entry could be gone now.
2074 prev = list_prev_entry(ipip_entry,
2076 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
2084 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2085 enum mlxsw_sp_l3proto ul_proto,
2086 const union mlxsw_sp_l3addr *ul_sip,
2089 enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2090 struct mlxsw_sp_router *router = mlxsw_sp->router;
2091 struct mlxsw_sp_fib_entry *fib_entry;
2094 mutex_lock(&mlxsw_sp->router->lock);
2096 if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
2101 router->nve_decap_config.ul_tb_id = ul_tb_id;
2102 router->nve_decap_config.tunnel_index = tunnel_index;
2103 router->nve_decap_config.ul_proto = ul_proto;
2104 router->nve_decap_config.ul_sip = *ul_sip;
2105 router->nve_decap_config.valid = true;
2107 /* It is valid to create a tunnel with a local IP and only later
2108 * assign this IP address to a local interface
2110 fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2116 fib_entry->decap.tunnel_index = tunnel_index;
2117 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2119 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2121 goto err_fib_entry_update;
2125 err_fib_entry_update:
2126 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2127 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2129 mutex_unlock(&mlxsw_sp->router->lock);
2133 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2134 enum mlxsw_sp_l3proto ul_proto,
2135 const union mlxsw_sp_l3addr *ul_sip)
2137 enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2138 struct mlxsw_sp_router *router = mlxsw_sp->router;
2139 struct mlxsw_sp_fib_entry *fib_entry;
2141 mutex_lock(&mlxsw_sp->router->lock);
2143 if (WARN_ON_ONCE(!router->nve_decap_config.valid))
2146 router->nve_decap_config.valid = false;
2148 fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2154 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2155 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2157 mutex_unlock(&mlxsw_sp->router->lock);
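/* True iff the given underlay table, protocol and source IP match the
 * single NVE decap configuration currently recorded in
 * router->nve_decap_config (set up by mlxsw_sp_router_nve_promote_decap()
 * above).
 */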
2160 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2162 enum mlxsw_sp_l3proto ul_proto,
2163 const union mlxsw_sp_l3addr *ul_sip)
2165 struct mlxsw_sp_router *router = mlxsw_sp->router;
2167 return router->nve_decap_config.valid &&
2168 router->nve_decap_config.ul_tb_id == ul_tb_id &&
2169 router->nve_decap_config.ul_proto == ul_proto &&
2170 !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
2174 struct mlxsw_sp_neigh_key {
2175 struct neighbour *n;
2178 struct mlxsw_sp_neigh_entry {
2179 struct list_head rif_list_node;
2180 struct rhash_head ht_node;
2181 struct mlxsw_sp_neigh_key key;
2184 unsigned char ha[ETH_ALEN];
2185 struct list_head nexthop_list; /* list of nexthops using this neigh entry */
2188 struct list_head nexthop_neighs_list_node;
2189 unsigned int counter_index;
2193 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
2194 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
2195 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
2196 .key_len = sizeof(struct mlxsw_sp_neigh_key),
2199 struct mlxsw_sp_neigh_entry *
2200 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2201 struct mlxsw_sp_neigh_entry *neigh_entry)
2204 if (list_empty(&rif->neigh_list))
2207 return list_first_entry(&rif->neigh_list,
2208 typeof(*neigh_entry),
2211 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
2213 return list_next_entry(neigh_entry, rif_list_node);
2216 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
2218 return neigh_entry->key.n->tbl->family;
2222 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
2224 return neigh_entry->ha;
2227 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2229 struct neighbour *n;
2231 n = neigh_entry->key.n;
2232 return ntohl(*((__be32 *) n->primary_key));
2236 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2238 struct neighbour *n;
2240 n = neigh_entry->key.n;
2241 return (struct in6_addr *) &n->primary_key;
2244 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2245 struct mlxsw_sp_neigh_entry *neigh_entry,
2248 if (!neigh_entry->counter_valid)
2251 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2255 static struct mlxsw_sp_neigh_entry *
2256 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2259 struct mlxsw_sp_neigh_entry *neigh_entry;
2261 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2265 neigh_entry->key.n = n;
2266 neigh_entry->rif = rif;
2267 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2272 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2278 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2279 struct mlxsw_sp_neigh_entry *neigh_entry)
2281 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2282 &neigh_entry->ht_node,
2283 mlxsw_sp_neigh_ht_params);
2287 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2288 struct mlxsw_sp_neigh_entry *neigh_entry)
2290 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2291 &neigh_entry->ht_node,
2292 mlxsw_sp_neigh_ht_params);
2296 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2297 struct mlxsw_sp_neigh_entry *neigh_entry)
2299 struct devlink *devlink;
2300 const char *table_name;
2302 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2304 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2307 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2314 devlink = priv_to_devlink(mlxsw_sp->core);
2315 return devlink_dpipe_table_counter_enabled(devlink, table_name);
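/* Descriptive note: neighbour entry counters are therefore allocated only
 * while the matching dpipe host table (MLXSW_SP_DPIPE_TABLE_NAME_HOST4 or
 * MLXSW_SP_DPIPE_TABLE_NAME_HOST6) has counters enabled through devlink
 * dpipe; otherwise the entry is programmed without a bound flow counter.
 */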
2319 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2320 struct mlxsw_sp_neigh_entry *neigh_entry)
2322 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2325 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2328 neigh_entry->counter_valid = true;
2332 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2333 struct mlxsw_sp_neigh_entry *neigh_entry)
2335 if (!neigh_entry->counter_valid)
2337 mlxsw_sp_flow_counter_free(mlxsw_sp,
2338 neigh_entry->counter_index);
2339 neigh_entry->counter_valid = false;
2342 static struct mlxsw_sp_neigh_entry *
2343 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2345 struct mlxsw_sp_neigh_entry *neigh_entry;
2346 struct mlxsw_sp_rif *rif;
2349 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2351 return ERR_PTR(-EINVAL);
2353 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2355 return ERR_PTR(-ENOMEM);
2357 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2359 goto err_neigh_entry_insert;
2361 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2362 atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count);
2363 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2367 err_neigh_entry_insert:
2368 mlxsw_sp_neigh_entry_free(neigh_entry);
2369 return ERR_PTR(err);
2373 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2374 struct mlxsw_sp_neigh_entry *neigh_entry)
2376 list_del(&neigh_entry->rif_list_node);
2377 atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count);
2378 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2379 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2380 mlxsw_sp_neigh_entry_free(neigh_entry);
2383 static struct mlxsw_sp_neigh_entry *
2384 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2386 struct mlxsw_sp_neigh_key key;
2389 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2390 &key, mlxsw_sp_neigh_ht_params);
2394 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2396 unsigned long interval;
2398 #if IS_ENABLED(CONFIG_IPV6)
2399 interval = min_t(unsigned long,
2400 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2401 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2403 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2405 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
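/* Descriptive note: with the kernel's default DELAY_PROBE_TIME of 5 seconds
 * for both the ARP and ND tables, the initial polling interval comes out to
 * 5000 ms.
 */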
2408 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2412 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
2413 struct net_device *dev;
2414 struct neighbour *n;
2419 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2421 if (WARN_ON_ONCE(rif >= max_rifs))
2423 if (!mlxsw_sp->router->rifs[rif]) {
2424 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2429 dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
2430 n = neigh_lookup(&arp_tbl, &dipn, dev);
2434 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2435 neigh_event_send(n, NULL);
2439 #if IS_ENABLED(CONFIG_IPV6)
2440 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2444 struct net_device *dev;
2445 struct neighbour *n;
2446 struct in6_addr dip;
2449 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2452 if (!mlxsw_sp->router->rifs[rif]) {
2453 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2457 dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
2458 n = neigh_lookup(&nd_tbl, &dip, dev);
2462 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2463 neigh_event_send(n, NULL);
2467 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2474 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2481 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2483 /* Hardware starts counting at 0, so add 1. */
2486 /* Each record consists of several neighbour entries. */
2487 for (i = 0; i < num_entries; i++) {
2490 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2491 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2497 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2501 /* One record contains one entry. */
2502 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2506 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2507 char *rauhtd_pl, int rec_index)
2509 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2510 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2511 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2514 case MLXSW_REG_RAUHTD_TYPE_IPV6:
2515 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2521 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2523 u8 num_rec, last_rec_index, num_entries;
2525 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2526 last_rec_index = num_rec - 1;
2528 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2530 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2531 MLXSW_REG_RAUHTD_TYPE_IPV6)
2534 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2536 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
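/* Descriptive note: taken together, the checks above deem a response full
 * only when the maximum number of records was returned and the last record
 * could not hold any further entries, i.e. it is either an IPv6 record
 * (always a single entry) or a completely filled IPv4 record. Only in that
 * case does the caller below issue another query.
 */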
2542 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2544 enum mlxsw_reg_rauhtd_type type)
2549 /* Ensure the RIF we read from the device does not change mid-dump. */
2550 mutex_lock(&mlxsw_sp->router->lock);
2552 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2553 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2556 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2559 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2560 for (i = 0; i < num_rec; i++)
2561 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2563 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2564 mutex_unlock(&mlxsw_sp->router->lock);
2569 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2571 enum mlxsw_reg_rauhtd_type type;
2575 if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count))
2578 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2582 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2583 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2587 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2588 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2594 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2596 struct mlxsw_sp_neigh_entry *neigh_entry;
2598 mutex_lock(&mlxsw_sp->router->lock);
2599 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2600 nexthop_neighs_list_node)
2601 /* If this neigh has nexthops, make the kernel think this neigh
2602 * is active regardless of the traffic.
2604 neigh_event_send(neigh_entry->key.n, NULL);
2605 mutex_unlock(&mlxsw_sp->router->lock);
2609 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2611 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2613 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2614 msecs_to_jiffies(interval));
2617 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2619 struct mlxsw_sp_router *router;
2622 router = container_of(work, struct mlxsw_sp_router,
2623 neighs_update.dw.work);
2624 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2626 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
2628 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2630 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2633 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2635 struct mlxsw_sp_neigh_entry *neigh_entry;
2636 struct mlxsw_sp_router *router;
2638 router = container_of(work, struct mlxsw_sp_router,
2639 nexthop_probe_dw.work);
2640 /* Iterate over nexthop neighbours, find those that are unresolved and
2641 * send ARP on them. This solves the chicken-and-egg problem where
2642 * a nexthop would not be offloaded until its neighbour is resolved,
2643 * but the neighbour would never be resolved as long as traffic is
2644 * flowing in HW via a different nexthop.
2646 mutex_lock(&router->lock);
2647 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2648 nexthop_neighs_list_node)
2649 if (!neigh_entry->connected)
2650 neigh_event_send(neigh_entry->key.n, NULL);
2651 mutex_unlock(&router->lock);
2653 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2654 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2658 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2659 struct mlxsw_sp_neigh_entry *neigh_entry,
2660 bool removing, bool dead);
2662 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2664 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2665 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2669 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2670 struct mlxsw_sp_neigh_entry *neigh_entry,
2671 enum mlxsw_reg_rauht_op op)
2673 struct neighbour *n = neigh_entry->key.n;
2674 u32 dip = ntohl(*((__be32 *) n->primary_key));
2675 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2677 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2679 if (neigh_entry->counter_valid)
2680 mlxsw_reg_rauht_pack_counter(rauht_pl,
2681 neigh_entry->counter_index);
2682 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2686 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2687 struct mlxsw_sp_neigh_entry *neigh_entry,
2688 enum mlxsw_reg_rauht_op op)
2690 struct neighbour *n = neigh_entry->key.n;
2691 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2692 const char *dip = n->primary_key;
2694 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2696 if (neigh_entry->counter_valid)
2697 mlxsw_reg_rauht_pack_counter(rauht_pl,
2698 neigh_entry->counter_index);
2699 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2702 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2704 struct neighbour *n = neigh_entry->key.n;
2706 /* Packets with a link-local destination address are trapped
2707 * after LPM lookup and never reach the neighbour table, so
2708 * there is no need to program such neighbours to the device.
2710 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2711 IPV6_ADDR_LINKLOCAL)
2717 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2718 struct mlxsw_sp_neigh_entry *neigh_entry,
2721 enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2724 if (!adding && !neigh_entry->connected)
2726 neigh_entry->connected = adding;
2727 if (neigh_entry->key.n->tbl->family == AF_INET) {
2728 err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2732 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2733 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2735 err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2745 neigh_entry->key.n->flags |= NTF_OFFLOADED;
2747 neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
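/* Descriptive note: NTF_OFFLOADED is the flag user space sees as the
 * "offload" indication (e.g. in 'ip neigh' output), so it is toggled here
 * together with the hardware state of the entry.
 */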
2751 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2752 struct mlxsw_sp_neigh_entry *neigh_entry,
2756 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2758 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2759 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2762 struct mlxsw_sp_netevent_work {
2763 struct work_struct work;
2764 struct mlxsw_sp *mlxsw_sp;
2765 struct neighbour *n;
2768 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2770 struct mlxsw_sp_netevent_work *net_work =
2771 container_of(work, struct mlxsw_sp_netevent_work, work);
2772 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2773 struct mlxsw_sp_neigh_entry *neigh_entry;
2774 struct neighbour *n = net_work->n;
2775 unsigned char ha[ETH_ALEN];
2776 bool entry_connected;
2779 /* If these parameters are changed after we release the lock,
2780 * then we are guaranteed to receive another event letting us
2783 read_lock_bh(&n->lock);
2784 memcpy(ha, n->ha, ETH_ALEN);
2785 nud_state = n->nud_state;
2787 read_unlock_bh(&n->lock);
2789 mutex_lock(&mlxsw_sp->router->lock);
2790 mlxsw_sp_span_respin(mlxsw_sp);
2792 entry_connected = nud_state & NUD_VALID && !dead;
2793 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2794 if (!entry_connected && !neigh_entry)
2797 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2798 if (IS_ERR(neigh_entry))
2802 if (neigh_entry->connected && entry_connected &&
2803 !memcmp(neigh_entry->ha, ha, ETH_ALEN))
2806 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2807 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2808 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2811 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2812 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2815 mutex_unlock(&mlxsw_sp->router->lock);
2820 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2822 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2824 struct mlxsw_sp_netevent_work *net_work =
2825 container_of(work, struct mlxsw_sp_netevent_work, work);
2826 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2828 mlxsw_sp_mp_hash_init(mlxsw_sp);
2832 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2834 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2836 struct mlxsw_sp_netevent_work *net_work =
2837 container_of(work, struct mlxsw_sp_netevent_work, work);
2838 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2840 __mlxsw_sp_router_init(mlxsw_sp);
2844 static int mlxsw_sp_router_schedule_work(struct net *net,
2845 struct mlxsw_sp_router *router,
2846 struct neighbour *n,
2847 void (*cb)(struct work_struct *))
2849 struct mlxsw_sp_netevent_work *net_work;
2851 if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2854 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2858 INIT_WORK(&net_work->work, cb);
2859 net_work->mlxsw_sp = router->mlxsw_sp;
2861 mlxsw_core_schedule_work(&net_work->work);
2865 static bool mlxsw_sp_dev_lower_is_port(struct net_device *dev)
2867 struct mlxsw_sp_port *mlxsw_sp_port;
2870 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
2872 return !!mlxsw_sp_port;
2875 static int mlxsw_sp_router_schedule_neigh_work(struct mlxsw_sp_router *router,
2876 struct neighbour *n)
2880 net = neigh_parms_net(n->parms);
2882 /* Take a reference to ensure the neighbour won't be destroyed until we
2883 * drop the reference in the delayed work.
2886 return mlxsw_sp_router_schedule_work(net, router, n,
2887 mlxsw_sp_router_neigh_event_work);
2890 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2891 unsigned long event, void *ptr)
2893 struct mlxsw_sp_router *router;
2894 unsigned long interval;
2895 struct neigh_parms *p;
2896 struct neighbour *n;
2898 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2901 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2904 /* We don't care about changes in the default table. */
2905 if (!p->dev || (p->tbl->family != AF_INET &&
2906 p->tbl->family != AF_INET6))
2909 /* We are in atomic context and can't take RTNL mutex,
2910 * so use RCU variant to walk the device chain.
2912 if (!mlxsw_sp_dev_lower_is_port(p->dev))
2915 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2916 router->neighs_update.interval = interval;
2918 case NETEVENT_NEIGH_UPDATE:
2921 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2924 if (!mlxsw_sp_dev_lower_is_port(n->dev))
2927 return mlxsw_sp_router_schedule_neigh_work(router, n);
2929 case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2930 case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2931 return mlxsw_sp_router_schedule_work(ptr, router, NULL,
2932 mlxsw_sp_router_mp_hash_event_work);
2934 case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2935 return mlxsw_sp_router_schedule_work(ptr, router, NULL,
2936 mlxsw_sp_router_update_priority_work);
2942 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2946 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2947 &mlxsw_sp_neigh_ht_params);
2951 /* Initialize the polling interval according to the default
2954 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2956 /* Create the delayed works for the activity_update */
2957 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2958 mlxsw_sp_router_neighs_update_work);
2959 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2960 mlxsw_sp_router_probe_unresolved_nexthops);
2961 atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0);
2962 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2963 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2967 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2969 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2970 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2971 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2974 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2975 struct mlxsw_sp_rif *rif)
2977 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2979 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2981 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2982 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2986 enum mlxsw_sp_nexthop_type {
2987 MLXSW_SP_NEXTHOP_TYPE_ETH,
2988 MLXSW_SP_NEXTHOP_TYPE_IPIP,
2991 enum mlxsw_sp_nexthop_action {
2992 /* Nexthop forwards packets to an egress RIF */
2993 MLXSW_SP_NEXTHOP_ACTION_FORWARD,
2994 /* Nexthop discards packets */
2995 MLXSW_SP_NEXTHOP_ACTION_DISCARD,
2996 /* Nexthop traps packets */
2997 MLXSW_SP_NEXTHOP_ACTION_TRAP,
3000 struct mlxsw_sp_nexthop_key {
3001 struct fib_nh *fib_nh;
3004 struct mlxsw_sp_nexthop {
3005 struct list_head neigh_list_node; /* member of neigh entry list */
3006 struct list_head crif_list_node;
3007 struct list_head router_list_node;
3008 struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
3009 * this nexthop belongs to
3011 struct rhash_head ht_node;
3012 struct neigh_table *neigh_tbl;
3013 struct mlxsw_sp_nexthop_key key;
3014 unsigned char gw_addr[sizeof(struct in6_addr)];
3018 int num_adj_entries;
3019 struct mlxsw_sp_crif *crif;
3020 u8 should_offload:1, /* set indicates this nexthop should be written
3021 * to the adjacency table.
3023 offloaded:1, /* set indicates this nexthop was written to the
3026 update:1; /* set indicates this nexthop should be updated in the
3027 * adjacency table (e.g., its MAC changed).
3029 enum mlxsw_sp_nexthop_action action;
3030 enum mlxsw_sp_nexthop_type type;
3032 struct mlxsw_sp_neigh_entry *neigh_entry;
3033 struct mlxsw_sp_ipip_entry *ipip_entry;
3035 unsigned int counter_index;
3039 static struct net_device *
3040 mlxsw_sp_nexthop_dev(const struct mlxsw_sp_nexthop *nh)
3044 return nh->crif->key.dev;
3047 enum mlxsw_sp_nexthop_group_type {
3048 MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
3049 MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
3050 MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
3053 struct mlxsw_sp_nexthop_group_info {
3054 struct mlxsw_sp_nexthop_group *nh_grp;
3058 int sum_norm_weight;
3059 u8 adj_index_valid:1,
3060 gateway:1, /* routes using the group use a gateway */
3062 struct list_head list; /* member in nh_res_grp_list */
3063 struct mlxsw_sp_nexthop nexthops[];
3066 static struct mlxsw_sp_rif *
3067 mlxsw_sp_nhgi_rif(const struct mlxsw_sp_nexthop_group_info *nhgi)
3069 struct mlxsw_sp_crif *crif = nhgi->nexthops[0].crif;
3076 struct mlxsw_sp_nexthop_group_vr_key {
3078 enum mlxsw_sp_l3proto proto;
3081 struct mlxsw_sp_nexthop_group_vr_entry {
3082 struct list_head list; /* member in vr_list */
3083 struct rhash_head ht_node; /* member in vr_ht */
3084 refcount_t ref_count;
3085 struct mlxsw_sp_nexthop_group_vr_key key;
3088 struct mlxsw_sp_nexthop_group {
3089 struct rhash_head ht_node;
3090 struct list_head fib_list; /* list of fib entries that use this group */
3093 struct fib_info *fi;
3099 struct mlxsw_sp_nexthop_group_info *nhgi;
3100 struct list_head vr_list;
3101 struct rhashtable vr_ht;
3102 enum mlxsw_sp_nexthop_group_type type;
3106 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
3107 struct mlxsw_sp_nexthop *nh)
3109 struct devlink *devlink;
3111 devlink = priv_to_devlink(mlxsw_sp->core);
3112 if (!devlink_dpipe_table_counter_enabled(devlink,
3113 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
3116 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
3119 nh->counter_valid = true;
3122 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
3123 struct mlxsw_sp_nexthop *nh)
3125 if (!nh->counter_valid)
3127 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
3128 nh->counter_valid = false;
3131 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
3132 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
3134 if (!nh->counter_valid)
3137 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
3141 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
3142 struct mlxsw_sp_nexthop *nh)
3145 if (list_empty(&router->nexthop_list))
3148 return list_first_entry(&router->nexthop_list,
3149 typeof(*nh), router_list_node);
3151 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
3153 return list_next_entry(nh, router_list_node);
3156 bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
3158 return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3161 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
3163 if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
3164 !mlxsw_sp_nexthop_is_forward(nh))
3166 return nh->neigh_entry->ha;
3169 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3170 u32 *p_adj_size, u32 *p_adj_hash_index)
3172 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3173 u32 adj_hash_index = 0;
3176 if (!nh->offloaded || !nhgi->adj_index_valid)
3179 *p_adj_index = nhgi->adj_index;
3180 *p_adj_size = nhgi->ecmp_size;
3182 for (i = 0; i < nhgi->count; i++) {
3183 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3187 if (nh_iter->offloaded)
3188 adj_hash_index += nh_iter->num_adj_entries;
3191 *p_adj_hash_index = adj_hash_index;
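/* Worked example: in a group whose offloaded nexthops occupy 2, 2 and 1
 * adjacency entries respectively, querying the third nexthop yields
 * *p_adj_size = 5 and *p_adj_hash_index = 4, i.e. the sum of the entries
 * occupied by the nexthops preceding it in the group.
 */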
3195 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3197 if (WARN_ON(!nh->crif))
3199 return nh->crif->rif;
3202 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3204 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3207 for (i = 0; i < nhgi->count; i++) {
3208 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3210 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3216 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3217 .key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3218 .head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3219 .key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3220 .automatic_shrinking = true,
3223 static struct mlxsw_sp_nexthop_group_vr_entry *
3224 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3225 const struct mlxsw_sp_fib *fib)
3227 struct mlxsw_sp_nexthop_group_vr_key key;
3229 memset(&key, 0, sizeof(key));
3230 key.vr_id = fib->vr->id;
3231 key.proto = fib->proto;
3232 return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3233 mlxsw_sp_nexthop_group_vr_ht_params);
3237 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3238 const struct mlxsw_sp_fib *fib)
3240 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3243 vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3247 vr_entry->key.vr_id = fib->vr->id;
3248 vr_entry->key.proto = fib->proto;
3249 refcount_set(&vr_entry->ref_count, 1);
3251 err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3252 mlxsw_sp_nexthop_group_vr_ht_params);
3254 goto err_hashtable_insert;
3256 list_add(&vr_entry->list, &nh_grp->vr_list);
3260 err_hashtable_insert:
3266 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3267 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3269 list_del(&vr_entry->list);
3270 rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3271 mlxsw_sp_nexthop_group_vr_ht_params);
3276 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3277 const struct mlxsw_sp_fib *fib)
3279 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3281 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3283 refcount_inc(&vr_entry->ref_count);
3287 return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3291 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3292 const struct mlxsw_sp_fib *fib)
3294 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3296 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3297 if (WARN_ON_ONCE(!vr_entry))
3300 if (!refcount_dec_and_test(&vr_entry->ref_count))
3303 mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3306 struct mlxsw_sp_nexthop_group_cmp_arg {
3307 enum mlxsw_sp_nexthop_group_type type;
3309 struct fib_info *fi;
3310 struct mlxsw_sp_fib6_entry *fib6_entry;
3316 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3317 const struct in6_addr *gw, int ifindex,
3322 for (i = 0; i < nh_grp->nhgi->count; i++) {
3323 const struct mlxsw_sp_nexthop *nh;
3325 nh = &nh_grp->nhgi->nexthops[i];
3326 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3327 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3335 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3336 const struct mlxsw_sp_fib6_entry *fib6_entry)
3338 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3340 if (nh_grp->nhgi->count != fib6_entry->nrt6)
3343 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3344 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3345 struct in6_addr *gw;
3346 int ifindex, weight;
3348 ifindex = fib6_nh->fib_nh_dev->ifindex;
3349 weight = fib6_nh->fib_nh_weight;
3350 gw = &fib6_nh->fib_nh_gw6;
3351 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3360 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3362 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3363 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3365 if (nh_grp->type != cmp_arg->type)
3368 switch (cmp_arg->type) {
3369 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3370 return cmp_arg->fi != nh_grp->ipv4.fi;
3371 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3372 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3373 cmp_arg->fib6_entry);
3374 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3375 return cmp_arg->id != nh_grp->obj.id;
3382 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3384 const struct mlxsw_sp_nexthop_group *nh_grp = data;
3385 const struct mlxsw_sp_nexthop *nh;
3386 struct fib_info *fi;
3390 switch (nh_grp->type) {
3391 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3392 fi = nh_grp->ipv4.fi;
3393 return jhash(&fi, sizeof(fi), seed);
3394 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3395 val = nh_grp->nhgi->count;
3396 for (i = 0; i < nh_grp->nhgi->count; i++) {
3397 nh = &nh_grp->nhgi->nexthops[i];
3398 val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3399 val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3401 return jhash(&val, sizeof(val), seed);
3402 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3403 return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3411 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3413 unsigned int val = fib6_entry->nrt6;
3414 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3416 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3417 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3418 struct net_device *dev = fib6_nh->fib_nh_dev;
3419 struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3421 val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3422 val ^= jhash(gw, sizeof(*gw), seed);
3425 return jhash(&val, sizeof(val), seed);
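/* Descriptive note: because the per-nexthop hashes are folded in with XOR,
 * the resulting group hash does not depend on the order of the routes in
 * the entry, consistent with mlxsw_sp_nexthop6_group_cmp() above, which
 * only checks that the same set of nexthops is present.
 */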
3429 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3431 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3433 switch (cmp_arg->type) {
3434 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3435 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3436 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3437 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3438 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3439 return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3446 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3447 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3448 .hashfn = mlxsw_sp_nexthop_group_hash,
3449 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
3450 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
3453 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3454 struct mlxsw_sp_nexthop_group *nh_grp)
3456 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3457 !nh_grp->nhgi->gateway)
3460 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3462 mlxsw_sp_nexthop_group_ht_params);
3465 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3466 struct mlxsw_sp_nexthop_group *nh_grp)
3468 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3469 !nh_grp->nhgi->gateway)
3472 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3474 mlxsw_sp_nexthop_group_ht_params);
3477 static struct mlxsw_sp_nexthop_group *
3478 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3479 struct fib_info *fi)
3481 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3483 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3485 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3487 mlxsw_sp_nexthop_group_ht_params);
3490 static struct mlxsw_sp_nexthop_group *
3491 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3492 struct mlxsw_sp_fib6_entry *fib6_entry)
3494 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3496 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3497 cmp_arg.fib6_entry = fib6_entry;
3498 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3500 mlxsw_sp_nexthop_group_ht_params);
3503 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3504 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3505 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3506 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
3509 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3510 struct mlxsw_sp_nexthop *nh)
3512 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3513 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3516 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3517 struct mlxsw_sp_nexthop *nh)
3519 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3520 mlxsw_sp_nexthop_ht_params);
3523 static struct mlxsw_sp_nexthop *
3524 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3525 struct mlxsw_sp_nexthop_key key)
3527 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3528 mlxsw_sp_nexthop_ht_params);
3531 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3532 enum mlxsw_sp_l3proto proto,
3534 u32 adj_index, u16 ecmp_size,
3538 char raleu_pl[MLXSW_REG_RALEU_LEN];
3540 mlxsw_reg_raleu_pack(raleu_pl,
3541 (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3542 adj_index, ecmp_size, new_adj_index,
3544 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3547 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3548 struct mlxsw_sp_nexthop_group *nh_grp,
3549 u32 old_adj_index, u16 old_ecmp_size)
3551 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3552 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3555 list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3556 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3557 vr_entry->key.proto,
3558 vr_entry->key.vr_id,
3564 goto err_mass_update_vr;
3569 list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3570 mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3571 vr_entry->key.vr_id,
3574 old_adj_index, old_ecmp_size);
3578 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3580 struct mlxsw_sp_nexthop *nh,
3581 bool force, char *ratr_pl)
3583 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3584 struct mlxsw_sp_rif *rif = mlxsw_sp_nexthop_rif(nh);
3585 enum mlxsw_reg_ratr_op op;
3588 rif_index = rif ? rif->rif_index :
3589 mlxsw_sp->router->lb_crif->rif->rif_index;
3590 op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3591 MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3592 mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3593 adj_index, rif_index);
3594 switch (nh->action) {
3595 case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3596 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3598 case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3599 mlxsw_reg_ratr_trap_action_set(ratr_pl,
3600 MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3602 case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3603 mlxsw_reg_ratr_trap_action_set(ratr_pl,
3604 MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3605 mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3611 if (nh->counter_valid)
3612 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3614 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3616 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
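/* Descriptive note: when the nexthop has no egress RIF (for example a
 * blackhole nexthop programmed with a discard action), the entry above is
 * written against the router's loopback RIF, which always exists; the trap
 * or discard action then determines what happens to matching packets.
 */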
3619 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3620 struct mlxsw_sp_nexthop *nh, bool force,
3625 for (i = 0; i < nh->num_adj_entries; i++) {
3628 err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3629 nh, force, ratr_pl);
3637 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3639 struct mlxsw_sp_nexthop *nh,
3640 bool force, char *ratr_pl)
3642 const struct mlxsw_sp_ipip_ops *ipip_ops;
3644 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3645 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3649 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3651 struct mlxsw_sp_nexthop *nh, bool force,
3656 for (i = 0; i < nh->num_adj_entries; i++) {
3659 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3660 nh, force, ratr_pl);
3668 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3669 struct mlxsw_sp_nexthop *nh, bool force,
3672 /* When action is discard or trap, the nexthop must be
3673 * programmed as an Ethernet nexthop.
3675 if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3676 nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3677 nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3678 return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3681 return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3686 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3687 struct mlxsw_sp_nexthop_group_info *nhgi,
3690 char ratr_pl[MLXSW_REG_RATR_LEN];
3691 u32 adj_index = nhgi->adj_index; /* base */
3692 struct mlxsw_sp_nexthop *nh;
3695 for (i = 0; i < nhgi->count; i++) {
3696 nh = &nhgi->nexthops[i];
3698 if (!nh->should_offload) {
3703 if (nh->update || reallocate) {
3706 err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3713 adj_index += nh->num_adj_entries;
3719 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3720 struct mlxsw_sp_nexthop_group *nh_grp)
3722 struct mlxsw_sp_fib_entry *fib_entry;
3725 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3726 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3733 struct mlxsw_sp_adj_grp_size_range {
3734 u16 start; /* Inclusive */
3735 u16 end; /* Inclusive */
3738 /* Ordered by range start value */
3739 static const struct mlxsw_sp_adj_grp_size_range
3740 mlxsw_sp1_adj_grp_size_ranges[] = {
3741 { .start = 1, .end = 64 },
3742 { .start = 512, .end = 512 },
3743 { .start = 1024, .end = 1024 },
3744 { .start = 2048, .end = 2048 },
3745 { .start = 4096, .end = 4096 },
3748 /* Ordered by range start value */
3749 static const struct mlxsw_sp_adj_grp_size_range
3750 mlxsw_sp2_adj_grp_size_ranges[] = {
3751 { .start = 1, .end = 128 },
3752 { .start = 256, .end = 256 },
3753 { .start = 512, .end = 512 },
3754 { .start = 1024, .end = 1024 },
3755 { .start = 2048, .end = 2048 },
3756 { .start = 4096, .end = 4096 },
3759 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3760 u16 *p_adj_grp_size)
3764 for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3765 const struct mlxsw_sp_adj_grp_size_range *size_range;
3767 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3769 if (*p_adj_grp_size >= size_range->start &&
3770 *p_adj_grp_size <= size_range->end)
3773 if (*p_adj_grp_size <= size_range->end) {
3774 *p_adj_grp_size = size_range->end;
3780 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3781 u16 *p_adj_grp_size,
3782 unsigned int alloc_size)
3786 for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3787 const struct mlxsw_sp_adj_grp_size_range *size_range;
3789 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3791 if (alloc_size >= size_range->end) {
3792 *p_adj_grp_size = size_range->end;
3798 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3799 u16 *p_adj_grp_size)
3801 unsigned int alloc_size;
3804 /* Round up the requested group size to the next size supported
3805 * by the device and make sure the request can be satisfied.
3807 mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3808 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3809 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3810 *p_adj_grp_size, &alloc_size);
3813 /* It is possible the allocation results in more allocated
3814 * entries than requested. Try to use as many of them as
3817 mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
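/* Illustrative only: a standalone user-space sketch (not part of this
 * driver) of how a requested group size maps onto the Spectrum-2 ranges
 * listed above; it mirrors mlxsw_sp_adj_grp_size_round_up(). After this
 * rounding, the driver additionally queries the KVD linear allocator and
 * adjusts the size to the largest supported value that still fits within
 * the reported allocation, so as many of the allocated entries as possible
 * are used. The helper name below is hypothetical.
 */
#include <stdio.h>

struct grp_size_range { unsigned short start, end; };	/* inclusive */

static const struct grp_size_range sp2_ranges[] = {
	{ 1, 128 }, { 256, 256 }, { 512, 512 },
	{ 1024, 1024 }, { 2048, 2048 }, { 4096, 4096 },
};

static unsigned short round_up_grp_size(unsigned short size)
{
	size_t i;

	for (i = 0; i < sizeof(sp2_ranges) / sizeof(sp2_ranges[0]); i++) {
		if (size >= sp2_ranges[i].start && size <= sp2_ranges[i].end)
			return size;			/* already supported */
		if (size <= sp2_ranges[i].end)
			return sp2_ranges[i].end;	/* round up to range end */
	}
	return size;	/* too large; left for the allocator to reject */
}

int main(void)
{
	printf("%u %u %u\n",
	       (unsigned int)round_up_grp_size(5),	/* 5    */
	       (unsigned int)round_up_grp_size(300),	/* 512  */
	       (unsigned int)round_up_grp_size(1500));	/* 2048 */
	return 0;
}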
3823 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3825 int i, g = 0, sum_norm_weight = 0;
3826 struct mlxsw_sp_nexthop *nh;
3828 for (i = 0; i < nhgi->count; i++) {
3829 nh = &nhgi->nexthops[i];
3831 if (!nh->should_offload)
3834 g = gcd(nh->nh_weight, g);
3839 for (i = 0; i < nhgi->count; i++) {
3840 nh = &nhgi->nexthops[i];
3842 if (!nh->should_offload)
3844 nh->norm_nh_weight = nh->nh_weight / g;
3845 sum_norm_weight += nh->norm_nh_weight;
3848 nhgi->sum_norm_weight = sum_norm_weight;
3852 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3854 int i, weight = 0, lower_bound = 0;
3855 int total = nhgi->sum_norm_weight;
3856 u16 ecmp_size = nhgi->ecmp_size;
3858 for (i = 0; i < nhgi->count; i++) {
3859 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3862 if (!nh->should_offload)
3864 weight += nh->norm_nh_weight;
3865 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3866 nh->num_adj_entries = upper_bound - lower_bound;
3867 lower_bound = upper_bound;
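/* Illustrative only: a standalone user-space sketch (not part of this
 * driver) of the arithmetic performed by
 * mlxsw_sp_nexthop_group_normalize() and
 * mlxsw_sp_nexthop_group_rebalance() above, assuming every nexthop is
 * offloadable. The example weights and helper names are hypothetical.
 */
#include <stdio.h>

static int gcd_int(int a, int b)
{
	while (b) {
		int t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	int weights[] = { 3, 6, 9 };	/* hypothetical nexthop weights */
	int n = 3, g = 0, sum = 0, lower = 0, w = 0, i;
	int ecmp_size;

	for (i = 0; i < n; i++)
		g = gcd_int(weights[i], g);
	for (i = 0; i < n; i++)
		sum += weights[i] / g;	/* normalized weights: 1, 2, 3 */

	ecmp_size = sum;	/* before any mlxsw_sp_fix_adj_grp_size() rounding */
	for (i = 0; i < n; i++) {
		int upper;

		w += weights[i] / g;
		/* DIV_ROUND_CLOSEST(ecmp_size * w, sum) for positive values */
		upper = (ecmp_size * w + sum / 2) / sum;
		printf("nexthop %d: %d adjacency entries\n", i, upper - lower);
		lower = upper;
	}
	return 0;	/* prints 1, 2 and 3 entries respectively */
}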
3871 static struct mlxsw_sp_nexthop *
3872 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3873 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3876 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3877 struct mlxsw_sp_nexthop_group *nh_grp)
3881 for (i = 0; i < nh_grp->nhgi->count; i++) {
3882 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3885 nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3887 nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3892 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3893 struct mlxsw_sp_fib6_entry *fib6_entry)
3895 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3897 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3898 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3899 struct mlxsw_sp_nexthop *nh;
3901 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3902 if (nh && nh->offloaded)
3903 fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3905 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3910 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3911 struct mlxsw_sp_nexthop_group *nh_grp)
3913 struct mlxsw_sp_fib6_entry *fib6_entry;
3915 /* Unfortunately, in IPv6 the route and the nexthop are described by
3916 * the same struct, so we need to iterate over all the routes using the
3917 * nexthop group and set / clear the offload indication for them.
3919 list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3920 common.nexthop_group_node)
3921 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3925 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3926 const struct mlxsw_sp_nexthop *nh,
3929 struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3930 bool offload = false, trap = false;
3932 if (nh->offloaded) {
3933 if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3938 nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3939 bucket_index, offload, trap);
3943 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3944 struct mlxsw_sp_nexthop_group *nh_grp)
3948 /* Do not update the flags if the nexthop group is being destroyed
3950 * 1. The nexthop object is being deleted, in which case the flags are
3952 * 2. The nexthop group was replaced by a newer group, in which case
3953 * the flags of the nexthop object were already updated based on the
3956 if (nh_grp->can_destroy)
3959 nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3960 nh_grp->nhgi->adj_index_valid, false);
3962 /* Update flags of individual nexthop buckets in case of a resilient
3965 if (!nh_grp->nhgi->is_resilient)
3968 for (i = 0; i < nh_grp->nhgi->count; i++) {
3969 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3971 mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
3976 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3977 struct mlxsw_sp_nexthop_group *nh_grp)
3979 switch (nh_grp->type) {
3980 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3981 mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3983 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3984 mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3986 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3987 mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
3993 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3994 struct mlxsw_sp_nexthop_group *nh_grp)
3996 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3997 u16 ecmp_size, old_ecmp_size;
3998 struct mlxsw_sp_nexthop *nh;
3999 bool offload_change = false;
4001 bool old_adj_index_valid;
4006 return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4008 for (i = 0; i < nhgi->count; i++) {
4009 nh = &nhgi->nexthops[i];
4011 if (nh->should_offload != nh->offloaded) {
4012 offload_change = true;
4013 if (nh->should_offload)
4017 if (!offload_change) {
4018 /* Nothing was added or removed, so no need to reallocate. Just
4019 * update MAC on existing adjacency indexes.
4021 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
4023 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4026 /* Flags of individual nexthop buckets might need to be
4029 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4032 mlxsw_sp_nexthop_group_normalize(nhgi);
4033 if (!nhgi->sum_norm_weight) {
4034 /* No neigh of this group is connected, so we just set
4035 * the trap and let everything flow through the kernel.
4041 ecmp_size = nhgi->sum_norm_weight;
4042 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
4044 /* No valid allocation size available. */
4047 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4048 ecmp_size, &adj_index);
4050 /* We ran out of KVD linear space, just set the
4051 * trap and let everything flow through the kernel.
4053 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
4056 old_adj_index_valid = nhgi->adj_index_valid;
4057 old_adj_index = nhgi->adj_index;
4058 old_ecmp_size = nhgi->ecmp_size;
4059 nhgi->adj_index_valid = 1;
4060 nhgi->adj_index = adj_index;
4061 nhgi->ecmp_size = ecmp_size;
4062 mlxsw_sp_nexthop_group_rebalance(nhgi);
4063 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
4065 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4069 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4071 if (!old_adj_index_valid) {
4072 /* The trap was set for fib entries, so we have to call
4073 * fib entry update to unset it and use the adjacency index.
4075 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4077 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
4083 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
4084 old_adj_index, old_ecmp_size);
4085 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4086 old_ecmp_size, old_adj_index);
4088 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
4095 old_adj_index_valid = nhgi->adj_index_valid;
4096 nhgi->adj_index_valid = 0;
4097 for (i = 0; i < nhgi->count; i++) {
4098 nh = &nhgi->nexthops[i];
4101 err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4103 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
4104 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4105 if (old_adj_index_valid)
4106 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4107 nhgi->ecmp_size, nhgi->adj_index);
4111 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
4115 nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
4116 nh->should_offload = 1;
4117 } else if (nh->nhgi->is_resilient) {
4118 nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4119 nh->should_offload = 1;
4121 nh->should_offload = 0;
4127 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
4128 struct mlxsw_sp_neigh_entry *neigh_entry)
4130 struct neighbour *n, *old_n = neigh_entry->key.n;
4131 struct mlxsw_sp_nexthop *nh;
4132 struct net_device *dev;
4133 bool entry_connected;
4137 nh = list_first_entry(&neigh_entry->nexthop_list,
4138 struct mlxsw_sp_nexthop, neigh_list_node);
4139 dev = mlxsw_sp_nexthop_dev(nh);
4141 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
4143 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
4146 neigh_event_send(n, NULL);
4149 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
4150 neigh_entry->key.n = n;
4151 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4153 goto err_neigh_entry_insert;
4155 read_lock_bh(&n->lock);
4156 nud_state = n->nud_state;
4158 read_unlock_bh(&n->lock);
4159 entry_connected = nud_state & NUD_VALID && !dead;
4161 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4163 neigh_release(old_n);
4165 __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4166 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4173 err_neigh_entry_insert:
4174 neigh_entry->key.n = old_n;
4175 mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4181 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4182 struct mlxsw_sp_neigh_entry *neigh_entry,
4183 bool removing, bool dead)
4185 struct mlxsw_sp_nexthop *nh;
4187 if (list_empty(&neigh_entry->nexthop_list))
4193 err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4196 dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4200 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4202 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4203 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4207 static void mlxsw_sp_nexthop_crif_init(struct mlxsw_sp_nexthop *nh,
4208 struct mlxsw_sp_crif *crif)
4214 list_add(&nh->crif_list_node, &crif->nexthop_list);
4217 static void mlxsw_sp_nexthop_crif_fini(struct mlxsw_sp_nexthop *nh)
4222 list_del(&nh->crif_list_node);
4226 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4227 struct mlxsw_sp_nexthop *nh)
4229 struct mlxsw_sp_neigh_entry *neigh_entry;
4230 struct net_device *dev;
4231 struct neighbour *n;
4235 if (WARN_ON(!nh->crif->rif))
4238 if (!nh->nhgi->gateway || nh->neigh_entry)
4240 dev = mlxsw_sp_nexthop_dev(nh);
4242 /* Take a reference on the neighbour here to ensure it is not
4243 * destroyed before the nexthop entry is finished.
4244 * The reference is taken either in neigh_lookup() or
4245 * in neigh_create() in case n is not found.
4247 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
4249 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
4252 neigh_event_send(n, NULL);
4254 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4256 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4257 if (IS_ERR(neigh_entry)) {
4259 goto err_neigh_entry_create;
4263 /* If that is the first nexthop connected to that neigh, add to
4264 * nexthop_neighs_list
4266 if (list_empty(&neigh_entry->nexthop_list))
4267 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4268 &mlxsw_sp->router->nexthop_neighs_list);
4270 nh->neigh_entry = neigh_entry;
4271 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4272 read_lock_bh(&n->lock);
4273 nud_state = n->nud_state;
4275 read_unlock_bh(&n->lock);
4276 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4280 err_neigh_entry_create:
4285 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4286 struct mlxsw_sp_nexthop *nh)
4288 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4289 struct neighbour *n;
4293 n = neigh_entry->key.n;
4295 __mlxsw_sp_nexthop_neigh_update(nh, true);
4296 list_del(&nh->neigh_list_node);
4297 nh->neigh_entry = NULL;
4299 /* If that is the last nexthop connected to that neigh, remove from
4300 * nexthop_neighs_list
4302 if (list_empty(&neigh_entry->nexthop_list))
4303 list_del(&neigh_entry->nexthop_neighs_list_node);
4305 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4306 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4311 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4313 struct net_device *ul_dev;
4317 ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4318 is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4324 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4325 struct mlxsw_sp_nexthop *nh,
4326 struct mlxsw_sp_ipip_entry *ipip_entry)
4328 struct mlxsw_sp_crif *crif;
4331 if (!nh->nhgi->gateway || nh->ipip_entry)
4334 crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, ipip_entry->ol_dev);
4338 nh->ipip_entry = ipip_entry;
4339 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4340 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4341 mlxsw_sp_nexthop_crif_init(nh, crif);
4344 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4345 struct mlxsw_sp_nexthop *nh)
4347 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4352 __mlxsw_sp_nexthop_neigh_update(nh, true);
4353 nh->ipip_entry = NULL;
4356 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4357 const struct fib_nh *fib_nh,
4358 enum mlxsw_sp_ipip_type *p_ipipt)
4360 struct net_device *dev = fib_nh->fib_nh_dev;
4363 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4364 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4367 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4368 struct mlxsw_sp_nexthop *nh,
4369 const struct net_device *dev)
4371 const struct mlxsw_sp_ipip_ops *ipip_ops;
4372 struct mlxsw_sp_ipip_entry *ipip_entry;
4373 struct mlxsw_sp_crif *crif;
4376 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4378 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4379 if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4380 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4381 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4386 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4387 crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, dev);
4391 mlxsw_sp_nexthop_crif_init(nh, crif);
4396 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4398 goto err_neigh_init;
4403 mlxsw_sp_nexthop_crif_fini(nh);
4407 static void mlxsw_sp_nexthop_type_rif_gone(struct mlxsw_sp *mlxsw_sp,
4408 struct mlxsw_sp_nexthop *nh)
4411 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4412 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4414 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4415 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4420 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4421 struct mlxsw_sp_nexthop *nh)
4423 mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4424 mlxsw_sp_nexthop_crif_fini(nh);
4427 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4428 struct mlxsw_sp_nexthop_group *nh_grp,
4429 struct mlxsw_sp_nexthop *nh,
4430 struct fib_nh *fib_nh)
4432 struct net_device *dev = fib_nh->fib_nh_dev;
4433 struct in_device *in_dev;
4436 nh->nhgi = nh_grp->nhgi;
4437 nh->key.fib_nh = fib_nh;
4438 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4439 nh->nh_weight = fib_nh->fib_nh_weight;
4443 memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4444 nh->neigh_tbl = &arp_tbl;
4445 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4449 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4450 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4454 nh->ifindex = dev->ifindex;
4457 in_dev = __in_dev_get_rcu(dev);
4458 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4459 fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4465 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4467 goto err_nexthop_neigh_init;
4471 err_nexthop_neigh_init:
4472 list_del(&nh->router_list_node);
4473 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4474 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4478 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4479 struct mlxsw_sp_nexthop *nh)
4481 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4482 list_del(&nh->router_list_node);
4483 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4484 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4487 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4488 unsigned long event, struct fib_nh *fib_nh)
4490 struct mlxsw_sp_nexthop_key key;
4491 struct mlxsw_sp_nexthop *nh;
4493 key.fib_nh = fib_nh;
4494 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4499 case FIB_EVENT_NH_ADD:
4500 mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4502 case FIB_EVENT_NH_DEL:
4503 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4507 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4510 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4511 struct mlxsw_sp_rif *rif)
4513 struct net_device *dev = mlxsw_sp_rif_dev(rif);
4514 struct mlxsw_sp_nexthop *nh;
4517 list_for_each_entry(nh, &rif->crif->nexthop_list, crif_list_node) {
4519 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4522 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4523 removing = !mlxsw_sp_ipip_netdev_ul_up(dev);
4530 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4531 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4535 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4536 struct mlxsw_sp_rif *rif)
4538 struct mlxsw_sp_nexthop *nh, *tmp;
4540 list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4542 mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4543 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4547 static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
4549 enum mlxsw_reg_ratr_trap_action trap_action;
4550 char ratr_pl[MLXSW_REG_RATR_LEN];
4553 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4554 &mlxsw_sp->router->adj_trap_index);
4558 trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
4559 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4560 MLXSW_REG_RATR_TYPE_ETHERNET,
4561 mlxsw_sp->router->adj_trap_index,
4562 mlxsw_sp->router->lb_crif->rif->rif_index);
4563 mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4564 mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
4565 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4567 goto err_ratr_write;
4572 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4573 mlxsw_sp->router->adj_trap_index);
4577 static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
4579 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4580 mlxsw_sp->router->adj_trap_index);
4583 static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
4587 if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
4590 err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
4594 refcount_set(&mlxsw_sp->router->num_groups, 1);
4599 static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
4601 if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
4604 mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
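/* Descriptive note: the shared trap adjacency entry exists only while at
 * least one nexthop group does. mlxsw_sp_nexthop_group_inc() allocates it
 * for the first group and mlxsw_sp_nexthop_group_dec() frees it when the
 * last group is removed, with router->num_groups acting as the reference
 * count.
 */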
4608 mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
4609 const struct mlxsw_sp_nexthop_group *nh_grp,
4610 unsigned long *activity)
4615 ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
4619 mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
4620 nh_grp->nhgi->count);
4621 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
4625 for (i = 0; i < nh_grp->nhgi->count; i++) {
4626 if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
4628 bitmap_set(activity, i, 1);
4635 #define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
4638 mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
4639 const struct mlxsw_sp_nexthop_group *nh_grp)
4641 unsigned long *activity;
4643 activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
4647 mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
4648 nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4649 nh_grp->nhgi->count, activity);
4651 bitmap_free(activity);
4655 mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
4657 unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
4659 mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
4660 msecs_to_jiffies(interval));
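/* Resilient nexthop groups are kept on router->nh_res_grp_list. The
 * delayed work below runs every MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL
 * (1 s), reads the per-bucket activity vector from the device (the RATRAD
 * query also clears it) and reports active buckets to the nexthop core
 * through nexthop_res_grp_activity_update(), so the kernel's idle timers
 * for bucket replacement see up-to-date activity. The work keeps
 * rescheduling itself as long as at least one resilient group exists.
 */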
4663 static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
4665 struct mlxsw_sp_nexthop_group_info *nhgi;
4666 struct mlxsw_sp_router *router;
4667 bool reschedule = false;
4669 router = container_of(work, struct mlxsw_sp_router,
4670 nh_grp_activity_dw.work);
4672 mutex_lock(&router->lock);
4674 list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
4675 mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
4679 mutex_unlock(&router->lock);
4683 mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
4687 mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4688 const struct nh_notifier_single_info *nh,
4689 struct netlink_ext_ack *extack)
4694 NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4695 else if (nh->has_encap)
4696 NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4704 mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
4705 const struct nh_notifier_single_info *nh,
4706 struct netlink_ext_ack *extack)
4710 err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
4714 /* Device-only nexthops with an IPIP device are programmed as
4715 * encapsulating adjacency entries.
4717 if (!nh->gw_family && !nh->is_reject &&
4718 !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
4719 NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
4727 mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
4728 const struct nh_notifier_grp_info *nh_grp,
4729 struct netlink_ext_ack *extack)
4733 if (nh_grp->is_fdb) {
4734 NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
4738 for (i = 0; i < nh_grp->num_nh; i++) {
4739 const struct nh_notifier_single_info *nh;
4742 nh = &nh_grp->nh_entries[i].nh;
4743 err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4753 mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
4754 const struct nh_notifier_res_table_info *nh_res_table,
4755 struct netlink_ext_ack *extack)
4757 unsigned int alloc_size;
4758 bool valid_size = false;
4761 if (nh_res_table->num_nh_buckets < 32) {
4762 NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
4766 for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
4767 const struct mlxsw_sp_adj_grp_size_range *size_range;
4769 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
4771 if (nh_res_table->num_nh_buckets >= size_range->start &&
4772 nh_res_table->num_nh_buckets <= size_range->end) {
4779 NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
4783 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
4784 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4785 nh_res_table->num_nh_buckets,
4787 if (err || nh_res_table->num_nh_buckets != alloc_size) {
4788 NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
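/* Summary of the checks above: a resilient group needs at least 32
 * buckets, the bucket count must fall inside one of the adjacency group
 * size ranges supported by the ASIC, and KVDL must be able to allocate
 * exactly that many adjacency entries. For example (sizes here are
 * illustrative only), if the relevant KVDL partition allocates in blocks
 * of 32 or 64 entries, a request for 48 buckets is rejected because the
 * closest allocation would be 64.
 */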
4796 mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
4797 const struct nh_notifier_res_table_info *nh_res_table,
4798 struct netlink_ext_ack *extack)
4803 err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
4809 for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
4810 const struct nh_notifier_single_info *nh;
4813 nh = &nh_res_table->nhs[i];
4814 err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4823 static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
4824 unsigned long event,
4825 struct nh_notifier_info *info)
4827 struct nh_notifier_single_info *nh;
4829 if (event != NEXTHOP_EVENT_REPLACE &&
4830 event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
4831 event != NEXTHOP_EVENT_BUCKET_REPLACE)
4834 switch (info->type) {
4835 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4836 return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
4838 case NH_NOTIFIER_INFO_TYPE_GRP:
4839 return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
4842 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4843 return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
4846 case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
4847 nh = &info->nh_res_bucket->new_nh;
4848 return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4851 NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
4856 static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
4857 const struct nh_notifier_info *info)
4859 const struct net_device *dev;
4861 switch (info->type) {
4862 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4863 dev = info->nh->dev;
4864 return info->nh->gw_family || info->nh->is_reject ||
4865 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
4866 case NH_NOTIFIER_INFO_TYPE_GRP:
4867 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4868 /* Already validated earlier. */
4875 static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
4876 struct mlxsw_sp_nexthop *nh)
4878 nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
4879 nh->should_offload = 1;
4880 /* While nexthops that discard packets do not forward packets
4881 * via an egress RIF, they still need to be programmed using a
4882 * valid RIF, so use the loopback RIF created during init.
4884 nh->crif = mlxsw_sp->router->lb_crif;
4887 static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
4888 struct mlxsw_sp_nexthop *nh)
4891 nh->should_offload = 0;
4895 mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
4896 struct mlxsw_sp_nexthop_group *nh_grp,
4897 struct mlxsw_sp_nexthop *nh,
4898 struct nh_notifier_single_info *nh_obj, int weight)
4900 struct net_device *dev = nh_obj->dev;
4903 nh->nhgi = nh_grp->nhgi;
4904 nh->nh_weight = weight;
4906 switch (nh_obj->gw_family) {
4908 memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
4909 nh->neigh_tbl = &arp_tbl;
4912 memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
4913 #if IS_ENABLED(CONFIG_IPV6)
4914 nh->neigh_tbl = &nd_tbl;
4919 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4920 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4921 nh->ifindex = dev->ifindex;
4923 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4927 if (nh_obj->is_reject)
4928 mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
4930 /* In a resilient nexthop group, all the nexthops must be written to
4931 * the adjacency table. Even if they do not have a valid neighbour or
4932 * RIF.
4933 */
4934 if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
4935 nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4936 nh->should_offload = 1;
4942 list_del(&nh->router_list_node);
4943 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4947 static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
4948 struct mlxsw_sp_nexthop *nh)
4950 if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
4951 mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
4952 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4953 list_del(&nh->router_list_node);
4954 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4955 nh->should_offload = 0;
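/* Group info construction: the number of nexthops is 1 for a single
 * nexthop, num_nh for a multipath group and num_nh_buckets for a
 * resilient group. Each nexthop is initialized from the notifier info
 * (weight taken from the group entry, otherwise 1), the global group
 * count is bumped and the adjacency entries are written by
 * mlxsw_sp_nexthop_group_refresh(). Resilient groups are also added to
 * the activity polling list, kicking the polling work if it was idle.
 */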
4959 mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
4960 struct mlxsw_sp_nexthop_group *nh_grp,
4961 struct nh_notifier_info *info)
4963 struct mlxsw_sp_nexthop_group_info *nhgi;
4964 struct mlxsw_sp_nexthop *nh;
4965 bool is_resilient = false;
4969 switch (info->type) {
4970 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4973 case NH_NOTIFIER_INFO_TYPE_GRP:
4974 nhs = info->nh_grp->num_nh;
4976 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4977 nhs = info->nh_res_table->num_nh_buckets;
4978 is_resilient = true;
4984 nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
4987 nh_grp->nhgi = nhgi;
4988 nhgi->nh_grp = nh_grp;
4989 nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
4990 nhgi->is_resilient = is_resilient;
4992 for (i = 0; i < nhgi->count; i++) {
4993 struct nh_notifier_single_info *nh_obj;
4996 nh = &nhgi->nexthops[i];
4997 switch (info->type) {
4998 case NH_NOTIFIER_INFO_TYPE_SINGLE:
5002 case NH_NOTIFIER_INFO_TYPE_GRP:
5003 nh_obj = &info->nh_grp->nh_entries[i].nh;
5004 weight = info->nh_grp->nh_entries[i].weight;
5006 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
5007 nh_obj = &info->nh_res_table->nhs[i];
5012 goto err_nexthop_obj_init;
5014 err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
5017 goto err_nexthop_obj_init;
5019 err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5022 err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5024 NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
5025 goto err_group_refresh;
5028 /* Add resilient nexthop groups to a list so that the activity of their
5029 * nexthop buckets will be periodically queried and cleared.
5031 if (nhgi->is_resilient) {
5032 if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
5033 mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
5034 list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
5040 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5043 err_nexthop_obj_init:
5044 for (i--; i >= 0; i--) {
5045 nh = &nhgi->nexthops[i];
5046 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5053 mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5054 struct mlxsw_sp_nexthop_group *nh_grp)
5056 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5057 struct mlxsw_sp_router *router = mlxsw_sp->router;
5060 if (nhgi->is_resilient) {
5061 list_del(&nhgi->list);
5062 if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
5063 cancel_delayed_work(&router->nh_grp_activity_dw);
5066 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5067 for (i = nhgi->count - 1; i >= 0; i--) {
5068 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5070 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5072 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5073 WARN_ON_ONCE(nhgi->adj_index_valid);
5077 static struct mlxsw_sp_nexthop_group *
5078 mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
5079 struct nh_notifier_info *info)
5081 struct mlxsw_sp_nexthop_group *nh_grp;
5084 nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5086 return ERR_PTR(-ENOMEM);
5087 INIT_LIST_HEAD(&nh_grp->vr_list);
5088 err = rhashtable_init(&nh_grp->vr_ht,
5089 &mlxsw_sp_nexthop_group_vr_ht_params);
5091 goto err_nexthop_group_vr_ht_init;
5092 INIT_LIST_HEAD(&nh_grp->fib_list);
5093 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5094 nh_grp->obj.id = info->id;
5096 err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
5098 goto err_nexthop_group_info_init;
5100 nh_grp->can_destroy = false;
5104 err_nexthop_group_info_init:
5105 rhashtable_destroy(&nh_grp->vr_ht);
5106 err_nexthop_group_vr_ht_init:
5108 return ERR_PTR(err);
5112 mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
5113 struct mlxsw_sp_nexthop_group *nh_grp)
5115 if (!nh_grp->can_destroy)
5117 mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
5118 WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
5119 WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5120 rhashtable_destroy(&nh_grp->vr_ht);
5124 static struct mlxsw_sp_nexthop_group *
5125 mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
5127 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
5129 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5131 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
5133 mlxsw_sp_nexthop_group_ht_params);
5136 static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
5137 struct mlxsw_sp_nexthop_group *nh_grp)
5139 return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
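/* Replacing a nexthop group is done by swapping group infos rather than
 * by re-pointing routes: the freshly created group's nhgi is spliced into
 * the existing group so every fib entry keeps its nh_group pointer. Then,
 * depending on which adjacency indexes are valid, the device is either
 * told to switch to the new adjacency index, or the routes are rewritten
 * to trap to the CPU / forward again. Finally the temporary group, now
 * holding the old nhgi, is destroyed; on error the swap is undone.
 */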
5143 mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
5144 struct mlxsw_sp_nexthop_group *nh_grp,
5145 struct mlxsw_sp_nexthop_group *old_nh_grp,
5146 struct netlink_ext_ack *extack)
5148 struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
5149 struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
5152 old_nh_grp->nhgi = new_nhgi;
5153 new_nhgi->nh_grp = old_nh_grp;
5154 nh_grp->nhgi = old_nhgi;
5155 old_nhgi->nh_grp = nh_grp;
5157 if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5158 /* Both the old adjacency index and the new one are valid.
5159 * Routes are currently using the old one. Tell the device to
5160 * replace the old adjacency index with the new one.
5162 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
5163 old_nhgi->adj_index,
5164 old_nhgi->ecmp_size);
5166 NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
5169 } else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
5170 /* The old adjacency index is valid, while the new one is not.
5171 * Iterate over all the routes using the group and change them
5172 * to trap packets to the CPU.
5174 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5176 NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
5179 } else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5180 /* The old adjacency index is invalid, while the new one is.
5181 * Iterate over all the routes using the group and change them
5182 * to forward packets using the new valid index.
5184 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5186 NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
5191 /* Make sure the flags are set / cleared based on the new nexthop group
5192 * information.
5193 */
5194 mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
5196 /* At this point 'nh_grp' is just a shell that is not used by anyone
5197 * and its nexthop group info is the old info that was just replaced
5198 * with the new one. Remove it.
5200 nh_grp->can_destroy = true;
5201 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5206 old_nhgi->nh_grp = old_nh_grp;
5207 nh_grp->nhgi = new_nhgi;
5208 new_nhgi->nh_grp = nh_grp;
5209 old_nh_grp->nhgi = old_nhgi;
5213 static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
5214 struct nh_notifier_info *info)
5216 struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
5217 struct netlink_ext_ack *extack = info->extack;
5220 nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
5222 return PTR_ERR(nh_grp);
5224 old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5226 err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
5228 err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
5229 old_nh_grp, extack);
5232 nh_grp->can_destroy = true;
5233 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5239 static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
5240 struct nh_notifier_info *info)
5242 struct mlxsw_sp_nexthop_group *nh_grp;
5244 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5248 nh_grp->can_destroy = true;
5249 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5251 /* If the group still has routes using it, then defer the delete
5252 * operation until the last route using it is deleted.
5254 if (!list_empty(&nh_grp->fib_list))
5256 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5259 static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
5260 u32 adj_index, char *ratr_pl)
5262 MLXSW_REG_ZERO(ratr, ratr_pl);
5263 mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5264 mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
5265 mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);
5267 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
5270 static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
5272 /* Clear the opcode and activity on both the old and new payload as
5273 * they are irrelevant for the comparison.
5275 mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5276 mlxsw_reg_ratr_a_set(ratr_pl, 0);
5277 mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
5278 mlxsw_reg_ratr_a_set(ratr_pl_new, 0);
5280 /* If the contents of the adjacency entry are consistent with the
5281 * replacement request, then replacement was successful.
5283 if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
5290 mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
5291 struct mlxsw_sp_nexthop *nh,
5292 struct nh_notifier_info *info)
5294 u16 bucket_index = info->nh_res_bucket->bucket_index;
5295 struct netlink_ext_ack *extack = info->extack;
5296 bool force = info->nh_res_bucket->force;
5297 char ratr_pl_new[MLXSW_REG_RATR_LEN];
5298 char ratr_pl[MLXSW_REG_RATR_LEN];
5302 /* No point in trying an atomic replacement if the idle timer interval
5303 * is smaller than the interval in which we query and clear activity.
5305 if (!force && info->nh_res_bucket->idle_timer_ms <
5306 MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
5309 adj_index = nh->nhgi->adj_index + bucket_index;
5310 err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
5312 NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
5317 err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
5320 NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
5324 err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
5326 NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
5333 mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);
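/* Nexthop bucket replacement: look up the group by object ID, validate
 * the bucket index, tear down the nexthop currently occupying the bucket
 * and re-initialize it from the new nexthop configuration, then update
 * the single adjacency entry. A non-forced update is attempted only when
 * the bucket's idle timer is longer than the activity polling interval,
 * and the entry is read back and compared to verify the device did not
 * reject the write because the bucket was still active. On failure the
 * old nexthop configuration is restored.
 */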
5338 static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
5339 struct nh_notifier_info *info)
5341 u16 bucket_index = info->nh_res_bucket->bucket_index;
5342 struct netlink_ext_ack *extack = info->extack;
5343 struct mlxsw_sp_nexthop_group_info *nhgi;
5344 struct nh_notifier_single_info *nh_obj;
5345 struct mlxsw_sp_nexthop_group *nh_grp;
5346 struct mlxsw_sp_nexthop *nh;
5349 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5351 NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
5355 nhgi = nh_grp->nhgi;
5357 if (bucket_index >= nhgi->count) {
5358 NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
5362 nh = &nhgi->nexthops[bucket_index];
5363 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5365 nh_obj = &info->nh_res_bucket->new_nh;
5366 err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5368 NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
5369 goto err_nexthop_obj_init;
5372 err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
5374 goto err_nexthop_obj_bucket_adj_update;
5378 err_nexthop_obj_bucket_adj_update:
5379 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5380 err_nexthop_obj_init:
5381 nh_obj = &info->nh_res_bucket->old_nh;
5382 mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5383 /* The old adjacency entry was not overwritten */
5389 static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
5390 unsigned long event, void *ptr)
5392 struct nh_notifier_info *info = ptr;
5393 struct mlxsw_sp_router *router;
5396 router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
5397 err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
5401 mutex_lock(&router->lock);
5404 case NEXTHOP_EVENT_REPLACE:
5405 err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
5407 case NEXTHOP_EVENT_DEL:
5408 mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
5410 case NEXTHOP_EVENT_BUCKET_REPLACE:
5411 err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
5418 mutex_unlock(&router->lock);
5421 return notifier_from_errno(err);
5424 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5425 struct fib_info *fi)
5427 const struct fib_nh *nh = fib_info_nh(fi, 0);
5429 return nh->fib_nh_gw_family ||
5430 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
5434 mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
5435 struct mlxsw_sp_nexthop_group *nh_grp)
5437 unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
5438 struct mlxsw_sp_nexthop_group_info *nhgi;
5439 struct mlxsw_sp_nexthop *nh;
5442 nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5445 nh_grp->nhgi = nhgi;
5446 nhgi->nh_grp = nh_grp;
5447 nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
5449 for (i = 0; i < nhgi->count; i++) {
5450 struct fib_nh *fib_nh;
5452 nh = &nhgi->nexthops[i];
5453 fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
5454 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
5456 goto err_nexthop4_init;
5458 err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5461 err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5463 goto err_group_refresh;
5468 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5472 for (i--; i >= 0; i--) {
5473 nh = &nhgi->nexthops[i];
5474 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5481 mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5482 struct mlxsw_sp_nexthop_group *nh_grp)
5484 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5487 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5488 for (i = nhgi->count - 1; i >= 0; i--) {
5489 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5491 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5493 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5494 WARN_ON_ONCE(nhgi->adj_index_valid);
5498 static struct mlxsw_sp_nexthop_group *
5499 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
5501 struct mlxsw_sp_nexthop_group *nh_grp;
5504 nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5506 return ERR_PTR(-ENOMEM);
5507 INIT_LIST_HEAD(&nh_grp->vr_list);
5508 err = rhashtable_init(&nh_grp->vr_ht,
5509 &mlxsw_sp_nexthop_group_vr_ht_params);
5511 goto err_nexthop_group_vr_ht_init;
5512 INIT_LIST_HEAD(&nh_grp->fib_list);
5513 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
5514 nh_grp->ipv4.fi = fi;
5517 err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
5519 goto err_nexthop_group_info_init;
5521 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5523 goto err_nexthop_group_insert;
5525 nh_grp->can_destroy = true;
5529 err_nexthop_group_insert:
5530 mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5531 err_nexthop_group_info_init:
5533 rhashtable_destroy(&nh_grp->vr_ht);
5534 err_nexthop_group_vr_ht_init:
5536 return ERR_PTR(err);
5540 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
5541 struct mlxsw_sp_nexthop_group *nh_grp)
5543 if (!nh_grp->can_destroy)
5545 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5546 mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5547 fib_info_put(nh_grp->ipv4.fi);
5548 WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5549 rhashtable_destroy(&nh_grp->vr_ht);
5553 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
5554 struct mlxsw_sp_fib_entry *fib_entry,
5555 struct fib_info *fi)
5557 struct mlxsw_sp_nexthop_group *nh_grp;
5560 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
5562 if (WARN_ON_ONCE(!nh_grp))
5567 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
5569 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
5571 return PTR_ERR(nh_grp);
5574 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
5575 fib_entry->nh_group = nh_grp;
5579 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
5580 struct mlxsw_sp_fib_entry *fib_entry)
5582 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5584 list_del(&fib_entry->nexthop_group_node);
5585 if (!list_empty(&nh_grp->fib_list))
5588 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
5589 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5593 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
5597 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5599 struct mlxsw_sp_fib4_entry *fib4_entry;
5601 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5603 return !fib4_entry->dscp;
5607 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5609 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5611 switch (fib_entry->fib_node->fib->proto) {
5612 case MLXSW_SP_L3_PROTO_IPV4:
5613 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
5616 case MLXSW_SP_L3_PROTO_IPV6:
5620 switch (fib_entry->type) {
5621 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
5622 return !!nh_group->nhgi->adj_index_valid;
5623 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
5624 return !!mlxsw_sp_nhgi_rif(nh_group->nhgi);
5625 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
5626 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5627 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
5634 static struct mlxsw_sp_nexthop *
5635 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
5636 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5640 for (i = 0; i < nh_grp->nhgi->count; i++) {
5641 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
5642 struct net_device *dev = mlxsw_sp_nexthop_dev(nh);
5643 struct fib6_info *rt = mlxsw_sp_rt6->rt;
5645 if (dev && dev == rt->fib6_nh->fib_nh_dev &&
5646 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
5647 &rt->fib6_nh->fib_nh_gw6))
5655 mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5656 struct fib_entry_notifier_info *fen_info)
5658 u32 *p_dst = (u32 *) &fen_info->dst;
5659 struct fib_rt_info fri;
5661 fri.fi = fen_info->fi;
5662 fri.tb_id = fen_info->tb_id;
5663 fri.dst = cpu_to_be32(*p_dst);
5664 fri.dst_len = fen_info->dst_len;
5665 fri.dscp = fen_info->dscp;
5666 fri.type = fen_info->type;
5667 fri.offload = false;
5669 fri.offload_failed = true;
5670 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5674 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5675 struct mlxsw_sp_fib_entry *fib_entry)
5677 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5678 int dst_len = fib_entry->fib_node->key.prefix_len;
5679 struct mlxsw_sp_fib4_entry *fib4_entry;
5680 struct fib_rt_info fri;
5681 bool should_offload;
5683 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5684 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5686 fri.fi = fib4_entry->fi;
5687 fri.tb_id = fib4_entry->tb_id;
5688 fri.dst = cpu_to_be32(*p_dst);
5689 fri.dst_len = dst_len;
5690 fri.dscp = fib4_entry->dscp;
5691 fri.type = fib4_entry->type;
5692 fri.offload = should_offload;
5693 fri.trap = !should_offload;
5694 fri.offload_failed = false;
5695 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5699 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5700 struct mlxsw_sp_fib_entry *fib_entry)
5702 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5703 int dst_len = fib_entry->fib_node->key.prefix_len;
5704 struct mlxsw_sp_fib4_entry *fib4_entry;
5705 struct fib_rt_info fri;
5707 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5709 fri.fi = fib4_entry->fi;
5710 fri.tb_id = fib4_entry->tb_id;
5711 fri.dst = cpu_to_be32(*p_dst);
5712 fri.dst_len = dst_len;
5713 fri.dscp = fib4_entry->dscp;
5714 fri.type = fib4_entry->type;
5715 fri.offload = false;
5717 fri.offload_failed = false;
5718 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
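/* After programming an entry, its state is reflected back into the kernel
 * FIB: offload means the route is forwarded by the ASIC, trap means it
 * hits the ASIC but packets are punted to the CPU, and offload_failed
 * marks routes that could not be installed. IPv4 uses
 * fib_alias_hw_flags_set(); for IPv6 the flags are set on every sibling
 * fib6_info, since a multipath route is represented by several of them.
 */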
5721 #if IS_ENABLED(CONFIG_IPV6)
5723 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5724 struct fib6_info **rt_arr,
5729 /* In IPv6 a multipath route is represented using multiple routes, so
5730 * we need to set the flags on all of them.
5732 for (i = 0; i < nrt6; i++)
5733 fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
5734 false, false, true);
5738 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5739 struct fib6_info **rt_arr,
5745 #if IS_ENABLED(CONFIG_IPV6)
5747 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5748 struct mlxsw_sp_fib_entry *fib_entry)
5750 struct mlxsw_sp_fib6_entry *fib6_entry;
5751 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5752 bool should_offload;
5754 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5756 /* In IPv6 a multipath route is represented using multiple routes, so
5757 * we need to set the flags on all of them.
5759 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5761 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5762 fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5763 should_offload, !should_offload, false);
5767 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5768 struct mlxsw_sp_fib_entry *fib_entry)
5773 #if IS_ENABLED(CONFIG_IPV6)
5775 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5776 struct mlxsw_sp_fib_entry *fib_entry)
5778 struct mlxsw_sp_fib6_entry *fib6_entry;
5779 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5781 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5783 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5784 fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5785 false, false, false);
5789 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5790 struct mlxsw_sp_fib_entry *fib_entry)
5796 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5797 struct mlxsw_sp_fib_entry *fib_entry)
5799 switch (fib_entry->fib_node->fib->proto) {
5800 case MLXSW_SP_L3_PROTO_IPV4:
5801 mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
5803 case MLXSW_SP_L3_PROTO_IPV6:
5804 mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
5810 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5811 struct mlxsw_sp_fib_entry *fib_entry)
5813 switch (fib_entry->fib_node->fib->proto) {
5814 case MLXSW_SP_L3_PROTO_IPV4:
5815 mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5817 case MLXSW_SP_L3_PROTO_IPV6:
5818 mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5824 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
5825 struct mlxsw_sp_fib_entry *fib_entry,
5826 enum mlxsw_reg_ralue_op op)
5829 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
5830 mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
5832 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
5833 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5841 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
5842 const struct mlxsw_sp_fib_entry *fib_entry,
5843 enum mlxsw_reg_ralue_op op)
5845 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
5846 enum mlxsw_reg_ralxx_protocol proto;
5849 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
5851 switch (fib->proto) {
5852 case MLXSW_SP_L3_PROTO_IPV4:
5853 p_dip = (u32 *) fib_entry->fib_node->key.addr;
5854 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
5855 fib_entry->fib_node->key.prefix_len,
5858 case MLXSW_SP_L3_PROTO_IPV6:
5859 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
5860 fib_entry->fib_node->key.prefix_len,
5861 fib_entry->fib_node->key.addr);
5866 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
5867 struct mlxsw_sp_fib_entry *fib_entry,
5868 enum mlxsw_reg_ralue_op op)
5870 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5871 struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
5872 char ralue_pl[MLXSW_REG_RALUE_LEN];
5873 enum mlxsw_reg_ralue_trap_action trap_action;
5875 u32 adjacency_index = 0;
5878 /* In case the nexthop group adjacency index is valid, use it
4879 * with the provided ECMP size. Otherwise, set up a trap and pass
4880 * traffic to the kernel.
5882 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5883 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5884 adjacency_index = nhgi->adj_index;
5885 ecmp_size = nhgi->ecmp_size;
5886 } else if (!nhgi->adj_index_valid && nhgi->count &&
5887 mlxsw_sp_nhgi_rif(nhgi)) {
5888 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5889 adjacency_index = mlxsw_sp->router->adj_trap_index;
5892 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5893 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5896 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5897 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
5898 adjacency_index, ecmp_size);
5899 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5902 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
5903 struct mlxsw_sp_fib_entry *fib_entry,
5904 enum mlxsw_reg_ralue_op op)
5906 struct mlxsw_sp_rif *rif = mlxsw_sp_nhgi_rif(fib_entry->nh_group->nhgi);
5907 enum mlxsw_reg_ralue_trap_action trap_action;
5908 char ralue_pl[MLXSW_REG_RALUE_LEN];
5912 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5913 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5914 rif_index = rif->rif_index;
5916 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5917 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5920 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5921 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
5923 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5926 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
5927 struct mlxsw_sp_fib_entry *fib_entry,
5928 enum mlxsw_reg_ralue_op op)
5930 char ralue_pl[MLXSW_REG_RALUE_LEN];
5932 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5933 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5934 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5937 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
5938 struct mlxsw_sp_fib_entry *fib_entry,
5939 enum mlxsw_reg_ralue_op op)
5941 enum mlxsw_reg_ralue_trap_action trap_action;
5942 char ralue_pl[MLXSW_REG_RALUE_LEN];
5944 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
5945 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5946 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
5947 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5951 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
5952 struct mlxsw_sp_fib_entry *fib_entry,
5953 enum mlxsw_reg_ralue_op op)
5955 enum mlxsw_reg_ralue_trap_action trap_action;
5956 char ralue_pl[MLXSW_REG_RALUE_LEN];
5959 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5960 trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
5962 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5963 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
5964 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5968 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
5969 struct mlxsw_sp_fib_entry *fib_entry,
5970 enum mlxsw_reg_ralue_op op)
5972 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
5973 const struct mlxsw_sp_ipip_ops *ipip_ops;
5974 char ralue_pl[MLXSW_REG_RALUE_LEN];
5977 if (WARN_ON(!ipip_entry))
5980 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
5981 err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
5982 fib_entry->decap.tunnel_index);
5986 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5987 mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
5988 fib_entry->decap.tunnel_index);
5989 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5992 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
5993 struct mlxsw_sp_fib_entry *fib_entry,
5994 enum mlxsw_reg_ralue_op op)
5996 char ralue_pl[MLXSW_REG_RALUE_LEN];
5998 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5999 mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
6000 fib_entry->decap.tunnel_index);
6001 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
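/* __mlxsw_sp_fib_entry_op() below picks the RALUE action per entry type:
 * remote entries point at an adjacency group (or at the trap adjacency /
 * a CPU trap when the group cannot be offloaded), local entries point at
 * an egress RIF, trap entries go to the CPU, blackhole entries discard,
 * unreachable entries trap with a lower-priority trap ID, and the decap
 * types resolve to an IP-to-me action with a tunnel index.
 */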
6004 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6005 struct mlxsw_sp_fib_entry *fib_entry,
6006 enum mlxsw_reg_ralue_op op)
6008 switch (fib_entry->type) {
6009 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
6010 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
6011 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
6012 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
6013 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
6014 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
6015 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
6016 return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
6017 case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
6018 return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
6020 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6021 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
6023 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
6024 return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
6029 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6030 struct mlxsw_sp_fib_entry *fib_entry,
6031 enum mlxsw_reg_ralue_op op)
6033 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
6038 mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
6043 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6044 struct mlxsw_sp_fib_entry *fib_entry)
6046 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
6047 MLXSW_REG_RALUE_OP_WRITE_WRITE);
6050 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
6051 struct mlxsw_sp_fib_entry *fib_entry)
6053 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
6054 MLXSW_REG_RALUE_OP_WRITE_DELETE);
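/* mlxsw_sp_fib4_entry_type_set() maps the kernel route type to an entry
 * type: local/broadcast routes become traps unless their address is an
 * IPIP underlay or NVE decap address (in which case a decap entry is
 * used), blackhole and unreachable/prohibit routes get the corresponding
 * drop/trap types, gateway routes become remote entries and anything else
 * is a local entry forwarded through a RIF.
 */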
6058 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6059 const struct fib_entry_notifier_info *fen_info,
6060 struct mlxsw_sp_fib_entry *fib_entry)
6062 struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
6063 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
6064 struct mlxsw_sp_router *router = mlxsw_sp->router;
6065 u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
6066 int ifindex = nhgi->nexthops[0].ifindex;
6067 struct mlxsw_sp_ipip_entry *ipip_entry;
6069 switch (fen_info->type) {
6071 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
6072 MLXSW_SP_L3_PROTO_IPV4, dip);
6073 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
6074 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
6075 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
6079 if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
6080 MLXSW_SP_L3_PROTO_IPV4,
6084 tunnel_index = router->nve_decap_config.tunnel_index;
6085 fib_entry->decap.tunnel_index = tunnel_index;
6086 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
6091 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6094 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6096 case RTN_UNREACHABLE:
6098 /* Packets hitting these routes need to be trapped, but
6099 * can do so with a lower priority than packets directed
6100 * at the host, so use action type local instead of trap.
6102 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6106 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6108 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6116 mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6117 struct mlxsw_sp_fib_entry *fib_entry)
6119 switch (fib_entry->type) {
6120 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6121 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
6129 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6130 struct mlxsw_sp_fib4_entry *fib4_entry)
6132 mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
6135 static struct mlxsw_sp_fib4_entry *
6136 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
6137 struct mlxsw_sp_fib_node *fib_node,
6138 const struct fib_entry_notifier_info *fen_info)
6140 struct mlxsw_sp_fib4_entry *fib4_entry;
6141 struct mlxsw_sp_fib_entry *fib_entry;
6144 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
6146 return ERR_PTR(-ENOMEM);
6147 fib_entry = &fib4_entry->common;
6149 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
6151 goto err_nexthop4_group_get;
6153 err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6156 goto err_nexthop_group_vr_link;
6158 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
6160 goto err_fib4_entry_type_set;
6162 fib4_entry->fi = fen_info->fi;
6163 fib_info_hold(fib4_entry->fi);
6164 fib4_entry->tb_id = fen_info->tb_id;
6165 fib4_entry->type = fen_info->type;
6166 fib4_entry->dscp = fen_info->dscp;
6168 fib_entry->fib_node = fib_node;
6172 err_fib4_entry_type_set:
6173 mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
6174 err_nexthop_group_vr_link:
6175 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6176 err_nexthop4_group_get:
6178 return ERR_PTR(err);
6181 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6182 struct mlxsw_sp_fib4_entry *fib4_entry)
6184 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6186 fib_info_put(fib4_entry->fi);
6187 mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
6188 mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
6190 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6194 static struct mlxsw_sp_fib4_entry *
6195 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6196 const struct fib_entry_notifier_info *fen_info)
6198 struct mlxsw_sp_fib4_entry *fib4_entry;
6199 struct mlxsw_sp_fib_node *fib_node;
6200 struct mlxsw_sp_fib *fib;
6201 struct mlxsw_sp_vr *vr;
6203 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
6206 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
6208 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
6209 sizeof(fen_info->dst),
6214 fib4_entry = container_of(fib_node->fib_entry,
6215 struct mlxsw_sp_fib4_entry, common);
6216 if (fib4_entry->tb_id == fen_info->tb_id &&
6217 fib4_entry->dscp == fen_info->dscp &&
6218 fib4_entry->type == fen_info->type &&
6219 fib4_entry->fi == fen_info->fi)
6225 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
6226 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
6227 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
6228 .key_len = sizeof(struct mlxsw_sp_fib_key),
6229 .automatic_shrinking = true,
6232 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
6233 struct mlxsw_sp_fib_node *fib_node)
6235 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
6236 mlxsw_sp_fib_ht_params);
6239 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
6240 struct mlxsw_sp_fib_node *fib_node)
6242 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
6243 mlxsw_sp_fib_ht_params);
6246 static struct mlxsw_sp_fib_node *
6247 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
6248 size_t addr_len, unsigned char prefix_len)
6250 struct mlxsw_sp_fib_key key;
6252 memset(&key, 0, sizeof(key));
6253 memcpy(key.addr, addr, addr_len);
6254 key.prefix_len = prefix_len;
6255 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
6258 static struct mlxsw_sp_fib_node *
6259 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
6260 size_t addr_len, unsigned char prefix_len)
6262 struct mlxsw_sp_fib_node *fib_node;
6264 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
6268 list_add(&fib_node->list, &fib->node_list);
6269 memcpy(fib_node->key.addr, addr, addr_len);
6270 fib_node->key.prefix_len = prefix_len;
6275 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
6277 list_del(&fib_node->list);
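/* LPM tree handling: every prefix length in use under a FIB holds a
 * reference on the FIB's LPM tree. Linking a node with a previously
 * unused prefix length builds a tree for the extended prefix usage and
 * rebinds all virtual routers using this FIB to it; unlinking the last
 * user of a prefix length tries to shrink back to a smaller tree and
 * simply keeps the current one if that fails.
 */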
6281 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
6282 struct mlxsw_sp_fib_node *fib_node)
6284 struct mlxsw_sp_prefix_usage req_prefix_usage;
6285 struct mlxsw_sp_fib *fib = fib_node->fib;
6286 struct mlxsw_sp_lpm_tree *lpm_tree;
6289 lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
6290 if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6293 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6294 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
6295 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6297 if (IS_ERR(lpm_tree))
6298 return PTR_ERR(lpm_tree);
6300 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6302 goto err_lpm_tree_replace;
6305 lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
6308 err_lpm_tree_replace:
6309 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6313 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
6314 struct mlxsw_sp_fib_node *fib_node)
6316 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
6317 struct mlxsw_sp_prefix_usage req_prefix_usage;
6318 struct mlxsw_sp_fib *fib = fib_node->fib;
6321 if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6323 /* Try to construct a new LPM tree from the current prefix usage
6324 * minus the unused one. If we fail, continue using the old one.
6326 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6327 mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
6328 fib_node->key.prefix_len);
6329 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6331 if (IS_ERR(lpm_tree))
6334 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6336 goto err_lpm_tree_replace;
6340 err_lpm_tree_replace:
6341 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6344 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
6345 struct mlxsw_sp_fib_node *fib_node,
6346 struct mlxsw_sp_fib *fib)
6350 err = mlxsw_sp_fib_node_insert(fib, fib_node);
6353 fib_node->fib = fib;
6355 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
6357 goto err_fib_lpm_tree_link;
6361 err_fib_lpm_tree_link:
6362 fib_node->fib = NULL;
6363 mlxsw_sp_fib_node_remove(fib, fib_node);
6367 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
6368 struct mlxsw_sp_fib_node *fib_node)
6370 struct mlxsw_sp_fib *fib = fib_node->fib;
6372 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
6373 fib_node->fib = NULL;
6374 mlxsw_sp_fib_node_remove(fib, fib_node);
6377 static struct mlxsw_sp_fib_node *
6378 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
6379 size_t addr_len, unsigned char prefix_len,
6380 enum mlxsw_sp_l3proto proto)
6382 struct mlxsw_sp_fib_node *fib_node;
6383 struct mlxsw_sp_fib *fib;
6384 struct mlxsw_sp_vr *vr;
6387 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
6389 return ERR_CAST(vr);
6390 fib = mlxsw_sp_vr_fib(vr, proto);
6392 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
6396 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
6399 goto err_fib_node_create;
6402 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
6404 goto err_fib_node_init;
6409 mlxsw_sp_fib_node_destroy(fib_node);
6410 err_fib_node_create:
6411 mlxsw_sp_vr_put(mlxsw_sp, vr);
6412 return ERR_PTR(err);
6415 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
6416 struct mlxsw_sp_fib_node *fib_node)
6418 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
6420 if (fib_node->fib_entry)
6422 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
6423 mlxsw_sp_fib_node_destroy(fib_node);
6424 mlxsw_sp_vr_put(mlxsw_sp, vr);
6427 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
6428 struct mlxsw_sp_fib_entry *fib_entry)
6430 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6433 fib_node->fib_entry = fib_entry;
6435 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
6437 goto err_fib_entry_update;
6441 err_fib_entry_update:
6442 fib_node->fib_entry = NULL;
6447 mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6448 struct mlxsw_sp_fib_entry *fib_entry)
6450 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6452 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
6453 fib_node->fib_entry = NULL;
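/* A route from the main table is not allowed to replace an entry that was
 * installed from the local table for the same prefix, so host and
 * broadcast routes keep their precedence in hardware.
 */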
6456 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
6458 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6459 struct mlxsw_sp_fib4_entry *fib4_replaced;
6461 if (!fib_node->fib_entry)
6464 fib4_replaced = container_of(fib_node->fib_entry,
6465 struct mlxsw_sp_fib4_entry, common);
6466 if (fib4_entry->tb_id == RT_TABLE_MAIN &&
6467 fib4_replaced->tb_id == RT_TABLE_LOCAL)
6474 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
6475 const struct fib_entry_notifier_info *fen_info)
6477 struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
6478 struct mlxsw_sp_fib_entry *replaced;
6479 struct mlxsw_sp_fib_node *fib_node;
6482 if (fen_info->fi->nh &&
6483 !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
6486 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
6487 &fen_info->dst, sizeof(fen_info->dst),
6489 MLXSW_SP_L3_PROTO_IPV4);
6490 if (IS_ERR(fib_node)) {
6491 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
6492 return PTR_ERR(fib_node);
6495 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
6496 if (IS_ERR(fib4_entry)) {
6497 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
6498 err = PTR_ERR(fib4_entry);
6499 goto err_fib4_entry_create;
6502 if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
6503 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6504 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6508 replaced = fib_node->fib_entry;
6509 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
6511 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
6512 goto err_fib_node_entry_link;
6515 /* Nothing to replace */
6519 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
6520 fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
6522 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
6526 err_fib_node_entry_link:
6527 fib_node->fib_entry = replaced;
6528 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6529 err_fib4_entry_create:
6530 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6534 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
6535 struct fib_entry_notifier_info *fen_info)
6537 struct mlxsw_sp_fib4_entry *fib4_entry;
6538 struct mlxsw_sp_fib_node *fib_node;
6540 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
6543 fib_node = fib4_entry->common.fib_node;
6545 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
6546 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6547 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6550 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
6552 /* Multicast routes aren't supported, so ignore them. Neighbour
6553 * Discovery packets are specifically trapped.
6555 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
6558 /* Cloned routes are irrelevant in the forwarding path. */
6559 if (rt->fib6_flags & RTF_CACHE)
6565 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
6567 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6569 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
6571 return ERR_PTR(-ENOMEM);
6573 /* In case of route replace, replaced route is deleted with
6574 * no notification. Take reference to prevent accessing freed
6575 * memory.
6576 */
6577 mlxsw_sp_rt6->rt = rt;
6580 return mlxsw_sp_rt6;
6583 #if IS_ENABLED(CONFIG_IPV6)
6584 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6586 fib6_info_release(rt);
6589 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6594 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
6596 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
6598 if (!mlxsw_sp_rt6->rt->nh)
6599 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
6600 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
6601 kfree(mlxsw_sp_rt6);
6604 static struct fib6_info *
6605 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
6607 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
6611 static struct mlxsw_sp_rt6 *
6612 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
6613 const struct fib6_info *rt)
6615 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6617 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
6618 if (mlxsw_sp_rt6->rt == rt)
6619 return mlxsw_sp_rt6;
6625 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
6626 const struct fib6_info *rt,
6627 enum mlxsw_sp_ipip_type *ret)
6629 return rt->fib6_nh->fib_nh_dev &&
6630 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
6633 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
6634 struct mlxsw_sp_nexthop_group *nh_grp,
6635 struct mlxsw_sp_nexthop *nh,
6636 const struct fib6_info *rt)
6638 struct net_device *dev = rt->fib6_nh->fib_nh_dev;
6641 nh->nhgi = nh_grp->nhgi;
6642 nh->nh_weight = rt->fib6_nh->fib_nh_weight;
6643 memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
6644 #if IS_ENABLED(CONFIG_IPV6)
6645 nh->neigh_tbl = &nd_tbl;
6647 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
6649 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
6653 nh->ifindex = dev->ifindex;
6655 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
6657 goto err_nexthop_type_init;
6661 err_nexthop_type_init:
6662 list_del(&nh->router_list_node);
6663 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6667 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
6668 struct mlxsw_sp_nexthop *nh)
6670 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
6671 list_del(&nh->router_list_node);
6672 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6675 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
6676 const struct fib6_info *rt)
6678 return rt->fib6_nh->fib_nh_gw_family ||
6679 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
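/* An IPv6 multipath route is a list of sibling fib6_info, mirrored here
 * as mlxsw_sp_rt6 objects on fib6_entry->rt6_list. Each sibling becomes
 * one nexthop in the group built below; adding or deleting siblings later
 * rebuilds the group through mlxsw_sp_nexthop6_group_update().
 */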
6683 mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
6684 struct mlxsw_sp_nexthop_group *nh_grp,
6685 struct mlxsw_sp_fib6_entry *fib6_entry)
6687 struct mlxsw_sp_nexthop_group_info *nhgi;
6688 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6689 struct mlxsw_sp_nexthop *nh;
6692 nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
6696 nh_grp->nhgi = nhgi;
6697 nhgi->nh_grp = nh_grp;
6698 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
6699 struct mlxsw_sp_rt6, list);
6700 nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
6701 nhgi->count = fib6_entry->nrt6;
6702 for (i = 0; i < nhgi->count; i++) {
6703 struct fib6_info *rt = mlxsw_sp_rt6->rt;
6705 nh = &nhgi->nexthops[i];
6706 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
6708 goto err_nexthop6_init;
6709 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
6711 nh_grp->nhgi = nhgi;
6712 err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
6715 err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6717 goto err_group_refresh;
6722 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6726 for (i--; i >= 0; i--) {
6727 nh = &nhgi->nexthops[i];
6728 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6735 mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
6736 struct mlxsw_sp_nexthop_group *nh_grp)
6738 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
6741 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6742 for (i = nhgi->count - 1; i >= 0; i--) {
6743 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
6745 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6747 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6748 WARN_ON_ONCE(nhgi->adj_index_valid);
6752 static struct mlxsw_sp_nexthop_group *
6753 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
6754 struct mlxsw_sp_fib6_entry *fib6_entry)
6756 struct mlxsw_sp_nexthop_group *nh_grp;
6759 nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
6761 return ERR_PTR(-ENOMEM);
6762 INIT_LIST_HEAD(&nh_grp->vr_list);
6763 err = rhashtable_init(&nh_grp->vr_ht,
6764 &mlxsw_sp_nexthop_group_vr_ht_params);
6766 goto err_nexthop_group_vr_ht_init;
6767 INIT_LIST_HEAD(&nh_grp->fib_list);
6768 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
6770 err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
6772 goto err_nexthop_group_info_init;
6774 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
6776 goto err_nexthop_group_insert;
6778 nh_grp->can_destroy = true;
6782 err_nexthop_group_insert:
6783 mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6784 err_nexthop_group_info_init:
6785 rhashtable_destroy(&nh_grp->vr_ht);
6786 err_nexthop_group_vr_ht_init:
6788 return ERR_PTR(err);
6792 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
6793 struct mlxsw_sp_nexthop_group *nh_grp)
6795 if (!nh_grp->can_destroy)
6797 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
6798 mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6799 WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
6800 rhashtable_destroy(&nh_grp->vr_ht);
6804 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
6805 struct mlxsw_sp_fib6_entry *fib6_entry)
6807 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
6808 struct mlxsw_sp_nexthop_group *nh_grp;
6811 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
6813 if (WARN_ON_ONCE(!nh_grp))
6818 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
6820 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
6822 return PTR_ERR(nh_grp);
6825 /* The route and the nexthop are described by the same struct, so we
6826 * need to update the nexthop offload indication for the new route.
6828 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
6831 list_add_tail(&fib6_entry->common.nexthop_group_node,
6833 fib6_entry->common.nh_group = nh_grp;
6838 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
6839 struct mlxsw_sp_fib_entry *fib_entry)
6841 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
6843 list_del(&fib_entry->nexthop_group_node);
6844 if (!list_empty(&nh_grp->fib_list))
6847 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
6848 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
6852 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
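/* Rebind the FIB6 entry to a nexthop group matching its updated rt6
 * list: detach from the old group, get (or create) a suitable group,
 * update the entry in the device and destroy the old group if it is no
 * longer used. On failure the old binding is restored.
 */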
6856 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
6857 struct mlxsw_sp_fib6_entry *fib6_entry)
6859 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
6860 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
6863 mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
6864 fib6_entry->common.nh_group = NULL;
6865 list_del(&fib6_entry->common.nexthop_group_node);
6867 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
6869 goto err_nexthop6_group_get;
6871 err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
6874 goto err_nexthop_group_vr_link;
6876 /* In case this entry is offloaded, then the adjacency index
6877 * currently associated with it in the device's table is that
6878 * of the old group. Start using the new one instead.
6880 err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
6882 goto err_fib_entry_update;
6884 if (list_empty(&old_nh_grp->fib_list))
6885 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
6889 err_fib_entry_update:
6890 mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
6892 err_nexthop_group_vr_link:
6893 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
6894 err_nexthop6_group_get:
6895 list_add_tail(&fib6_entry->common.nexthop_group_node,
6896 &old_nh_grp->fib_list);
6897 fib6_entry->common.nh_group = old_nh_grp;
6898 mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
6903 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
6904 struct mlxsw_sp_fib6_entry *fib6_entry,
6905 struct fib6_info **rt_arr, unsigned int nrt6)
6907 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6910 for (i = 0; i < nrt6; i++) {
6911 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
6912 if (IS_ERR(mlxsw_sp_rt6)) {
6913 err = PTR_ERR(mlxsw_sp_rt6);
6914 goto err_rt6_unwind;
6917 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
6921 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
6923 goto err_rt6_unwind;
6928 for (; i > 0; i--) {
6930 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
6931 struct mlxsw_sp_rt6, list);
6932 list_del(&mlxsw_sp_rt6->list);
6933 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6939 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
6940 struct mlxsw_sp_fib6_entry *fib6_entry,
6941 struct fib6_info **rt_arr, unsigned int nrt6)
6943 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6946 for (i = 0; i < nrt6; i++) {
6947 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
6949 if (WARN_ON_ONCE(!mlxsw_sp_rt6))
6953 list_del(&mlxsw_sp_rt6->list);
6954 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6957 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
6961 mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
6962 struct mlxsw_sp_fib_entry *fib_entry,
6963 const struct fib6_info *rt)
6965 struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
6966 union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
6967 u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
6968 struct mlxsw_sp_router *router = mlxsw_sp->router;
6969 int ifindex = nhgi->nexthops[0].ifindex;
6970 struct mlxsw_sp_ipip_entry *ipip_entry;
6972 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6973 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
6974 MLXSW_SP_L3_PROTO_IPV6,
6977 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
6978 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
6979 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
6982 if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
6983 MLXSW_SP_L3_PROTO_IPV6, &dip)) {
6986 tunnel_index = router->nve_decap_config.tunnel_index;
6987 fib_entry->decap.tunnel_index = tunnel_index;
6988 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
6994 static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6995 struct mlxsw_sp_fib_entry *fib_entry,
6996 const struct fib6_info *rt)
6998 if (rt->fib6_flags & RTF_LOCAL)
6999 return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
7001 if (rt->fib6_flags & RTF_ANYCAST)
7002 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7003 else if (rt->fib6_type == RTN_BLACKHOLE)
7004 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
7005 else if (rt->fib6_flags & RTF_REJECT)
7006 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
7007 else if (fib_entry->nh_group->nhgi->gateway)
7008 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
7010 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
7016 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
7018 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
7020 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
7023 list_del(&mlxsw_sp_rt6->list);
7024 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7028 static struct mlxsw_sp_fib6_entry *
7029 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
7030 struct mlxsw_sp_fib_node *fib_node,
7031 struct fib6_info **rt_arr, unsigned int nrt6)
7033 struct mlxsw_sp_fib6_entry *fib6_entry;
7034 struct mlxsw_sp_fib_entry *fib_entry;
7035 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7038 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
7040 return ERR_PTR(-ENOMEM);
7041 fib_entry = &fib6_entry->common;
7043 INIT_LIST_HEAD(&fib6_entry->rt6_list);
7045 for (i = 0; i < nrt6; i++) {
7046 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
7047 if (IS_ERR(mlxsw_sp_rt6)) {
7048 err = PTR_ERR(mlxsw_sp_rt6);
7049 goto err_rt6_unwind;
7051 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
7055 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
7057 goto err_rt6_unwind;
7059 err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
7062 goto err_nexthop_group_vr_link;
7064 err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
7066 goto err_fib6_entry_type_set;
7068 fib_entry->fib_node = fib_node;
7072 err_fib6_entry_type_set:
7073 mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
7074 err_nexthop_group_vr_link:
7075 mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
7077 for (; i > 0; i--) {
7079 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
7080 struct mlxsw_sp_rt6, list);
7081 list_del(&mlxsw_sp_rt6->list);
7082 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7085 return ERR_PTR(err);
7089 mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
7090 struct mlxsw_sp_fib6_entry *fib6_entry)
7092 mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
7095 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
7096 struct mlxsw_sp_fib6_entry *fib6_entry)
7098 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7100 mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
7101 mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
7103 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
7104 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
7105 WARN_ON(fib6_entry->nrt6);
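/* Look up the FIB6 entry holding the given route: find the virtual
 * router and FIB node by table ID and prefix, then verify that the
 * node's entry matches the route's table, metric and rt6 list.
 */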
7109 static struct mlxsw_sp_fib6_entry *
7110 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
7111 const struct fib6_info *rt)
7113 struct mlxsw_sp_fib6_entry *fib6_entry;
7114 struct mlxsw_sp_fib_node *fib_node;
7115 struct mlxsw_sp_fib *fib;
7116 struct fib6_info *cmp_rt;
7117 struct mlxsw_sp_vr *vr;
7119 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
7122 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
7124 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
7125 sizeof(rt->fib6_dst.addr),
7130 fib6_entry = container_of(fib_node->fib_entry,
7131 struct mlxsw_sp_fib6_entry, common);
7132 cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7133 if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
7134 rt->fib6_metric == cmp_rt->fib6_metric &&
7135 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
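/* A replace is vetoed when a route from the main table would replace a
 * route from the local table, since both tables are squashed into a
 * single virtual router in the device.
 */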
7141 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
7143 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7144 struct mlxsw_sp_fib6_entry *fib6_replaced;
7145 struct fib6_info *rt, *rt_replaced;
7147 if (!fib_node->fib_entry)
7150 fib6_replaced = container_of(fib_node->fib_entry,
7151 struct mlxsw_sp_fib6_entry,
7153 rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7154 rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
7155 if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
7156 rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
7162 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
7163 struct fib6_info **rt_arr,
7166 struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
7167 struct mlxsw_sp_fib_entry *replaced;
7168 struct mlxsw_sp_fib_node *fib_node;
7169 struct fib6_info *rt = rt_arr[0];
7172 if (rt->fib6_src.plen)
7175 if (mlxsw_sp_fib6_rt_should_ignore(rt))
7178 if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
7181 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7183 sizeof(rt->fib6_dst.addr),
7185 MLXSW_SP_L3_PROTO_IPV6);
7186 if (IS_ERR(fib_node))
7187 return PTR_ERR(fib_node);
7189 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
7191 if (IS_ERR(fib6_entry)) {
7192 err = PTR_ERR(fib6_entry);
7193 goto err_fib6_entry_create;
7196 if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
7197 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7198 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7202 replaced = fib_node->fib_entry;
7203 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
7205 goto err_fib_node_entry_link;
7207 /* Nothing to replace */
7211 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
7212 fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
7214 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
7218 err_fib_node_entry_link:
7219 fib_node->fib_entry = replaced;
7220 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7221 err_fib6_entry_create:
7222 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7226 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
7227 struct fib6_info **rt_arr,
7230 struct mlxsw_sp_fib6_entry *fib6_entry;
7231 struct mlxsw_sp_fib_node *fib_node;
7232 struct fib6_info *rt = rt_arr[0];
7235 if (rt->fib6_src.plen)
7238 if (mlxsw_sp_fib6_rt_should_ignore(rt))
7241 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7243 sizeof(rt->fib6_dst.addr),
7245 MLXSW_SP_L3_PROTO_IPV6);
7246 if (IS_ERR(fib_node))
7247 return PTR_ERR(fib_node);
7249 if (WARN_ON_ONCE(!fib_node->fib_entry)) {
7250 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7254 fib6_entry = container_of(fib_node->fib_entry,
7255 struct mlxsw_sp_fib6_entry, common);
7256 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
7259 goto err_fib6_entry_nexthop_add;
7263 err_fib6_entry_nexthop_add:
7264 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7268 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
7269 struct fib6_info **rt_arr,
7272 struct mlxsw_sp_fib6_entry *fib6_entry;
7273 struct mlxsw_sp_fib_node *fib_node;
7274 struct fib6_info *rt = rt_arr[0];
7276 if (mlxsw_sp_fib6_rt_should_ignore(rt))
7279 /* Multipath routes are first added to the FIB trie and only then
7280 * notified. If we vetoed the addition, we will get a delete
7281 * notification for a route we do not have. Therefore, do not warn if
7282 * route was not found.
7284 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
7288 /* If not all the nexthops are deleted, then only reduce the nexthop group. */
7291 if (nrt6 != fib6_entry->nrt6) {
7292 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
7297 fib_node = fib6_entry->common.fib_node;
7299 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
7300 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7301 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
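/* Select the multicast routing table of the virtual router matching the
 * notifier family: IPMR maps to the IPv4 table, IP6MR to the IPv6 one.
 */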
7304 static struct mlxsw_sp_mr_table *
7305 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
7307 if (family == RTNL_FAMILY_IPMR)
7308 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
7310 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
7313 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
7314 struct mfc_entry_notifier_info *men_info,
7317 struct mlxsw_sp_mr_table *mrt;
7318 struct mlxsw_sp_vr *vr;
7320 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
7324 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7325 return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
7328 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
7329 struct mfc_entry_notifier_info *men_info)
7331 struct mlxsw_sp_mr_table *mrt;
7332 struct mlxsw_sp_vr *vr;
7334 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
7338 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7339 mlxsw_sp_mr_route_del(mrt, men_info->mfc);
7340 mlxsw_sp_vr_put(mlxsw_sp, vr);
7344 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
7345 struct vif_entry_notifier_info *ven_info)
7347 struct mlxsw_sp_mr_table *mrt;
7348 struct mlxsw_sp_rif *rif;
7349 struct mlxsw_sp_vr *vr;
7351 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
7355 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7356 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
7357 return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
7358 ven_info->vif_index,
7359 ven_info->vif_flags, rif);
7363 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
7364 struct vif_entry_notifier_info *ven_info)
7366 struct mlxsw_sp_mr_table *mrt;
7367 struct mlxsw_sp_vr *vr;
7369 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
7373 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7374 mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
7375 mlxsw_sp_vr_put(mlxsw_sp, vr);
7378 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
7379 struct mlxsw_sp_fib_node *fib_node)
7381 struct mlxsw_sp_fib4_entry *fib4_entry;
7383 fib4_entry = container_of(fib_node->fib_entry,
7384 struct mlxsw_sp_fib4_entry, common);
7385 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7386 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
7387 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7390 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
7391 struct mlxsw_sp_fib_node *fib_node)
7393 struct mlxsw_sp_fib6_entry *fib6_entry;
7395 fib6_entry = container_of(fib_node->fib_entry,
7396 struct mlxsw_sp_fib6_entry, common);
7397 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7398 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7399 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7402 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
7403 struct mlxsw_sp_fib_node *fib_node)
7405 switch (fib_node->fib->proto) {
7406 case MLXSW_SP_L3_PROTO_IPV4:
7407 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
7409 case MLXSW_SP_L3_PROTO_IPV6:
7410 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
7415 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
7416 struct mlxsw_sp_vr *vr,
7417 enum mlxsw_sp_l3proto proto)
7419 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
7420 struct mlxsw_sp_fib_node *fib_node, *tmp;
7422 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
7423 bool do_break = &tmp->list == &fib->node_list;
7425 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
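/* Flush all routes from every in-use virtual router: multicast tables
 * first, then IPv4 unicast and, if the router is still in use, IPv6
 * unicast. Used, for example, when FIB offload is aborted.
 */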
7431 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
7433 int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
7436 for (i = 0; i < max_vrs; i++) {
7437 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
7439 if (!mlxsw_sp_vr_is_used(vr))
7442 for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
7443 mlxsw_sp_mr_table_flush(vr->mr_table[j]);
7444 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
7446 /* If virtual router was only used for IPv4, then it's no longer used. */
7449 if (!mlxsw_sp_vr_is_used(vr))
7451 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
7455 struct mlxsw_sp_fib6_event_work {
7456 struct fib6_info **rt_arr;
7460 struct mlxsw_sp_fib_event_work {
7461 struct work_struct work;
7463 struct mlxsw_sp_fib6_event_work fib6_work;
7464 struct fib_entry_notifier_info fen_info;
7465 struct fib_rule_notifier_info fr_info;
7466 struct fib_nh_notifier_info fnh_info;
7467 struct mfc_entry_notifier_info men_info;
7468 struct vif_entry_notifier_info ven_info;
7470 struct mlxsw_sp *mlxsw_sp;
7471 unsigned long event;
7475 mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
7476 struct fib6_entry_notifier_info *fen6_info)
7478 struct fib6_info *rt = fen6_info->rt;
7479 struct fib6_info **rt_arr;
7480 struct fib6_info *iter;
7484 nrt6 = fen6_info->nsiblings + 1;
7486 rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
7490 fib6_work->rt_arr = rt_arr;
7491 fib6_work->nrt6 = nrt6;
7496 if (!fen6_info->nsiblings)
7499 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
7500 if (i == fen6_info->nsiblings)
7503 rt_arr[i + 1] = iter;
7504 fib6_info_hold(iter);
7507 WARN_ON_ONCE(i != fen6_info->nsiblings);
7513 mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
7517 for (i = 0; i < fib6_work->nrt6; i++)
7518 mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
7519 kfree(fib6_work->rt_arr);
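/* Process a queued IPv4 FIB event in process context under the router
 * lock: route replace and delete as well as nexthop add and delete.
 * References taken when the work was queued are released here.
 */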
7522 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
7524 struct mlxsw_sp_fib_event_work *fib_work =
7525 container_of(work, struct mlxsw_sp_fib_event_work, work);
7526 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7529 mutex_lock(&mlxsw_sp->router->lock);
7530 mlxsw_sp_span_respin(mlxsw_sp);
7532 switch (fib_work->event) {
7533 case FIB_EVENT_ENTRY_REPLACE:
7534 err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
7535 &fib_work->fen_info);
7537 dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7538 mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
7539 &fib_work->fen_info);
7541 fib_info_put(fib_work->fen_info.fi);
7543 case FIB_EVENT_ENTRY_DEL:
7544 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
7545 fib_info_put(fib_work->fen_info.fi);
7547 case FIB_EVENT_NH_ADD:
7548 case FIB_EVENT_NH_DEL:
7549 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
7550 fib_work->fnh_info.fib_nh);
7551 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
7554 mutex_unlock(&mlxsw_sp->router->lock);
7558 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
7560 struct mlxsw_sp_fib_event_work *fib_work =
7561 container_of(work, struct mlxsw_sp_fib_event_work, work);
7562 struct mlxsw_sp_fib6_event_work *fib6_work = &fib_work->fib6_work;
7563 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7566 mutex_lock(&mlxsw_sp->router->lock);
7567 mlxsw_sp_span_respin(mlxsw_sp);
7569 switch (fib_work->event) {
7570 case FIB_EVENT_ENTRY_REPLACE:
7571 err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
7575 dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7576 mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7580 mlxsw_sp_router_fib6_work_fini(fib6_work);
7582 case FIB_EVENT_ENTRY_APPEND:
7583 err = mlxsw_sp_router_fib6_append(mlxsw_sp,
7587 dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
7588 mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7592 mlxsw_sp_router_fib6_work_fini(fib6_work);
7594 case FIB_EVENT_ENTRY_DEL:
7595 mlxsw_sp_router_fib6_del(mlxsw_sp,
7598 mlxsw_sp_router_fib6_work_fini(fib6_work);
7601 mutex_unlock(&mlxsw_sp->router->lock);
7605 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
7607 struct mlxsw_sp_fib_event_work *fib_work =
7608 container_of(work, struct mlxsw_sp_fib_event_work, work);
7609 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7614 mutex_lock(&mlxsw_sp->router->lock);
7615 switch (fib_work->event) {
7616 case FIB_EVENT_ENTRY_REPLACE:
7617 case FIB_EVENT_ENTRY_ADD:
7618 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
7620 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
7623 dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
7624 mr_cache_put(fib_work->men_info.mfc);
7626 case FIB_EVENT_ENTRY_DEL:
7627 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
7628 mr_cache_put(fib_work->men_info.mfc);
7630 case FIB_EVENT_VIF_ADD:
7631 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
7632 &fib_work->ven_info);
7634 dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
7635 dev_put(fib_work->ven_info.dev);
7637 case FIB_EVENT_VIF_DEL:
7638 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
7639 &fib_work->ven_info);
7640 dev_put(fib_work->ven_info.dev);
7643 mutex_unlock(&mlxsw_sp->router->lock);
7648 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
7649 struct fib_notifier_info *info)
7651 struct fib_entry_notifier_info *fen_info;
7652 struct fib_nh_notifier_info *fnh_info;
7654 switch (fib_work->event) {
7655 case FIB_EVENT_ENTRY_REPLACE:
7656 case FIB_EVENT_ENTRY_DEL:
7657 fen_info = container_of(info, struct fib_entry_notifier_info,
7659 fib_work->fen_info = *fen_info;
7660 /* Take reference on fib_info to prevent it from being
7661 * freed while work is queued. Release it afterwards.
7663 fib_info_hold(fib_work->fen_info.fi);
7665 case FIB_EVENT_NH_ADD:
7666 case FIB_EVENT_NH_DEL:
7667 fnh_info = container_of(info, struct fib_nh_notifier_info,
7669 fib_work->fnh_info = *fnh_info;
7670 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
7675 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
7676 struct fib_notifier_info *info)
7678 struct fib6_entry_notifier_info *fen6_info;
7681 switch (fib_work->event) {
7682 case FIB_EVENT_ENTRY_REPLACE:
7683 case FIB_EVENT_ENTRY_APPEND:
7684 case FIB_EVENT_ENTRY_DEL:
7685 fen6_info = container_of(info, struct fib6_entry_notifier_info,
7687 err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
7698 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
7699 struct fib_notifier_info *info)
7701 switch (fib_work->event) {
7702 case FIB_EVENT_ENTRY_REPLACE:
7703 case FIB_EVENT_ENTRY_ADD:
7704 case FIB_EVENT_ENTRY_DEL:
7705 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
7706 mr_cache_hold(fib_work->men_info.mfc);
7708 case FIB_EVENT_VIF_ADD:
7709 case FIB_EVENT_VIF_DEL:
7710 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
7711 dev_hold(fib_work->ven_info.dev);
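/* FIB rules are not offloaded. Any rule other than the default rules
 * (or l3mdev rules) cannot be honoured by the device, so its addition
 * is rejected with an extack message.
 */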
7716 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
7717 struct fib_notifier_info *info,
7718 struct mlxsw_sp *mlxsw_sp)
7720 struct netlink_ext_ack *extack = info->extack;
7721 struct fib_rule_notifier_info *fr_info;
7722 struct fib_rule *rule;
7725 /* nothing to do at the moment */
7726 if (event == FIB_EVENT_RULE_DEL)
7729 fr_info = container_of(info, struct fib_rule_notifier_info, info);
7730 rule = fr_info->rule;
7732 /* Rule only affects locally generated traffic */
7733 if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
7736 switch (info->family) {
7738 if (!fib4_rule_default(rule) && !rule->l3mdev)
7742 if (!fib6_rule_default(rule) && !rule->l3mdev)
7745 case RTNL_FAMILY_IPMR:
7746 if (!ipmr_rule_default(rule) && !rule->l3mdev)
7749 case RTNL_FAMILY_IP6MR:
7750 if (!ip6mr_rule_default(rule) && !rule->l3mdev)
7756 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
7761 /* Called with rcu_read_lock() */
7762 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
7763 unsigned long event, void *ptr)
7765 struct mlxsw_sp_fib_event_work *fib_work;
7766 struct fib_notifier_info *info = ptr;
7767 struct mlxsw_sp_router *router;
7770 if ((info->family != AF_INET && info->family != AF_INET6 &&
7771 info->family != RTNL_FAMILY_IPMR &&
7772 info->family != RTNL_FAMILY_IP6MR))
7775 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7778 case FIB_EVENT_RULE_ADD:
7779 case FIB_EVENT_RULE_DEL:
7780 err = mlxsw_sp_router_fib_rule_event(event, info,
7782 return notifier_from_errno(err);
7783 case FIB_EVENT_ENTRY_ADD:
7784 case FIB_EVENT_ENTRY_REPLACE:
7785 case FIB_EVENT_ENTRY_APPEND:
7786 if (info->family == AF_INET) {
7787 struct fib_entry_notifier_info *fen_info = ptr;
7789 if (fen_info->fi->fib_nh_is_v6) {
7790 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
7791 return notifier_from_errno(-EINVAL);
7797 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
7801 fib_work->mlxsw_sp = router->mlxsw_sp;
7802 fib_work->event = event;
7804 switch (info->family) {
7806 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
7807 mlxsw_sp_router_fib4_event(fib_work, info);
7810 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
7811 err = mlxsw_sp_router_fib6_event(fib_work, info);
7815 case RTNL_FAMILY_IP6MR:
7816 case RTNL_FAMILY_IPMR:
7817 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
7818 mlxsw_sp_router_fibmr_event(fib_work, info);
7822 mlxsw_core_schedule_work(&fib_work->work);
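/* Find the router interface backed by the given netdevice by scanning
 * the RIF array. Must be called with the router lock held.
 */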
7831 static struct mlxsw_sp_rif *
7832 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
7833 const struct net_device *dev)
7835 int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
7838 for (i = 0; i < max_rifs; i++)
7839 if (mlxsw_sp->router->rifs[i] &&
7840 mlxsw_sp_rif_dev_is(mlxsw_sp->router->rifs[i], dev))
7841 return mlxsw_sp->router->rifs[i];
7846 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
7847 const struct net_device *dev)
7849 struct mlxsw_sp_rif *rif;
7851 mutex_lock(&mlxsw_sp->router->lock);
7852 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7853 mutex_unlock(&mlxsw_sp->router->lock);
7858 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
7860 struct mlxsw_sp_rif *rif;
7863 mutex_lock(&mlxsw_sp->router->lock);
7864 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7868 /* We only return the VID for VLAN RIFs. Otherwise we return an
7869 * invalid value (0).
7871 if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
7874 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7877 mutex_unlock(&mlxsw_sp->router->lock);
7881 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
7883 char ritr_pl[MLXSW_REG_RITR_LEN];
7886 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
7887 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7891 mlxsw_reg_ritr_enable_set(ritr_pl, false);
7892 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7895 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
7896 struct mlxsw_sp_rif *rif)
7898 /* Signal to nexthop cleanup that the RIF is going away. */
7899 rif->crif->rif = NULL;
7901 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
7902 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
7903 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
7906 static bool __mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
7908 struct inet6_dev *inet6_dev;
7909 struct in_device *idev;
7911 idev = __in_dev_get_rcu(dev);
7912 if (idev && idev->ifa_list)
7915 inet6_dev = __in6_dev_get(dev);
7916 if (inet6_dev && !list_empty(&inet6_dev->addr_list))
7922 static bool mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
7924 bool addr_list_empty;
7927 addr_list_empty = __mlxsw_sp_dev_addr_list_empty(dev);
7930 return addr_list_empty;
7934 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
7935 unsigned long event)
7937 bool addr_list_empty;
7943 addr_list_empty = mlxsw_sp_dev_addr_list_empty(dev);
7945 /* macvlans do not have a RIF, but rather piggy back on the
7946 * RIF of their lower device.
7948 if (netif_is_macvlan(dev) && addr_list_empty)
7951 if (rif && addr_list_empty &&
7952 !netif_is_l3_slave(mlxsw_sp_rif_dev(rif)))
7954 /* It is possible we already removed the RIF ourselves
7955 * if it was assigned to a netdev that is now a bridge or LAG slave. */
7964 static enum mlxsw_sp_rif_type
7965 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
7966 const struct net_device *dev)
7968 enum mlxsw_sp_fid_type type;
7970 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
7971 return MLXSW_SP_RIF_TYPE_IPIP_LB;
7973 /* Otherwise RIF type is derived from the type of the underlying FID. */
7974 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
7975 type = MLXSW_SP_FID_TYPE_8021Q;
7976 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
7977 type = MLXSW_SP_FID_TYPE_8021Q;
7978 else if (netif_is_bridge_master(dev))
7979 type = MLXSW_SP_FID_TYPE_8021D;
7981 type = MLXSW_SP_FID_TYPE_RFID;
7983 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
7986 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index,
7989 *p_rif_index = gen_pool_alloc(mlxsw_sp->router->rifs_table,
7991 if (*p_rif_index == 0)
7993 *p_rif_index -= MLXSW_SP_ROUTER_GENALLOC_OFFSET;
7995 /* RIF indexes must be aligned to the allocation size. */
7996 WARN_ON_ONCE(*p_rif_index % rif_entries);
8001 static void mlxsw_sp_rif_index_free(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8004 gen_pool_free(mlxsw_sp->router->rifs_table,
8005 MLXSW_SP_ROUTER_GENALLOC_OFFSET + rif_index, rif_entries);
8008 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
8010 struct mlxsw_sp_crif *crif)
8012 struct net_device *l3_dev = crif ? crif->key.dev : NULL;
8013 struct mlxsw_sp_rif *rif;
8015 rif = kzalloc(rif_size, GFP_KERNEL);
8019 INIT_LIST_HEAD(&rif->neigh_list);
8021 ether_addr_copy(rif->addr, l3_dev->dev_addr);
8022 rif->mtu = l3_dev->mtu;
8025 rif->rif_index = rif_index;
8034 static void mlxsw_sp_rif_free(struct mlxsw_sp_rif *rif)
8036 WARN_ON(!list_empty(&rif->neigh_list));
8039 rif->crif->rif = NULL;
8043 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
8046 return mlxsw_sp->router->rifs[rif_index];
8049 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
8051 return rif->rif_index;
8054 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8056 return lb_rif->common.rif_index;
8059 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8061 struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
8062 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
8063 struct mlxsw_sp_vr *ul_vr;
8065 ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
8066 if (WARN_ON(IS_ERR(ul_vr)))
8072 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8074 return lb_rif->ul_rif_id;
8078 mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
8080 return mlxsw_sp_rif_counter_valid_get(rif,
8081 MLXSW_SP_RIF_COUNTER_EGRESS) &&
8082 mlxsw_sp_rif_counter_valid_get(rif,
8083 MLXSW_SP_RIF_COUNTER_INGRESS);
8087 mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
8091 err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8095 /* Clear stale data. */
8096 err = mlxsw_sp_rif_counter_fetch_clear(rif,
8097 MLXSW_SP_RIF_COUNTER_INGRESS,
8100 goto err_clear_ingress;
8102 err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8104 goto err_alloc_egress;
8106 /* Clear stale data. */
8107 err = mlxsw_sp_rif_counter_fetch_clear(rif,
8108 MLXSW_SP_RIF_COUNTER_EGRESS,
8111 goto err_clear_egress;
8116 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8119 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8124 mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
8126 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8127 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8131 mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
8132 struct netdev_notifier_offload_xstats_info *info)
8134 if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8136 netdev_offload_xstats_report_used(info->report_used);
8140 mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
8141 struct rtnl_hw_stats64 *p_stats)
8143 struct mlxsw_sp_rif_counter_set_basic ingress;
8144 struct mlxsw_sp_rif_counter_set_basic egress;
8147 err = mlxsw_sp_rif_counter_fetch_clear(rif,
8148 MLXSW_SP_RIF_COUNTER_INGRESS,
8153 err = mlxsw_sp_rif_counter_fetch_clear(rif,
8154 MLXSW_SP_RIF_COUNTER_EGRESS,
8159 #define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX) \
8160 ((SET.good_unicast_ ## SFX) + \
8161 (SET.good_multicast_ ## SFX) + \
8162 (SET.good_broadcast_ ## SFX))
8164 p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
8165 p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
8166 p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
8167 p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
8168 p_stats->rx_errors = ingress.error_packets;
8169 p_stats->tx_errors = egress.error_packets;
8170 p_stats->rx_dropped = ingress.discard_packets;
8171 p_stats->tx_dropped = egress.discard_packets;
8172 p_stats->multicast = ingress.good_multicast_packets +
8173 ingress.good_broadcast_packets;
8175 #undef MLXSW_SP_ROUTER_ALL_GOOD
8181 mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
8182 struct netdev_notifier_offload_xstats_info *info)
8184 struct rtnl_hw_stats64 stats = {};
8187 if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8190 err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
8194 netdev_offload_xstats_report_delta(info->report_delta, &stats);
8198 struct mlxsw_sp_router_hwstats_notify_work {
8199 struct work_struct work;
8200 struct net_device *dev;
8203 static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
8205 struct mlxsw_sp_router_hwstats_notify_work *hws_work =
8206 container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
8210 rtnl_offload_xstats_notify(hws_work->dev);
8212 dev_put(hws_work->dev);
8217 mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
8219 struct mlxsw_sp_router_hwstats_notify_work *hws_work;
8221 /* To collect notification payload, the core ends up sending another
8222 * notifier block message, which would deadlock on the attempt to
8223 * acquire the router lock again. Just postpone the notification until later. */
8227 hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
8231 INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
8233 hws_work->dev = dev;
8234 mlxsw_core_schedule_work(&hws_work->work);
8237 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
8239 return mlxsw_sp_rif_dev(rif)->ifindex;
8242 bool mlxsw_sp_rif_has_dev(const struct mlxsw_sp_rif *rif)
8244 return !!mlxsw_sp_rif_dev(rif);
8247 bool mlxsw_sp_rif_dev_is(const struct mlxsw_sp_rif *rif,
8248 const struct net_device *dev)
8250 return mlxsw_sp_rif_dev(rif) == dev;
8253 static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
8255 struct rtnl_hw_stats64 stats = {};
8257 if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
8258 netdev_offload_xstats_push_delta(mlxsw_sp_rif_dev(rif),
8259 NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8263 static struct mlxsw_sp_rif *
8264 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
8265 const struct mlxsw_sp_rif_params *params,
8266 struct netlink_ext_ack *extack)
8268 u8 rif_entries = params->double_entry ? 2 : 1;
8269 u32 tb_id = l3mdev_fib_table(params->dev);
8270 const struct mlxsw_sp_rif_ops *ops;
8271 struct mlxsw_sp_fid *fid = NULL;
8272 enum mlxsw_sp_rif_type type;
8273 struct mlxsw_sp_crif *crif;
8274 struct mlxsw_sp_rif *rif;
8275 struct mlxsw_sp_vr *vr;
8279 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
8280 ops = mlxsw_sp->router->rif_ops_arr[type];
8282 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
8284 return ERR_CAST(vr);
8287 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
8289 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8290 goto err_rif_index_alloc;
8293 crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, params->dev);
8294 if (WARN_ON(!crif)) {
8296 goto err_crif_lookup;
8299 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, crif);
8304 dev_hold(params->dev);
8305 mlxsw_sp->router->rifs[rif_index] = rif;
8306 rif->mlxsw_sp = mlxsw_sp;
8308 rif->rif_entries = rif_entries;
8311 fid = ops->fid_get(rif, params, extack);
8320 ops->setup(rif, params);
8322 err = ops->configure(rif, extack);
8326 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
8327 err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
8329 goto err_mr_rif_add;
8332 if (netdev_offload_xstats_enabled(params->dev,
8333 NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8334 err = mlxsw_sp_router_port_l3_stats_enable(rif);
8336 goto err_stats_enable;
8337 mlxsw_sp_router_hwstats_notify_schedule(params->dev);
8339 mlxsw_sp_rif_counters_alloc(rif);
8342 atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
8347 for (i--; i >= 0; i--)
8348 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8349 ops->deconfigure(rif);
8352 mlxsw_sp_fid_put(fid);
8354 mlxsw_sp->router->rifs[rif_index] = NULL;
8355 dev_put(params->dev);
8356 mlxsw_sp_rif_free(rif);
8359 mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
8360 err_rif_index_alloc:
8362 mlxsw_sp_vr_put(mlxsw_sp, vr);
8363 return ERR_PTR(err);
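/* Tear down a router interface: detach nexthops and neighbours, free
 * counters, remove the RIF from the multicast tables, deconfigure it in
 * the device and release the FID, RIF index and virtual router.
 */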
8366 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
8368 struct net_device *dev = mlxsw_sp_rif_dev(rif);
8369 const struct mlxsw_sp_rif_ops *ops = rif->ops;
8370 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8371 struct mlxsw_sp_crif *crif = rif->crif;
8372 struct mlxsw_sp_fid *fid = rif->fid;
8373 u8 rif_entries = rif->rif_entries;
8374 u16 rif_index = rif->rif_index;
8375 struct mlxsw_sp_vr *vr;
8378 atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
8379 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8380 vr = &mlxsw_sp->router->vrs[rif->vr_id];
8382 if (netdev_offload_xstats_enabled(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8383 mlxsw_sp_rif_push_l3_stats(rif);
8384 mlxsw_sp_router_port_l3_stats_disable(rif);
8385 mlxsw_sp_router_hwstats_notify_schedule(dev);
8387 mlxsw_sp_rif_counters_free(rif);
8390 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8391 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8392 ops->deconfigure(rif);
8393 if (fid)
8394 /* Loopback RIFs are not associated with a FID. */
8395 mlxsw_sp_fid_put(fid);
8396 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
8398 mlxsw_sp_rif_free(rif);
8399 mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
8401 mlxsw_sp_vr_put(mlxsw_sp, vr);
8403 if (crif->can_destroy)
8404 mlxsw_sp_crif_free(crif);
8407 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
8408 struct net_device *dev)
8410 struct mlxsw_sp_rif *rif;
8412 mutex_lock(&mlxsw_sp->router->lock);
8413 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8416 mlxsw_sp_rif_destroy(rif);
8418 mutex_unlock(&mlxsw_sp->router->lock);
8421 static void mlxsw_sp_rif_destroy_vlan_upper(struct mlxsw_sp *mlxsw_sp,
8422 struct net_device *br_dev,
8425 struct net_device *upper_dev;
8426 struct mlxsw_sp_crif *crif;
8429 upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q), vid);
8435 crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, upper_dev);
8436 if (!crif || !crif->rif)
8439 mlxsw_sp_rif_destroy(crif->rif);
8442 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
8443 struct net_device *l3_dev,
8445 unsigned long event,
8446 struct netlink_ext_ack *extack);
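/* Called when the PVID of a VLAN-aware bridge that has a RIF changes.
 * The RIF of the old PVID is migrated to a RIF for the new PVID, or
 * simply destroyed if the PVID was removed.
 */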
8448 int mlxsw_sp_router_bridge_vlan_add(struct mlxsw_sp *mlxsw_sp,
8449 struct net_device *br_dev,
8450 u16 new_vid, bool is_pvid,
8451 struct netlink_ext_ack *extack)
8453 struct mlxsw_sp_rif *old_rif;
8454 struct mlxsw_sp_rif *new_rif;
8455 struct net_device *upper_dev;
8460 mutex_lock(&mlxsw_sp->router->lock);
8461 old_rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
8463 /* If the RIF on the bridge is not a VLAN RIF, we shouldn't have
8464 * gotten a PVID notification.
8466 if (WARN_ON(old_rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN))
8469 old_pvid = mlxsw_sp_fid_8021q_vid(old_rif->fid);
8474 else if (old_pvid == new_vid)
8479 if (old_pvid == new_pvid)
8483 struct mlxsw_sp_rif_params params = {
8488 /* If there is a VLAN upper with the same VID as the new PVID,
8489 * kill its RIF, if there is one.
8491 mlxsw_sp_rif_destroy_vlan_upper(mlxsw_sp, br_dev, new_pvid);
8493 if (mlxsw_sp_dev_addr_list_empty(br_dev))
8495 new_rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
8496 if (IS_ERR(new_rif)) {
8497 err = PTR_ERR(new_rif);
8502 mlxsw_sp_rif_migrate_destroy(mlxsw_sp, old_rif, new_rif,
8505 mlxsw_sp_rif_destroy(old_rif);
8510 upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q),
8514 err = mlxsw_sp_inetaddr_bridge_event(mlxsw_sp,
8521 mutex_unlock(&mlxsw_sp->router->lock);
8526 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
8527 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8529 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8531 params->vid = mlxsw_sp_port_vlan->vid;
8532 params->lag = mlxsw_sp_port->lagged;
8534 params->lag_id = mlxsw_sp_port->lag_id;
8536 params->system_port = mlxsw_sp_port->local_port;
8539 static struct mlxsw_sp_rif_subport *
8540 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
8542 return container_of(rif, struct mlxsw_sp_rif_subport, common);
8545 static struct mlxsw_sp_rif *
8546 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
8547 const struct mlxsw_sp_rif_params *params,
8548 struct netlink_ext_ack *extack)
8550 struct mlxsw_sp_rif_subport *rif_subport;
8551 struct mlxsw_sp_rif *rif;
8553 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
8555 return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
8557 rif_subport = mlxsw_sp_rif_subport_rif(rif);
8558 refcount_inc(&rif_subport->ref_count);
8562 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
8564 struct mlxsw_sp_rif_subport *rif_subport;
8566 rif_subport = mlxsw_sp_rif_subport_rif(rif);
8567 if (!refcount_dec_and_test(&rif_subport->ref_count))
8570 mlxsw_sp_rif_destroy(rif);
8573 static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
8574 struct mlxsw_sp_rif_mac_profile *profile,
8575 struct netlink_ext_ack *extack)
8577 u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
8578 struct mlxsw_sp_router *router = mlxsw_sp->router;
8581 id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
8582 max_rif_mac_profiles, GFP_KERNEL);
8590 NL_SET_ERR_MSG_MOD(extack,
8591 "Exceeded number of supported router interface MAC profiles");
8596 static struct mlxsw_sp_rif_mac_profile *
8597 mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
8599 struct mlxsw_sp_rif_mac_profile *profile;
8601 profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
8607 static struct mlxsw_sp_rif_mac_profile *
8608 mlxsw_sp_rif_mac_profile_alloc(const char *mac)
8610 struct mlxsw_sp_rif_mac_profile *profile;
8612 profile = kzalloc(sizeof(*profile), GFP_KERNEL);
8616 ether_addr_copy(profile->mac_prefix, mac);
8617 refcount_set(&profile->ref_count, 1);
8621 static struct mlxsw_sp_rif_mac_profile *
8622 mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
8624 struct mlxsw_sp_router *router = mlxsw_sp->router;
8625 struct mlxsw_sp_rif_mac_profile *profile;
8628 idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
8629 if (ether_addr_equal_masked(profile->mac_prefix, mac,
8630 mlxsw_sp->mac_mask))
8637 static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
8639 const struct mlxsw_sp *mlxsw_sp = priv;
8641 return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
8644 static u64 mlxsw_sp_rifs_occ_get(void *priv)
8646 const struct mlxsw_sp *mlxsw_sp = priv;
8648 return atomic_read(&mlxsw_sp->router->rifs_count);
8651 static struct mlxsw_sp_rif_mac_profile *
8652 mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
8653 struct netlink_ext_ack *extack)
8655 struct mlxsw_sp_rif_mac_profile *profile;
8658 profile = mlxsw_sp_rif_mac_profile_alloc(mac);
8660 return ERR_PTR(-ENOMEM);
8662 err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
8664 goto profile_index_alloc_err;
8666 atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
8669 profile_index_alloc_err:
8671 return ERR_PTR(err);
8674 static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
8677 struct mlxsw_sp_rif_mac_profile *profile;
8679 atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
8680 profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
8684 static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
8685 const char *mac, u8 *p_mac_profile,
8686 struct netlink_ext_ack *extack)
8688 struct mlxsw_sp_rif_mac_profile *profile;
8690 profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
8692 refcount_inc(&profile->ref_count);
8696 profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
8697 if (IS_ERR(profile))
8698 return PTR_ERR(profile);
8701 *p_mac_profile = profile->id;
8705 static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
8708 struct mlxsw_sp_rif_mac_profile *profile;
8710 profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8712 if (WARN_ON(!profile))
8715 if (!refcount_dec_and_test(&profile->ref_count))
8718 mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
8721 static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
8723 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8724 struct mlxsw_sp_rif_mac_profile *profile;
8726 profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8727 rif->mac_profile_id);
8728 if (WARN_ON(!profile))
8731 return refcount_read(&profile->ref_count) > 1;
8734 static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
8735 const char *new_mac)
8737 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8738 struct mlxsw_sp_rif_mac_profile *profile;
8740 profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8741 rif->mac_profile_id);
8742 if (WARN_ON(!profile))
8745 ether_addr_copy(profile->mac_prefix, new_mac);
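/* Adjust the RIF's MAC profile for a new MAC address: if the current
 * profile is not shared and no existing profile matches the new
 * address, edit the profile in place; otherwise move the RIF to a
 * matching (possibly newly created) profile.
 */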
8750 mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
8751 struct mlxsw_sp_rif *rif,
8752 const char *new_mac,
8753 struct netlink_ext_ack *extack)
8758 if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
8759 !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
8760 return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
8762 err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
8763 &mac_profile, extack);
8767 mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
8768 rif->mac_profile_id = mac_profile;
8773 __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8774 struct net_device *l3_dev,
8775 struct netlink_ext_ack *extack)
8777 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8778 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
8779 struct mlxsw_sp_rif_params params;
8780 u16 vid = mlxsw_sp_port_vlan->vid;
8781 struct mlxsw_sp_rif *rif;
8782 struct mlxsw_sp_fid *fid;
8785 params = (struct mlxsw_sp_rif_params) {
8790 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
8791 rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
8793 return PTR_ERR(rif);
8795 /* FID was already created, just take a reference */
8796 fid = rif->ops->fid_get(rif, &params, extack);
8797 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
8799 goto err_fid_port_vid_map;
8801 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
8803 goto err_port_vid_learning_set;
8805 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
8806 BR_STATE_FORWARDING);
8808 goto err_port_vid_stp_set;
8810 mlxsw_sp_port_vlan->fid = fid;
8814 err_port_vid_stp_set:
8815 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8816 err_port_vid_learning_set:
8817 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8818 err_fid_port_vid_map:
8819 mlxsw_sp_fid_put(fid);
8820 mlxsw_sp_rif_subport_put(rif);
8825 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8827 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8828 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
8829 struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
8830 u16 vid = mlxsw_sp_port_vlan->vid;
8832 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
8835 mlxsw_sp_port_vlan->fid = NULL;
8836 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
8837 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8838 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8839 mlxsw_sp_fid_put(fid);
8840 mlxsw_sp_rif_subport_put(rif);
8844 mlxsw_sp_port_vlan_router_join_existing(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8845 struct net_device *l3_dev,
8846 struct netlink_ext_ack *extack)
8848 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8850 lockdep_assert_held(&mlxsw_sp->router->lock);
8852 if (!mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev))
8855 return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
8860 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8862 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8864 mutex_lock(&mlxsw_sp->router->lock);
8865 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8866 mutex_unlock(&mlxsw_sp->router->lock);
8869 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
8870 struct net_device *port_dev,
8871 unsigned long event, u16 vid,
8872 struct netlink_ext_ack *extack)
8874 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
8875 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
8877 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
8878 if (WARN_ON(!mlxsw_sp_port_vlan))
8883 return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
8886 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8893 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
8894 unsigned long event, bool nomaster,
8895 struct netlink_ext_ack *extack)
8897 if (!nomaster && (netif_is_any_bridge_port(port_dev) ||
8898 netif_is_lag_port(port_dev)))
8901 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
8902 MLXSW_SP_DEFAULT_VID, extack);
8905 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
8906 struct net_device *lag_dev,
8907 unsigned long event, u16 vid,
8908 struct netlink_ext_ack *extack)
8910 struct net_device *port_dev;
8911 struct list_head *iter;
8914 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
8915 if (mlxsw_sp_port_dev_check(port_dev)) {
8916 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
8928 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
8929 unsigned long event, bool nomaster,
8930 struct netlink_ext_ack *extack)
8932 if (!nomaster && netif_is_bridge_port(lag_dev))
8935 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
8936 MLXSW_SP_DEFAULT_VID, extack);
8939 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
8940 struct net_device *l3_dev,
8942 unsigned long event,
8943 struct netlink_ext_ack *extack)
8945 struct mlxsw_sp_rif_params params = {
8948 struct mlxsw_sp_rif *rif;
8953 if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
8956 br_vlan_get_proto(l3_dev, &proto);
8957 if (proto == ETH_P_8021AD) {
8958 NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
8961 err = br_vlan_get_pvid(l3_dev, &params.vid);
8966 } else if (is_vlan_dev(l3_dev)) {
8967 params.vid = vlan_dev_vlan_id(l3_dev);
8969 /* If the VID matches PVID of the bridge below, the
8970 * bridge owns the RIF for this VLAN. Don't do anything.
8972 if ((int)params.vid == lower_pvid)
8976 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
8978 return PTR_ERR(rif);
8981 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8982 mlxsw_sp_rif_destroy(rif);
8989 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
8990 struct net_device *vlan_dev,
8991 unsigned long event, bool nomaster,
8992 struct netlink_ext_ack *extack)
8994 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
8995 u16 vid = vlan_dev_vlan_id(vlan_dev);
8999 if (!nomaster && netif_is_bridge_port(vlan_dev))
9002 if (mlxsw_sp_port_dev_check(real_dev)) {
9003 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
9004 event, vid, extack);
9005 } else if (netif_is_lag_master(real_dev)) {
9006 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
9008 } else if (netif_is_bridge_master(real_dev) &&
9009 br_vlan_enabled(real_dev)) {
9010 err = br_vlan_get_pvid(real_dev, &lower_pvid);
9013 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev,
9021 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
9023 u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
9024 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
9026 return ether_addr_equal_masked(mac, vrrp4, mask);
9029 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
9031 u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
9032 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
9034 return ether_addr_equal_masked(mac, vrrp6, mask);
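/* If the MAC is a VRRP virtual MAC, program (or clear) the VRRP
 * identifier, taken from the last byte of the MAC, in the RIF's RITR
 * register, for IPv4 or IPv6 as appropriate.
 */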
9037 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9038 const u8 *mac, bool adding)
9040 char ritr_pl[MLXSW_REG_RITR_LEN];
9041 u8 vrrp_id = adding ? mac[5] : 0;
9044 if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
9045 !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
9048 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9049 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9053 if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
9054 mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
9056 mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
9058 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9061 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
9062 const struct net_device *macvlan_dev,
9063 struct netlink_ext_ack *extack)
9065 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9066 struct mlxsw_sp_rif *rif;
9069 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9071 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
9075 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9076 mlxsw_sp_fid_index(rif->fid), true);
9080 err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
9081 macvlan_dev->dev_addr, true);
9083 goto err_rif_vrrp_add;
9085 /* Make sure the bridge driver does not have this MAC pointing at some other port. */
9088 if (rif->ops->fdb_del)
9089 rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
9094 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9095 mlxsw_sp_fid_index(rif->fid), false);
9099 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9100 const struct net_device *macvlan_dev)
9102 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9103 struct mlxsw_sp_rif *rif;
9105 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9106 /* If we do not have a RIF, then we already took care of
9107 * removing the macvlan's MAC during RIF deletion.
9111 mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
9113 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9114 mlxsw_sp_fid_index(rif->fid), false);
9117 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9118 const struct net_device *macvlan_dev)
9120 mutex_lock(&mlxsw_sp->router->lock);
9121 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9122 mutex_unlock(&mlxsw_sp->router->lock);
9125 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
9126 struct net_device *macvlan_dev,
9127 unsigned long event,
9128 struct netlink_ext_ack *extack)
9132 return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
9134 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9141 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
9142 struct net_device *dev,
9143 unsigned long event, bool nomaster,
9144 struct netlink_ext_ack *extack)
9146 if (mlxsw_sp_port_dev_check(dev))
9147 return mlxsw_sp_inetaddr_port_event(dev, event, nomaster,
9149 else if (netif_is_lag_master(dev))
9150 return mlxsw_sp_inetaddr_lag_event(dev, event, nomaster,
9152 else if (netif_is_bridge_master(dev))
9153 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, -1, event,
9155 else if (is_vlan_dev(dev))
9156 return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
9158 else if (netif_is_macvlan(dev))
9159 return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
9165 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
9166 unsigned long event, void *ptr)
9168 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
9169 struct net_device *dev = ifa->ifa_dev->dev;
9170 struct mlxsw_sp_router *router;
9171 struct mlxsw_sp_rif *rif;
9174 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
9175 if (event == NETDEV_UP)
9178 router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
9179 mutex_lock(&router->lock);
9180 rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
9181 if (!mlxsw_sp_rif_should_config(rif, dev, event))
9182 goto out;
9184 err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, false,
9185 NULL);
9186 out:
9187 mutex_unlock(&router->lock);
9188 return notifier_from_errno(err);
9191 static int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
9192 unsigned long event, void *ptr)
9194 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
9195 struct net_device *dev = ivi->ivi_dev->dev;
9196 struct mlxsw_sp *mlxsw_sp;
9197 struct mlxsw_sp_rif *rif;
9200 mlxsw_sp = mlxsw_sp_lower_get(dev);
9204 mutex_lock(&mlxsw_sp->router->lock);
9205 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9206 if (!mlxsw_sp_rif_should_config(rif, dev, event))
9207 goto out;
9209 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
9210 ivi->extack);
9211 out:
9212 mutex_unlock(&mlxsw_sp->router->lock);
9213 return notifier_from_errno(err);
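/* IPv6 address notifications arrive in atomic context (under RCU), so they
 * cannot take the router mutex directly. They are therefore deferred to a
 * work item, which performs the actual RIF configuration.
 */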
9216 struct mlxsw_sp_inet6addr_event_work {
9217 struct work_struct work;
9218 struct mlxsw_sp *mlxsw_sp;
9219 struct net_device *dev;
9220 unsigned long event;
9223 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
9225 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
9226 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
9227 struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
9228 struct net_device *dev = inet6addr_work->dev;
9229 unsigned long event = inet6addr_work->event;
9230 struct mlxsw_sp_rif *rif;
9233 mutex_lock(&mlxsw_sp->router->lock);
9235 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9236 if (!mlxsw_sp_rif_should_config(rif, dev, event))
9237 goto out;
9239 __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false, NULL);
9240 out:
9241 mutex_unlock(&mlxsw_sp->router->lock);
9244 kfree(inet6addr_work);
9247 /* Called with rcu_read_lock() */
9248 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
9249 unsigned long event, void *ptr)
9251 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
9252 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
9253 struct net_device *dev = if6->idev->dev;
9254 struct mlxsw_sp_router *router;
9256 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
9257 if (event == NETDEV_UP)
9260 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
9261 if (!inet6addr_work)
9264 router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
9265 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
9266 inet6addr_work->mlxsw_sp = router->mlxsw_sp;
9267 inet6addr_work->dev = dev;
9268 inet6addr_work->event = event;
9270 mlxsw_core_schedule_work(&inet6addr_work->work);
9275 static int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
9276 unsigned long event, void *ptr)
9278 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
9279 struct net_device *dev = i6vi->i6vi_dev->dev;
9280 struct mlxsw_sp *mlxsw_sp;
9281 struct mlxsw_sp_rif *rif;
9284 mlxsw_sp = mlxsw_sp_lower_get(dev);
9288 mutex_lock(&mlxsw_sp->router->lock);
9289 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9290 if (!mlxsw_sp_rif_should_config(rif, dev, event))
9291 goto out;
9293 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
9294 i6vi->extack);
9295 out:
9296 mutex_unlock(&mlxsw_sp->router->lock);
9297 return notifier_from_errno(err);
9300 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9301 const char *mac, int mtu, u8 mac_profile)
9303 char ritr_pl[MLXSW_REG_RITR_LEN];
9306 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9307 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9308 if (err)
9309 return err;
9311 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
9312 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
9313 mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
9314 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
9315 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9319 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
9320 struct mlxsw_sp_rif *rif,
9321 struct netlink_ext_ack *extack)
9323 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9328 fid_index = mlxsw_sp_fid_index(rif->fid);
9330 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
9334 old_mac_profile = rif->mac_profile_id;
9335 err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
9338 goto err_rif_mac_profile_replace;
9340 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
9341 dev->mtu, rif->mac_profile_id);
9345 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
9347 goto err_rif_fdb_op;
9349 if (rif->mtu != dev->mtu) {
9350 struct mlxsw_sp_vr *vr;
9353 /* The RIF is relevant only to its mr_table instance, as unlike
9354 * unicast routing, in multicast routing a RIF cannot be shared
9355 * between several multicast routing tables.
9356 */
9357 vr = &mlxsw_sp->router->vrs[rif->vr_id];
9358 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
9359 mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
9360 rif, dev->mtu);
9361 }
9363 ether_addr_copy(rif->addr, dev->dev_addr);
9364 rif->mtu = dev->mtu;
9366 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
9368 return 0;
9370 err_rif_fdb_op:
9371 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
9372 old_mac_profile);
9373 err_rif_edit:
9374 mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
9375 err_rif_mac_profile_replace:
9376 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
9377 return err;
9380 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
9381 struct netdev_notifier_pre_changeaddr_info *info)
9383 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9384 struct mlxsw_sp_rif_mac_profile *profile;
9385 struct netlink_ext_ack *extack;
9386 u8 max_rif_mac_profiles;
9389 extack = netdev_notifier_info_to_extack(&info->info);
9391 profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
9392 if (profile)
9393 return 0;
9395 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
9396 occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
9397 if (occ < max_rif_mac_profiles)
9398 return 0;
9400 if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
9401 return 0;
9403 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
9404 return -ENOBUFS;
9407 static bool mlxsw_sp_router_netdevice_interesting(struct mlxsw_sp *mlxsw_sp,
9408 struct net_device *dev)
9410 struct vlan_dev_priv *vlan;
9412 if (netif_is_lag_master(dev) ||
9413 netif_is_bridge_master(dev) ||
9414 mlxsw_sp_port_dev_check(dev) ||
9415 mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev) ||
9416 netif_is_l3_master(dev))
9417 return true;
9419 if (!is_vlan_dev(dev))
9420 return false;
9422 vlan = vlan_dev_priv(dev);
9423 return netif_is_lag_master(vlan->real_dev) ||
9424 netif_is_bridge_master(vlan->real_dev) ||
9425 mlxsw_sp_port_dev_check(vlan->real_dev);
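/* A CRIF ("candidate RIF") is kept for every netdevice on which a RIF could
 * be configured. It is created at NETDEV_REGISTER and removed at
 * NETDEV_UNREGISTER; its nexthop_list allows nexthops that use the netdevice
 * to be torn down when the netdevice goes away, whether or not a RIF exists.
 */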
9428 static struct mlxsw_sp_crif *
9429 mlxsw_sp_crif_register(struct mlxsw_sp_router *router, struct net_device *dev)
9431 struct mlxsw_sp_crif *crif;
9434 if (WARN_ON(mlxsw_sp_crif_lookup(router, dev)))
9435 return NULL;
9437 crif = mlxsw_sp_crif_alloc(dev);
9438 if (!crif)
9439 return ERR_PTR(-ENOMEM);
9441 err = mlxsw_sp_crif_insert(router, crif);
9442 if (err)
9443 goto err_netdev_insert;
9445 return crif;
9447 err_netdev_insert:
9448 mlxsw_sp_crif_free(crif);
9449 return ERR_PTR(err);
9452 static void mlxsw_sp_crif_unregister(struct mlxsw_sp_router *router,
9453 struct mlxsw_sp_crif *crif)
9455 struct mlxsw_sp_nexthop *nh, *tmp;
9457 mlxsw_sp_crif_remove(router, crif);
9459 list_for_each_entry_safe(nh, tmp, &crif->nexthop_list, crif_list_node)
9460 mlxsw_sp_nexthop_type_fini(router->mlxsw_sp, nh);
9463 crif->can_destroy = true;
9465 mlxsw_sp_crif_free(crif);
9468 static int mlxsw_sp_netdevice_register(struct mlxsw_sp_router *router,
9469 struct net_device *dev)
9471 struct mlxsw_sp_crif *crif;
9473 if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
9476 crif = mlxsw_sp_crif_register(router, dev);
9477 return PTR_ERR_OR_ZERO(crif);
9480 static void mlxsw_sp_netdevice_unregister(struct mlxsw_sp_router *router,
9481 struct net_device *dev)
9483 struct mlxsw_sp_crif *crif;
9485 if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
9488 /* netdev_run_todo(), by way of netdev_wait_allrefs_any(), rebroadcasts
9489 * the NETDEV_UNREGISTER message, so we can get here twice. If that's
9490 * what happened, the netdevice state is NETREG_UNREGISTERED. In that
9491 * case, we expect to have collected the CRIF already, and warn if it
9492 * still exists. Otherwise we expect the CRIF to exist.
9493 */
9494 crif = mlxsw_sp_crif_lookup(router, dev);
9495 if (dev->reg_state == NETREG_UNREGISTERED) {
9496 if (!WARN_ON(crif))
9497 return;
9498 }
9500 if (WARN_ON(!crif))
9501 return;
9502 mlxsw_sp_crif_unregister(router, crif);
9505 static bool mlxsw_sp_is_offload_xstats_event(unsigned long event)
9508 case NETDEV_OFFLOAD_XSTATS_ENABLE:
9509 case NETDEV_OFFLOAD_XSTATS_DISABLE:
9510 case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9511 case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9519 mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
9520 unsigned long event,
9521 struct netdev_notifier_offload_xstats_info *info)
9523 switch (info->type) {
9524 case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
9531 case NETDEV_OFFLOAD_XSTATS_ENABLE:
9532 return mlxsw_sp_router_port_l3_stats_enable(rif);
9533 case NETDEV_OFFLOAD_XSTATS_DISABLE:
9534 mlxsw_sp_router_port_l3_stats_disable(rif);
9536 case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9537 mlxsw_sp_router_port_l3_stats_report_used(rif, info);
9539 case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9540 return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
9548 mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp,
9549 struct net_device *dev,
9550 unsigned long event,
9551 struct netdev_notifier_offload_xstats_info *info)
9553 struct mlxsw_sp_rif *rif;
9555 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9559 return mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info);
9562 static bool mlxsw_sp_is_router_event(unsigned long event)
9565 case NETDEV_PRE_CHANGEADDR:
9566 case NETDEV_CHANGEADDR:
9567 case NETDEV_CHANGEMTU:
9574 static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
9575 unsigned long event, void *ptr)
9577 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
9578 struct mlxsw_sp *mlxsw_sp;
9579 struct mlxsw_sp_rif *rif;
9581 mlxsw_sp = mlxsw_sp_lower_get(dev);
9582 if (!mlxsw_sp)
9583 return 0;
9585 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9586 if (!rif)
9587 return 0;
9589 switch (event) {
9590 case NETDEV_CHANGEMTU:
9591 case NETDEV_CHANGEADDR:
9592 return mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
9593 case NETDEV_PRE_CHANGEADDR:
9594 return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
9603 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
9604 struct net_device *l3_dev,
9605 struct netlink_ext_ack *extack)
9607 struct mlxsw_sp_rif *rif;
9609 /* If netdev is already associated with a RIF, then we need to
9610 * destroy it and create a new one with the new virtual router ID.
9612 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9614 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false,
9617 return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, false,
9621 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
9622 struct net_device *l3_dev)
9624 struct mlxsw_sp_rif *rif;
9626 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9629 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false, NULL);
9632 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
9634 struct netdev_notifier_changeupper_info *info = ptr;
9636 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
9638 return netif_is_l3_master(info->upper_dev);
9642 mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
9643 struct netdev_notifier_changeupper_info *info)
9645 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
9648 /* We do not create a RIF for a macvlan, but only use it to
9649 * direct more MAC addresses to the router.
9651 if (!mlxsw_sp || netif_is_macvlan(l3_dev))
9655 case NETDEV_PRECHANGEUPPER:
9657 case NETDEV_CHANGEUPPER:
9658 if (info->linking) {
9659 struct netlink_ext_ack *extack;
9661 extack = netdev_notifier_info_to_extack(&info->info);
9662 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
9664 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
9673 mlxsw_sp_port_vid_router_join_existing(struct mlxsw_sp_port *mlxsw_sp_port,
9674 u16 vid, struct net_device *dev,
9675 struct netlink_ext_ack *extack)
9677 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
9679 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
9681 if (WARN_ON(!mlxsw_sp_port_vlan))
9684 return mlxsw_sp_port_vlan_router_join_existing(mlxsw_sp_port_vlan,
9688 static int __mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
9689 struct net_device *lag_dev,
9690 struct netlink_ext_ack *extack)
9692 u16 default_vid = MLXSW_SP_DEFAULT_VID;
9694 return mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port,
9695 default_vid, lag_dev,
9699 int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
9700 struct net_device *lag_dev,
9701 struct netlink_ext_ack *extack)
9705 mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
9706 err = __mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev, extack);
9707 mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);
9712 static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
9713 unsigned long event, void *ptr)
9715 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
9716 struct mlxsw_sp_router *router;
9717 struct mlxsw_sp *mlxsw_sp;
9720 router = container_of(nb, struct mlxsw_sp_router, netdevice_nb);
9721 mlxsw_sp = router->mlxsw_sp;
9723 mutex_lock(&mlxsw_sp->router->lock);
9725 if (event == NETDEV_REGISTER) {
9726 err = mlxsw_sp_netdevice_register(router, dev);
9728 /* No need to roll this back, UNREGISTER will collect it
9734 if (mlxsw_sp_is_offload_xstats_event(event))
9735 err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev,
9737 else if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
9738 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
9740 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
9741 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
9743 else if (mlxsw_sp_is_router_event(event))
9744 err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
9745 else if (mlxsw_sp_is_vrf_event(event, ptr))
9746 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
9748 if (event == NETDEV_UNREGISTER)
9749 mlxsw_sp_netdevice_unregister(router, dev);
9752 mutex_unlock(&mlxsw_sp->router->lock);
9754 return notifier_from_errno(err);
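/* When a RIF is destroyed, the FDB entries that were installed for its
 * macvlan uppers are flushed below, since traffic towards those macvlans can
 * no longer be routed.
 */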
9757 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
9758 struct netdev_nested_priv *priv)
9760 struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
9762 if (!netif_is_macvlan(dev))
9765 return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9766 mlxsw_sp_fid_index(rif->fid), false);
9769 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
9771 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9772 struct netdev_nested_priv priv = {
9773 .data = (void *)rif,
9776 if (!netif_is_macvlan_port(dev))
9779 netdev_warn(dev, "Router interface is deleted. Upper macvlans will not work\n");
9780 return netdev_walk_all_upper_dev_rcu(dev,
9781 __mlxsw_sp_rif_macvlan_flush, &priv);
9784 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
9785 const struct mlxsw_sp_rif_params *params)
9787 struct mlxsw_sp_rif_subport *rif_subport;
9789 rif_subport = mlxsw_sp_rif_subport_rif(rif);
9790 refcount_set(&rif_subport->ref_count, 1);
9791 rif_subport->vid = params->vid;
9792 rif_subport->lag = params->lag;
9794 rif_subport->lag_id = params->lag_id;
9796 rif_subport->system_port = params->system_port;
9799 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
9801 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9802 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9803 struct mlxsw_sp_rif_subport *rif_subport;
9804 char ritr_pl[MLXSW_REG_RITR_LEN];
9807 rif_subport = mlxsw_sp_rif_subport_rif(rif);
9808 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
9809 rif->rif_index, rif->vr_id, dev->mtu);
9810 mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
9811 mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
9812 efid = mlxsw_sp_fid_index(rif->fid);
9813 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
9814 rif_subport->lag ? rif_subport->lag_id :
9815 rif_subport->system_port,
9817 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9820 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
9821 struct netlink_ext_ack *extack)
9823 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9827 err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
9828 &mac_profile, extack);
9831 rif->mac_profile_id = mac_profile;
9833 err = mlxsw_sp_rif_subport_op(rif, true);
9835 goto err_rif_subport_op;
9837 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9838 mlxsw_sp_fid_index(rif->fid), true);
9840 goto err_rif_fdb_op;
9842 err = mlxsw_sp_fid_rif_set(rif->fid, rif);
9844 goto err_fid_rif_set;
9849 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9850 mlxsw_sp_fid_index(rif->fid), false);
9852 mlxsw_sp_rif_subport_op(rif, false);
9854 mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
9858 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
9860 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9861 struct mlxsw_sp_fid *fid = rif->fid;
9863 mlxsw_sp_fid_rif_unset(fid);
9864 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9865 mlxsw_sp_fid_index(fid), false);
9866 mlxsw_sp_rif_macvlan_flush(rif);
9867 mlxsw_sp_rif_subport_op(rif, false);
9868 mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
9871 static struct mlxsw_sp_fid *
9872 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
9873 const struct mlxsw_sp_rif_params *params,
9874 struct netlink_ext_ack *extack)
9876 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
9879 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
9880 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
9881 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
9882 .setup = mlxsw_sp_rif_subport_setup,
9883 .configure = mlxsw_sp_rif_subport_configure,
9884 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
9885 .fid_get = mlxsw_sp_rif_subport_fid_get,
9888 static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
9890 enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
9891 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9892 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9893 char ritr_pl[MLXSW_REG_RITR_LEN];
9895 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
9897 mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
9898 mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
9899 mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);
9901 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
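/* The "router port" is a virtual port one past the last ASIC port. FID and
 * VLAN RIFs add it to their FID's MC and BC flood tables so that flooded
 * traffic is also delivered to the router.
 */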
9904 u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
9906 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
9909 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
9910 struct netlink_ext_ack *extack)
9912 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9913 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9914 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
9918 err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
9919 &mac_profile, extack);
9922 rif->mac_profile_id = mac_profile;
9924 err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
9926 goto err_rif_fid_op;
9928 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9929 mlxsw_sp_router_port(mlxsw_sp), true);
9931 goto err_fid_mc_flood_set;
9933 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9934 mlxsw_sp_router_port(mlxsw_sp), true);
9936 goto err_fid_bc_flood_set;
9938 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9939 mlxsw_sp_fid_index(rif->fid), true);
9941 goto err_rif_fdb_op;
9943 err = mlxsw_sp_fid_rif_set(rif->fid, rif);
9945 goto err_fid_rif_set;
9950 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9951 mlxsw_sp_fid_index(rif->fid), false);
9953 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9954 mlxsw_sp_router_port(mlxsw_sp), false);
9955 err_fid_bc_flood_set:
9956 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9957 mlxsw_sp_router_port(mlxsw_sp), false);
9958 err_fid_mc_flood_set:
9959 mlxsw_sp_rif_fid_op(rif, fid_index, false);
9961 mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
9965 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
9967 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9968 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
9969 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9970 struct mlxsw_sp_fid *fid = rif->fid;
9972 mlxsw_sp_fid_rif_unset(fid);
9973 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9974 mlxsw_sp_fid_index(fid), false);
9975 mlxsw_sp_rif_macvlan_flush(rif);
9976 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9977 mlxsw_sp_router_port(mlxsw_sp), false);
9978 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9979 mlxsw_sp_router_port(mlxsw_sp), false);
9980 mlxsw_sp_rif_fid_op(rif, fid_index, false);
9981 mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
9984 static struct mlxsw_sp_fid *
9985 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
9986 const struct mlxsw_sp_rif_params *params,
9987 struct netlink_ext_ack *extack)
9989 int rif_ifindex = mlxsw_sp_rif_dev_ifindex(rif);
9991 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif_ifindex);
9994 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9996 struct switchdev_notifier_fdb_info info = {};
9997 struct net_device *dev;
9999 dev = br_fdb_find_port(mlxsw_sp_rif_dev(rif), mac, 0);
10005 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
10009 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
10010 .type = MLXSW_SP_RIF_TYPE_FID,
10011 .rif_size = sizeof(struct mlxsw_sp_rif),
10012 .configure = mlxsw_sp_rif_fid_configure,
10013 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
10014 .fid_get = mlxsw_sp_rif_fid_fid_get,
10015 .fdb_del = mlxsw_sp_rif_fid_fdb_del,
10018 static struct mlxsw_sp_fid *
10019 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
10020 const struct mlxsw_sp_rif_params *params,
10021 struct netlink_ext_ack *extack)
10023 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10024 struct net_device *br_dev;
10026 if (WARN_ON(!params->vid))
10027 return ERR_PTR(-EINVAL);
10029 if (is_vlan_dev(dev)) {
10030 br_dev = vlan_dev_real_dev(dev);
10031 if (WARN_ON(!netif_is_bridge_master(br_dev)))
10032 return ERR_PTR(-EINVAL);
10035 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, params->vid);
10038 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
10040 struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
10041 struct switchdev_notifier_fdb_info info = {};
10042 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
10043 struct net_device *br_dev;
10044 struct net_device *dev;
10046 br_dev = is_vlan_dev(rif_dev) ? vlan_dev_real_dev(rif_dev) : rif_dev;
10047 dev = br_fdb_find_port(br_dev, mac, vid);
10053 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
10057 static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
10060 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10061 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10062 char ritr_pl[MLXSW_REG_RITR_LEN];
10064 mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
10065 dev->mtu, dev->dev_addr,
10066 rif->mac_profile_id, vid, efid);
10068 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10071 static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
10072 struct netlink_ext_ack *extack)
10074 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10075 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
10076 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10080 err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
10081 &mac_profile, extack);
10084 rif->mac_profile_id = mac_profile;
10086 err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
10088 goto err_rif_vlan_fid_op;
10090 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10091 mlxsw_sp_router_port(mlxsw_sp), true);
10093 goto err_fid_mc_flood_set;
10095 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10096 mlxsw_sp_router_port(mlxsw_sp), true);
10098 goto err_fid_bc_flood_set;
10100 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10101 mlxsw_sp_fid_index(rif->fid), true);
10103 goto err_rif_fdb_op;
10105 err = mlxsw_sp_fid_rif_set(rif->fid, rif);
10107 goto err_fid_rif_set;
10112 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10113 mlxsw_sp_fid_index(rif->fid), false);
10115 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10116 mlxsw_sp_router_port(mlxsw_sp), false);
10117 err_fid_bc_flood_set:
10118 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10119 mlxsw_sp_router_port(mlxsw_sp), false);
10120 err_fid_mc_flood_set:
10121 mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
10122 err_rif_vlan_fid_op:
10123 mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
10127 static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
10129 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10130 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
10131 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10133 mlxsw_sp_fid_rif_unset(rif->fid);
10134 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10135 mlxsw_sp_fid_index(rif->fid), false);
10136 mlxsw_sp_rif_macvlan_flush(rif);
10137 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10138 mlxsw_sp_router_port(mlxsw_sp), false);
10139 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10140 mlxsw_sp_router_port(mlxsw_sp), false);
10141 mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
10142 mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
10145 static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
10146 struct netlink_ext_ack *extack)
10148 return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
10151 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
10152 .type = MLXSW_SP_RIF_TYPE_VLAN,
10153 .rif_size = sizeof(struct mlxsw_sp_rif),
10154 .configure = mlxsw_sp1_rif_vlan_configure,
10155 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
10156 .fid_get = mlxsw_sp_rif_vlan_fid_get,
10157 .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
10160 static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
10161 struct netlink_ext_ack *extack)
10163 u16 efid = mlxsw_sp_fid_index(rif->fid);
10165 return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
10168 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
10169 .type = MLXSW_SP_RIF_TYPE_VLAN,
10170 .rif_size = sizeof(struct mlxsw_sp_rif),
10171 .configure = mlxsw_sp2_rif_vlan_configure,
10172 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
10173 .fid_get = mlxsw_sp_rif_vlan_fid_get,
10174 .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
10177 static struct mlxsw_sp_rif_ipip_lb *
10178 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
10180 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
10184 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
10185 const struct mlxsw_sp_rif_params *params)
10187 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
10188 struct mlxsw_sp_rif_ipip_lb *rif_lb;
10190 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
10192 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
10193 rif_lb->lb_config = params_lb->lb_config;
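/* IP-in-IP loopback RIFs differ between ASIC generations: on Spectrum-1 the
 * loopback is bound directly to the underlay virtual router (ul_vr_id),
 * while on Spectrum-2 and later it points at a shared, reference-counted
 * underlay RIF (ul_rif_id) allocated per virtual router.
 */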
10197 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
10198 struct netlink_ext_ack *extack)
10200 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10201 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10202 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
10203 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10204 struct mlxsw_sp_vr *ul_vr;
10207 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, extack);
10209 return PTR_ERR(ul_vr);
10211 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
10213 goto err_loopback_op;
10215 lb_rif->ul_vr_id = ul_vr->id;
10216 lb_rif->ul_rif_id = 0;
10217 ++ul_vr->rif_count;
10221 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
10225 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
10227 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10228 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10229 struct mlxsw_sp_vr *ul_vr;
10231 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
10232 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
10234 --ul_vr->rif_count;
10235 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
10238 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
10239 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
10240 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
10241 .setup = mlxsw_sp_rif_ipip_lb_setup,
10242 .configure = mlxsw_sp1_rif_ipip_lb_configure,
10243 .deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
10246 static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
10247 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
10248 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp1_rif_vlan_ops,
10249 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
10250 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
10254 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
10256 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10257 char ritr_pl[MLXSW_REG_RITR_LEN];
10259 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
10260 ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
10261 mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
10262 MLXSW_REG_RITR_LOOPBACK_GENERIC);
10264 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10267 static struct mlxsw_sp_rif *
10268 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
10269 struct mlxsw_sp_crif *ul_crif,
10270 struct netlink_ext_ack *extack)
10272 struct mlxsw_sp_rif *ul_rif;
10273 u8 rif_entries = 1;
10277 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
10279 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
10280 return ERR_PTR(err);
10283 ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id,
10287 goto err_rif_alloc;
10290 mlxsw_sp->router->rifs[rif_index] = ul_rif;
10291 ul_rif->mlxsw_sp = mlxsw_sp;
10292 ul_rif->rif_entries = rif_entries;
10293 err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
10295 goto ul_rif_op_err;
10297 atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
10301 mlxsw_sp->router->rifs[rif_index] = NULL;
10302 mlxsw_sp_rif_free(ul_rif);
10304 mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
10305 return ERR_PTR(err);
10308 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
10310 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10311 u8 rif_entries = ul_rif->rif_entries;
10312 u16 rif_index = ul_rif->rif_index;
10314 atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
10315 mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
10316 mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
10317 mlxsw_sp_rif_free(ul_rif);
10318 mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
10321 static struct mlxsw_sp_rif *
10322 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
10323 struct mlxsw_sp_crif *ul_crif,
10324 struct netlink_ext_ack *extack)
10326 struct mlxsw_sp_vr *vr;
10329 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
10331 return ERR_CAST(vr);
10333 if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
10334 return vr->ul_rif;
10336 vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, ul_crif, extack);
10337 if (IS_ERR(vr->ul_rif)) {
10338 err = PTR_ERR(vr->ul_rif);
10339 goto err_ul_rif_create;
10342 vr->rif_count++;
10343 refcount_set(&vr->ul_rif_refcnt, 1);
10345 return vr->ul_rif;
10347 err_ul_rif_create:
10348 mlxsw_sp_vr_put(mlxsw_sp, vr);
10349 return ERR_PTR(err);
10352 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
10354 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10355 struct mlxsw_sp_vr *vr;
10357 vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
10359 if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
10363 mlxsw_sp_ul_rif_destroy(ul_rif);
10364 mlxsw_sp_vr_put(mlxsw_sp, vr);
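/* The two helpers below wrap mlxsw_sp_ul_rif_get() / _put() with the router
 * lock for callers outside of the routing code, exposing the underlay RIF
 * only by its index.
 */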
10367 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
10368 u16 *ul_rif_index)
10370 struct mlxsw_sp_rif *ul_rif;
10373 mutex_lock(&mlxsw_sp->router->lock);
10374 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, NULL);
10375 if (IS_ERR(ul_rif)) {
10376 err = PTR_ERR(ul_rif);
10379 *ul_rif_index = ul_rif->rif_index;
10381 mutex_unlock(&mlxsw_sp->router->lock);
10385 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
10387 struct mlxsw_sp_rif *ul_rif;
10389 mutex_lock(&mlxsw_sp->router->lock);
10390 ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
10391 if (WARN_ON(!ul_rif))
10394 mlxsw_sp_ul_rif_put(ul_rif);
10396 mutex_unlock(&mlxsw_sp->router->lock);
10400 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
10401 struct netlink_ext_ack *extack)
10403 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10404 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10405 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
10406 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10407 struct mlxsw_sp_rif *ul_rif;
10410 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, extack);
10411 if (IS_ERR(ul_rif))
10412 return PTR_ERR(ul_rif);
10414 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
10416 goto err_loopback_op;
10418 lb_rif->ul_vr_id = 0;
10419 lb_rif->ul_rif_id = ul_rif->rif_index;
10424 mlxsw_sp_ul_rif_put(ul_rif);
10428 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
10430 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10431 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10432 struct mlxsw_sp_rif *ul_rif;
10434 ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
10435 mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
10436 mlxsw_sp_ul_rif_put(ul_rif);
10439 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
10440 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
10441 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
10442 .setup = mlxsw_sp_rif_ipip_lb_setup,
10443 .configure = mlxsw_sp2_rif_ipip_lb_configure,
10444 .deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
10447 static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
10448 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
10449 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp2_rif_vlan_ops,
10450 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
10451 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
10454 static int mlxsw_sp_rifs_table_init(struct mlxsw_sp *mlxsw_sp)
10456 struct gen_pool *rifs_table;
10459 rifs_table = gen_pool_create(0, -1);
10463 gen_pool_set_algo(rifs_table, gen_pool_first_fit_order_align,
10466 err = gen_pool_add(rifs_table, MLXSW_SP_ROUTER_GENALLOC_OFFSET,
10467 MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS), -1);
10469 goto err_gen_pool_add;
10471 mlxsw_sp->router->rifs_table = rifs_table;
10476 gen_pool_destroy(rifs_table);
10480 static void mlxsw_sp_rifs_table_fini(struct mlxsw_sp *mlxsw_sp)
10482 gen_pool_destroy(mlxsw_sp->router->rifs_table);
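/* RIF indexes are allocated from a genalloc pool using a first-fit,
 * order-aligned policy, so that RIFs which consume more than one index
 * entry (see the rif_entries arguments) stay naturally aligned.
 */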
10485 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
10487 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10488 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
10489 struct mlxsw_core *core = mlxsw_sp->core;
10492 if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
10494 mlxsw_sp->router->max_rif_mac_profile =
10495 MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);
10497 mlxsw_sp->router->rifs = kcalloc(max_rifs,
10498 sizeof(struct mlxsw_sp_rif *),
10500 if (!mlxsw_sp->router->rifs)
10503 err = mlxsw_sp_rifs_table_init(mlxsw_sp);
10505 goto err_rifs_table_init;
10507 idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
10508 atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
10509 atomic_set(&mlxsw_sp->router->rifs_count, 0);
10510 devl_resource_occ_get_register(devlink,
10511 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
10512 mlxsw_sp_rif_mac_profiles_occ_get,
10514 devl_resource_occ_get_register(devlink,
10515 MLXSW_SP_RESOURCE_RIFS,
10516 mlxsw_sp_rifs_occ_get,
10521 err_rifs_table_init:
10522 kfree(mlxsw_sp->router->rifs);
10526 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
10528 int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10529 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
10532 WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
10533 for (i = 0; i < max_rifs; i++)
10534 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
10536 devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
10537 devl_resource_occ_get_unregister(devlink,
10538 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
10539 WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
10540 idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
10541 mlxsw_sp_rifs_table_fini(mlxsw_sp);
10542 kfree(mlxsw_sp->router->rifs);
10546 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
10548 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
10550 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
10551 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
10554 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
10558 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
10560 err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
10563 err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
10567 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
10570 static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
10572 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
10573 return mlxsw_sp_ipips_init(mlxsw_sp);
10576 static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
10578 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
10579 return mlxsw_sp_ipips_init(mlxsw_sp);
10582 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
10584 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
10587 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
10589 struct mlxsw_sp_router *router;
10591 /* Flush pending FIB notifications and then flush the device's
10592 * table before requesting another dump. The FIB notification
10593 * block is unregistered, so no need to take RTNL.
10595 mlxsw_core_flush_owq();
10596 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
10597 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
10600 #ifdef CONFIG_IP_ROUTE_MULTIPATH
10601 struct mlxsw_sp_mp_hash_config {
10602 DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
10603 DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
10604 DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
10605 DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
10606 bool inc_parsing_depth;
10609 #define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
10610 bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)
10612 #define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
10613 bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)
10615 #define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
10616 bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)
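/* The multipath hash configuration is built as bitmaps of RECR2 header and
 * field enables: "headers"/"fields" describe the outer packet,
 * "inner_headers"/"inner_fields" the encapsulated packet, and
 * inc_parsing_depth is set when hashing on inner fields requires the parser
 * to look deeper into the packet.
 */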
10618 static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
10620 unsigned long *inner_headers = config->inner_headers;
10621 unsigned long *inner_fields = config->inner_fields;
10624 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
10625 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
10626 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
10627 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
10629 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
10630 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
10631 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
10632 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
10633 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
10634 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
10635 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
10636 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
10639 static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
10641 unsigned long *headers = config->headers;
10642 unsigned long *fields = config->fields;
10644 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
10645 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
10646 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
10647 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
10651 mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
10654 unsigned long *inner_headers = config->inner_headers;
10655 unsigned long *inner_fields = config->inner_fields;
10658 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
10659 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
10660 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
10661 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
10662 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
10663 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
10664 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
10665 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
10667 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
10668 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
10669 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
10670 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
10671 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
10673 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
10674 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
10675 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
10677 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
10678 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
10679 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
10680 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
10682 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
10683 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
10684 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
10685 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
10686 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
10687 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
10690 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
10691 struct mlxsw_sp_mp_hash_config *config)
10693 struct net *net = mlxsw_sp_net(mlxsw_sp);
10694 unsigned long *headers = config->headers;
10695 unsigned long *fields = config->fields;
10698 switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
10700 mlxsw_sp_mp4_hash_outer_addr(config);
10703 mlxsw_sp_mp4_hash_outer_addr(config);
10704 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
10705 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
10706 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10707 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10711 mlxsw_sp_mp4_hash_outer_addr(config);
10713 mlxsw_sp_mp_hash_inner_l3(config);
10716 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
10718 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
10719 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
10720 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
10721 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
10722 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
10723 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
10724 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
10725 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
10726 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
10727 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
10728 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10729 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
10730 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10732 mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
10737 static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
10739 unsigned long *headers = config->headers;
10740 unsigned long *fields = config->fields;
10742 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
10743 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
10744 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
10745 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
10746 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
10747 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
10750 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
10751 struct mlxsw_sp_mp_hash_config *config)
10753 u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
10754 unsigned long *headers = config->headers;
10755 unsigned long *fields = config->fields;
10757 switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
10759 mlxsw_sp_mp6_hash_outer_addr(config);
10760 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10761 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10764 mlxsw_sp_mp6_hash_outer_addr(config);
10765 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
10766 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10767 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10768 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10772 mlxsw_sp_mp6_hash_outer_addr(config);
10773 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10774 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10776 mlxsw_sp_mp_hash_inner_l3(config);
10777 config->inc_parsing_depth = true;
10781 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
10782 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
10783 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
10784 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
10785 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
10786 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
10788 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
10789 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
10790 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
10792 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
10793 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10794 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
10795 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10796 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
10797 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10798 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
10799 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10801 mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
10802 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
10803 config->inc_parsing_depth = true;
10808 static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
10809 bool old_inc_parsing_depth,
10810 bool new_inc_parsing_depth)
10814 if (!old_inc_parsing_depth && new_inc_parsing_depth) {
10815 err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
10816 if (err)
10817 return err;
10818 mlxsw_sp->router->inc_parsing_depth = true;
10819 } else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
10820 mlxsw_sp_parsing_depth_dec(mlxsw_sp);
10821 mlxsw_sp->router->inc_parsing_depth = false;
10822 }
10824 return 0;
10825 }
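/* Hashing on inner (encapsulated) headers requires the port parser to look
 * deeper into the packet, so the parsing depth is bumped while such a
 * configuration is in effect and released once it is no longer needed.
 */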
10827 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
10829 bool old_inc_parsing_depth, new_inc_parsing_depth;
10830 struct mlxsw_sp_mp_hash_config config = {};
10831 char recr2_pl[MLXSW_REG_RECR2_LEN];
10836 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
10837 mlxsw_reg_recr2_pack(recr2_pl, seed);
10838 mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
10839 mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);
10841 old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
10842 new_inc_parsing_depth = config.inc_parsing_depth;
10843 err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
10844 old_inc_parsing_depth,
10845 new_inc_parsing_depth);
10849 for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
10850 mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
10851 for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
10852 mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
10853 for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
10854 mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
10855 for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
10856 mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);
10858 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
10860 goto err_reg_write;
10865 mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
10866 old_inc_parsing_depth);
10870 static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
10872 bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
10874 mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth,
10878 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
10883 static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
10888 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
10890 char rdpm_pl[MLXSW_REG_RDPM_LEN];
10893 MLXSW_REG_ZERO(rdpm, rdpm_pl);
10895 /* HW is determining switch priority based on DSCP-bits, but the
10896 * kernel is still doing that based on the ToS. Since there's a
10897 * mismatch in bits we need to make sure to translate the right
10898 * value ToS would observe, skipping the 2 least-significant ECN bits.
10900 for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
10901 mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
10903 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
10906 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
10908 struct net *net = mlxsw_sp_net(mlxsw_sp);
10909 char rgcr_pl[MLXSW_REG_RGCR_LEN];
10913 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
10915 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10916 usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
10918 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
10919 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
10920 mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
10921 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
10924 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
10926 char rgcr_pl[MLXSW_REG_RGCR_LEN];
10928 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
10929 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
10932 static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp,
10933 struct netlink_ext_ack *extack)
10935 struct mlxsw_sp_router *router = mlxsw_sp->router;
10936 struct mlxsw_sp_rif *lb_rif;
10939 router->lb_crif = mlxsw_sp_crif_alloc(NULL);
10940 if (!router->lb_crif)
10941 return -ENOMEM;
10943 /* Create a generic loopback RIF associated with the main table
10944 * (default VRF). Any table can be used, but the main table exists
10945 * anyway, so we do not waste resources. Loopback RIFs are usually
10946 * created with a NULL CRIF, but this RIF is used as a fallback RIF
10947 * for blackhole nexthops, and nexthops expect to have a valid CRIF.
10948 */
10949 lb_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN, router->lb_crif,
10950 extack);
10951 if (IS_ERR(lb_rif)) {
10952 err = PTR_ERR(lb_rif);
10953 goto err_ul_rif_get;
10954 }
10956 return 0;
10958 err_ul_rif_get:
10959 mlxsw_sp_crif_free(router->lb_crif);
10960 return err;
10963 static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
10965 mlxsw_sp_ul_rif_put(mlxsw_sp->router->lb_crif->rif);
10966 mlxsw_sp_crif_free(mlxsw_sp->router->lb_crif);
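/* Per-ASIC router initialization below only selects the RIF ops array and
 * the supported adjacency group size ranges; the rest of the router setup is
 * common to all generations.
 */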
10969 static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
10971 size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);
10973 mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
10974 mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
10975 mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
10980 const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
10981 .init = mlxsw_sp1_router_init,
10982 .ipips_init = mlxsw_sp1_ipips_init,
10985 static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
10987 size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);
10989 mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
10990 mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
10991 mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
10996 const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
10997 .init = mlxsw_sp2_router_init,
10998 .ipips_init = mlxsw_sp2_ipips_init,
11001 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
11002 struct netlink_ext_ack *extack)
11004 struct mlxsw_sp_router *router;
11005 struct notifier_block *nb;
11008 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
11011 mutex_init(&router->lock);
11012 mlxsw_sp->router = router;
11013 router->mlxsw_sp = mlxsw_sp;
11015 err = mlxsw_sp->router_ops->init(mlxsw_sp);
11017 goto err_router_ops_init;
11019 INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
11020 INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
11021 mlxsw_sp_nh_grp_activity_work);
11022 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
11023 err = __mlxsw_sp_router_init(mlxsw_sp);
11025 goto err_router_init;
11027 err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
11029 goto err_ipips_init;
11031 err = rhashtable_init(&mlxsw_sp->router->crif_ht,
11032 &mlxsw_sp_crif_ht_params);
11034 goto err_crif_ht_init;
11036 err = mlxsw_sp_rifs_init(mlxsw_sp);
11038 goto err_rifs_init;
11040 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
11041 &mlxsw_sp_nexthop_ht_params);
11043 goto err_nexthop_ht_init;
11045 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
11046 &mlxsw_sp_nexthop_group_ht_params);
11048 goto err_nexthop_group_ht_init;
11050 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
11051 err = mlxsw_sp_lpm_init(mlxsw_sp);
11055 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
11059 err = mlxsw_sp_vrs_init(mlxsw_sp);
11063 err = mlxsw_sp_lb_rif_init(mlxsw_sp, extack);
11065 goto err_lb_rif_init;
11067 err = mlxsw_sp_neigh_init(mlxsw_sp);
11069 goto err_neigh_init;
11071 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
11073 goto err_mp_hash_init;
11075 err = mlxsw_sp_dscp_init(mlxsw_sp);
11077 goto err_dscp_init;
11079 router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
11080 err = register_inetaddr_notifier(&router->inetaddr_nb);
11082 goto err_register_inetaddr_notifier;
11084 router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
11085 err = register_inet6addr_notifier(&router->inet6addr_nb);
11087 goto err_register_inet6addr_notifier;
11089 router->inetaddr_valid_nb.notifier_call = mlxsw_sp_inetaddr_valid_event;
11090 err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
11092 goto err_register_inetaddr_valid_notifier;
11094 nb = &router->inet6addr_valid_nb;
11095 nb->notifier_call = mlxsw_sp_inet6addr_valid_event;
11096 err = register_inet6addr_validator_notifier(nb);
11098 goto err_register_inet6addr_valid_notifier;
11100 mlxsw_sp->router->netevent_nb.notifier_call =
11101 mlxsw_sp_router_netevent_event;
11102 err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
11104 goto err_register_netevent_notifier;
11106 mlxsw_sp->router->nexthop_nb.notifier_call =
11107 mlxsw_sp_nexthop_obj_event;
11108 err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
11109 &mlxsw_sp->router->nexthop_nb,
11112 goto err_register_nexthop_notifier;
11114 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
11115 err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
11116 &mlxsw_sp->router->fib_nb,
11117 mlxsw_sp_router_fib_dump_flush, extack);
11119 goto err_register_fib_notifier;
11121 mlxsw_sp->router->netdevice_nb.notifier_call =
11122 mlxsw_sp_router_netdevice_event;
11123 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
11124 &mlxsw_sp->router->netdevice_nb);
11126 goto err_register_netdev_notifier;
11130 err_register_netdev_notifier:
11131 unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
11132 &mlxsw_sp->router->fib_nb);
11133 err_register_fib_notifier:
11134 unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
11135 &mlxsw_sp->router->nexthop_nb);
11136 err_register_nexthop_notifier:
11137 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
11138 err_register_netevent_notifier:
11139 unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
11140 err_register_inet6addr_valid_notifier:
11141 unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
11142 err_register_inetaddr_valid_notifier:
11143 unregister_inet6addr_notifier(&router->inet6addr_nb);
11144 err_register_inet6addr_notifier:
11145 unregister_inetaddr_notifier(&router->inetaddr_nb);
11146 err_register_inetaddr_notifier:
11147 mlxsw_core_flush_owq();
11149 mlxsw_sp_mp_hash_fini(mlxsw_sp);
11151 mlxsw_sp_neigh_fini(mlxsw_sp);
11153 mlxsw_sp_lb_rif_fini(mlxsw_sp);
11155 mlxsw_sp_vrs_fini(mlxsw_sp);
11157 mlxsw_sp_mr_fini(mlxsw_sp);
11159 mlxsw_sp_lpm_fini(mlxsw_sp);
11161 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
11162 err_nexthop_group_ht_init:
11163 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
11164 err_nexthop_ht_init:
11165 mlxsw_sp_rifs_fini(mlxsw_sp);
11167 rhashtable_destroy(&mlxsw_sp->router->crif_ht);
11169 mlxsw_sp_ipips_fini(mlxsw_sp);
11171 __mlxsw_sp_router_fini(mlxsw_sp);
11173 cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
11174 err_router_ops_init:
11175 mutex_destroy(&mlxsw_sp->router->lock);
11176 kfree(mlxsw_sp->router);
11180 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
11182 struct mlxsw_sp_router *router = mlxsw_sp->router;
11184 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
11185 &router->netdevice_nb);
11186 unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &router->fib_nb);
11187 unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
11188 &router->nexthop_nb);
11189 unregister_netevent_notifier(&router->netevent_nb);
11190 unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
11191 unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
11192 unregister_inet6addr_notifier(&router->inet6addr_nb);
11193 unregister_inetaddr_notifier(&router->inetaddr_nb);
11194 mlxsw_core_flush_owq();
11195 mlxsw_sp_mp_hash_fini(mlxsw_sp);
11196 mlxsw_sp_neigh_fini(mlxsw_sp);
11197 mlxsw_sp_lb_rif_fini(mlxsw_sp);
11198 mlxsw_sp_vrs_fini(mlxsw_sp);
11199 mlxsw_sp_mr_fini(mlxsw_sp);
11200 mlxsw_sp_lpm_fini(mlxsw_sp);
11201 rhashtable_destroy(&router->nexthop_group_ht);
11202 rhashtable_destroy(&router->nexthop_ht);
11203 mlxsw_sp_rifs_fini(mlxsw_sp);
11204 rhashtable_destroy(&mlxsw_sp->router->crif_ht);
11205 mlxsw_sp_ipips_fini(mlxsw_sp);
11206 __mlxsw_sp_router_fini(mlxsw_sp);
11207 cancel_delayed_work_sync(&router->nh_grp_activity_dw);
11208 mutex_destroy(&router->lock);