1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
4 #include <linux/etherdevice.h>
5 #include <linux/inetdevice.h>
6 #include <net/netevent.h>
8 #include <net/dst_metadata.h>
13 #include "../nfp_net_repr.h"
14 #include "../nfp_net.h"
16 #define NFP_FL_MAX_ROUTES 32
18 #define NFP_TUN_PRE_TUN_RULE_LIMIT 32
19 #define NFP_TUN_PRE_TUN_RULE_DEL BIT(0)
20 #define NFP_TUN_PRE_TUN_IDX_BIT BIT(3)
21 #define NFP_TUN_PRE_TUN_IPV6_BIT BIT(7)
24 * struct nfp_tun_pre_tun_rule - rule matched before decap
25 * @flags: options for the rule offload
26 * @port_idx: index of destination MAC address for the rule
27 * @vlan_tci: VLAN info associated with MAC
28 * @host_ctx_id: stats context of rule to update
30 struct nfp_tun_pre_tun_rule {
38 * struct nfp_tun_active_tuns - periodic message of active tunnels
39 * @seq: sequence number of the message
40 * @count: number of tunnels reported in the message
41 * @flags: options part of the request
42 * @tun_info.ipv4: dest IPv4 address of active route
43 * @tun_info.egress_port: port the encapsulated packet egressed
44 * @tun_info.extra: reserved for future use
45 * @tun_info: tunnels that have sent traffic in reported period
47 struct nfp_tun_active_tuns {
51 struct route_ip_info {
59 * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
60 * @seq: sequence number of the message
61 * @count: number of tunnels reported in the message
62 * @flags: options part of the request
63 * @tun_info.ipv6: dest IPv6 address of active route
64 * @tun_info.egress_port: port the encapsulated packet egressed
65 * @tun_info.extra: reserved for future use
66 * @tun_info: tunnels that have sent traffic in reported period
68 struct nfp_tun_active_tuns_v6 {
72 struct route_ip_info_v6 {
80 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
81 * @ingress_port: ingress port of packet that signalled request
82 * @ipv4_addr: destination ipv4 address for route
83 * @reserved: reserved for future use
85 struct nfp_tun_req_route_ipv4 {
92 * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
93 * @ingress_port: ingress port of packet that signalled request
94 * @ipv6_addr: destination ipv6 address for route
96 struct nfp_tun_req_route_ipv6 {
98 struct in6_addr ipv6_addr;
102 * struct nfp_offloaded_route - routes that are offloaded to the NFP
103 * @list: list pointer
104 * @ip_add: destination of route - can be IPv4 or IPv6
106 struct nfp_offloaded_route {
107 struct list_head list;
111 #define NFP_FL_IPV4_ADDRS_MAX 32
114 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
115 * @count: number of IPs populated in the array
116 * @ipv4_addr: array of IPV4_ADDRS_MAX 32-bit IPv4 addresses
118 struct nfp_tun_ipv4_addr {
120 __be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
124 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
125 * @ipv4_addr: IP address
126 * @ref_count: number of rules currently using this IP
127 * @list: list pointer
129 struct nfp_ipv4_addr_entry {
132 struct list_head list;
135 #define NFP_FL_IPV6_ADDRS_MAX 4
138 * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
139 * @count: number of IPs populated in the array
140 * @ipv6_addr: array of IPV6_ADDRS_MAX 128-bit IPv6 addresses
142 struct nfp_tun_ipv6_addr {
144 struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
147 #define NFP_TUN_MAC_OFFLOAD_DEL_FLAG 0x2
150 * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
151 * @flags: MAC address offload options
152 * @count: number of MAC addresses in the message (should be 1)
153 * @index: index of MAC address in the lookup table
154 * @addr: interface MAC address
156 struct nfp_tun_mac_addr_offload {
163 enum nfp_flower_mac_offload_cmd {
164 NFP_TUNNEL_MAC_OFFLOAD_ADD = 0,
165 NFP_TUNNEL_MAC_OFFLOAD_DEL = 1,
166 NFP_TUNNEL_MAC_OFFLOAD_MOD = 2,
169 #define NFP_MAX_MAC_INDEX 0xff
172 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
173 * @ht_node: Hashtable entry
174 * @addr: Offloaded MAC address
175 * @index: Offloaded index for given MAC address
176 * @ref_count: Number of devs using this MAC address
177 * @repr_list: List of reprs sharing this MAC address
178 * @bridge_count: Number of bridge/internal devs with MAC
180 struct nfp_tun_offloaded_mac {
181 struct rhash_head ht_node;
185 struct list_head repr_list;
189 static const struct rhashtable_params offloaded_macs_params = {
190 .key_offset = offsetof(struct nfp_tun_offloaded_mac, addr),
191 .head_offset = offsetof(struct nfp_tun_offloaded_mac, ht_node),
193 .automatic_shrinking = true,
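/* Handle a periodic TUN_ACTIVE_TUNS cmsg from the firmware: validate the
 * reported tunnel count and payload length, then resolve each egress port
 * to a netdev, look up the destination in the ARP table and poke the
 * neighbour so its used timestamp is refreshed.
 */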
196 void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
198 struct nfp_tun_active_tuns *payload;
199 struct net_device *netdev;
200 int count, i, pay_len;
205 payload = nfp_flower_cmsg_get_data(skb);
206 count = be32_to_cpu(payload->count);
207 if (count > NFP_FL_MAX_ROUTES) {
208 nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
212 pay_len = nfp_flower_cmsg_get_data_len(skb);
213 if (pay_len != struct_size(payload, tun_info, count)) {
214 nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
219 for (i = 0; i < count; i++) {
220 ipv4_addr = payload->tun_info[i].ipv4;
221 port = be32_to_cpu(payload->tun_info[i].egress_port);
222 netdev = nfp_app_dev_get(app, port, NULL);
226 n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
230 /* Update the used timestamp of neighbour */
231 neigh_event_send(n, NULL);
237 void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
239 #if IS_ENABLED(CONFIG_IPV6)
240 struct nfp_tun_active_tuns_v6 *payload;
241 struct net_device *netdev;
242 int count, i, pay_len;
247 payload = nfp_flower_cmsg_get_data(skb);
248 count = be32_to_cpu(payload->count);
249 if (count > NFP_FL_IPV6_ADDRS_MAX) {
250 nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
254 pay_len = nfp_flower_cmsg_get_data_len(skb);
255 if (pay_len != struct_size(payload, tun_info, count)) {
256 nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
261 for (i = 0; i < count; i++) {
262 ipv6_add = &payload->tun_info[i].ipv6;
263 port = be32_to_cpu(payload->tun_info[i].egress_port);
264 netdev = nfp_app_dev_get(app, port, NULL);
268 n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
272 /* Update the used timestamp of neighbour */
273 neigh_event_send(n, NULL);
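/* Build and send a tunnel config cmsg to the firmware. If the firmware
 * lacks the DECAP_V2 or TUNNEL_NEIGH_LAG features, the optional ext/lag
 * trailers are trimmed from neighbour messages before the cmsg is
 * allocated and transmitted on the control channel.
 */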
281 nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
284 struct nfp_flower_priv *priv = app->priv;
288 if (!(priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) &&
289 (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
290 mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6))
291 plen -= sizeof(struct nfp_tun_neigh_ext);
293 if (!(priv->flower_ext_feats & NFP_FL_FEATS_TUNNEL_NEIGH_LAG) &&
294 (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
295 mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6))
296 plen -= sizeof(struct nfp_tun_neigh_lag);
298 skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
302 msg = nfp_flower_cmsg_get_data(skb);
303 memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));
305 nfp_ctrl_tx(app->ctrl, skb);
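/* Link a neighbour entry to a pre-tunnel (predt) flow when their address
 * families and MAC addresses match: the neighbour is added to the predt
 * nn_list and its ext fields are filled with the flow's host context id
 * and VLAN information.
 */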
310 nfp_tun_mutual_link(struct nfp_predt_entry *predt,
311 struct nfp_neigh_entry *neigh)
313 struct nfp_fl_payload *flow_pay = predt->flow_pay;
314 struct nfp_tun_neigh_ext *ext;
315 struct nfp_tun_neigh *common;
317 if (flow_pay->pre_tun_rule.is_ipv6 != neigh->is_ipv6)
320 /* In the case of bonding it is possible that there might already
321 * be a flow linked (as the MAC address gets shared). If a flow
322 * is already linked just return.
327 common = neigh->is_ipv6 ?
328 &((struct nfp_tun_neigh_v6 *)neigh->payload)->common :
329 &((struct nfp_tun_neigh_v4 *)neigh->payload)->common;
330 ext = neigh->is_ipv6 ?
331 &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
332 &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
334 if (memcmp(flow_pay->pre_tun_rule.loc_mac,
335 common->src_addr, ETH_ALEN) ||
336 memcmp(flow_pay->pre_tun_rule.rem_mac,
337 common->dst_addr, ETH_ALEN))
340 list_add(&neigh->list_head, &predt->nn_list);
342 ext->host_ctx = flow_pay->meta.host_ctx_id;
343 ext->vlan_tci = flow_pay->pre_tun_rule.vlan_tci;
344 ext->vlan_tpid = flow_pay->pre_tun_rule.vlan_tpid;
348 nfp_tun_link_predt_entries(struct nfp_app *app,
349 struct nfp_neigh_entry *nn_entry)
351 struct nfp_flower_priv *priv = app->priv;
352 struct nfp_predt_entry *predt, *tmp;
354 list_for_each_entry_safe(predt, tmp, &priv->predt_list, list_head) {
355 nfp_tun_mutual_link(predt, nn_entry);
359 void nfp_tun_link_and_update_nn_entries(struct nfp_app *app,
360 struct nfp_predt_entry *predt)
362 struct nfp_flower_priv *priv = app->priv;
363 struct nfp_neigh_entry *nn_entry;
364 struct rhashtable_iter iter;
368 rhashtable_walk_enter(&priv->neigh_table, &iter);
369 rhashtable_walk_start(&iter);
370 while ((nn_entry = rhashtable_walk_next(&iter)) != NULL) {
371 if (IS_ERR(nn_entry))
373 nfp_tun_mutual_link(predt, nn_entry);
374 neigh_size = nn_entry->is_ipv6 ?
375 sizeof(struct nfp_tun_neigh_v6) :
376 sizeof(struct nfp_tun_neigh_v4);
377 type = nn_entry->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
378 NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
379 nfp_flower_xmit_tun_conf(app, type, neigh_size,
383 rhashtable_walk_stop(&iter);
384 rhashtable_walk_exit(&iter);
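/* Walk the neighbour table, invalidate the ext fields of each cached
 * entry, push the resulting neighbour messages to the firmware and remove
 * the entries from the table.
 */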
387 static void nfp_tun_cleanup_nn_entries(struct nfp_app *app)
389 struct nfp_flower_priv *priv = app->priv;
390 struct nfp_neigh_entry *neigh;
391 struct nfp_tun_neigh_ext *ext;
392 struct rhashtable_iter iter;
396 rhashtable_walk_enter(&priv->neigh_table, &iter);
397 rhashtable_walk_start(&iter);
398 while ((neigh = rhashtable_walk_next(&iter)) != NULL) {
401 ext = neigh->is_ipv6 ?
402 &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
403 &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
404 ext->host_ctx = cpu_to_be32(U32_MAX);
405 ext->vlan_tpid = cpu_to_be16(U16_MAX);
406 ext->vlan_tci = cpu_to_be16(U16_MAX);
408 neigh_size = neigh->is_ipv6 ?
409 sizeof(struct nfp_tun_neigh_v6) :
410 sizeof(struct nfp_tun_neigh_v4);
411 type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
412 NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
413 nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload,
416 rhashtable_remove_fast(&priv->neigh_table, &neigh->ht_node,
419 list_del(&neigh->list_head);
422 rhashtable_walk_stop(&iter);
423 rhashtable_walk_exit(&iter);
426 void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app,
427 struct nfp_predt_entry *predt)
429 struct nfp_neigh_entry *neigh, *tmp;
430 struct nfp_tun_neigh_ext *ext;
434 list_for_each_entry_safe(neigh, tmp, &predt->nn_list, list_head) {
435 ext = neigh->is_ipv6 ?
436 &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
437 &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
439 ext->host_ctx = cpu_to_be32(U32_MAX);
440 ext->vlan_tpid = cpu_to_be16(U16_MAX);
441 ext->vlan_tci = cpu_to_be16(U16_MAX);
442 list_del(&neigh->list_head);
443 neigh_size = neigh->is_ipv6 ?
444 sizeof(struct nfp_tun_neigh_v6) :
445 sizeof(struct nfp_tun_neigh_v4);
446 type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
447 NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
448 nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload,
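/* Create, update or remove the cached neighbour entry for a route and
 * mirror the change to the firmware: new valid neighbours are inserted
 * into the table and linked to pre-tunnel rules, invalidated neighbours
 * are removed (a neigh probe is triggered to confirm the state), and
 * existing entries are re-sent when the destination MAC changes or an
 * override is requested.
 */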
454 nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
455 void *flow, struct neighbour *neigh, bool is_ipv6,
458 bool neigh_invalid = !(neigh->nud_state & NUD_VALID) || neigh->dead;
459 size_t neigh_size = is_ipv6 ? sizeof(struct nfp_tun_neigh_v6) :
460 sizeof(struct nfp_tun_neigh_v4);
461 unsigned long cookie = (unsigned long)neigh;
462 struct nfp_flower_priv *priv = app->priv;
463 struct nfp_tun_neigh_lag lag_info;
464 struct nfp_neigh_entry *nn_entry;
468 port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
472 if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT) {
473 memset(&lag_info, 0, sizeof(struct nfp_tun_neigh_lag));
474 nfp_flower_lag_get_info_from_netdev(app, netdev, &lag_info);
477 spin_lock_bh(&priv->predt_lock);
478 nn_entry = rhashtable_lookup_fast(&priv->neigh_table, &cookie,
480 if (!nn_entry && !neigh_invalid) {
481 struct nfp_tun_neigh_ext *ext;
482 struct nfp_tun_neigh_lag *lag;
483 struct nfp_tun_neigh *common;
485 nn_entry = kzalloc(sizeof(*nn_entry) + neigh_size,
490 nn_entry->payload = (char *)&nn_entry[1];
491 nn_entry->neigh_cookie = cookie;
492 nn_entry->is_ipv6 = is_ipv6;
493 nn_entry->flow = NULL;
495 struct flowi6 *flowi6 = (struct flowi6 *)flow;
496 struct nfp_tun_neigh_v6 *payload;
498 payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
499 payload->src_ipv6 = flowi6->saddr;
500 payload->dst_ipv6 = flowi6->daddr;
501 common = &payload->common;
504 mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
506 struct flowi4 *flowi4 = (struct flowi4 *)flow;
507 struct nfp_tun_neigh_v4 *payload;
509 payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
510 payload->src_ipv4 = flowi4->saddr;
511 payload->dst_ipv4 = flowi4->daddr;
512 common = &payload->common;
515 mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
517 ext->host_ctx = cpu_to_be32(U32_MAX);
518 ext->vlan_tpid = cpu_to_be16(U16_MAX);
519 ext->vlan_tci = cpu_to_be16(U16_MAX);
520 ether_addr_copy(common->src_addr, netdev->dev_addr);
521 neigh_ha_snapshot(common->dst_addr, neigh, netdev);
523 if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT)
524 memcpy(lag, &lag_info, sizeof(struct nfp_tun_neigh_lag));
525 common->port_id = cpu_to_be32(port_id);
527 if (rhashtable_insert_fast(&priv->neigh_table,
532 nfp_tun_link_predt_entries(app, nn_entry);
533 nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
536 } else if (nn_entry && neigh_invalid) {
538 struct flowi6 *flowi6 = (struct flowi6 *)flow;
539 struct nfp_tun_neigh_v6 *payload;
541 payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
542 memset(payload, 0, sizeof(struct nfp_tun_neigh_v6));
543 payload->dst_ipv6 = flowi6->daddr;
544 mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
546 struct flowi4 *flowi4 = (struct flowi4 *)flow;
547 struct nfp_tun_neigh_v4 *payload;
549 payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
550 memset(payload, 0, sizeof(struct nfp_tun_neigh_v4));
551 payload->dst_ipv4 = flowi4->daddr;
552 mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
554 /* Trigger ARP to verify invalid neighbour state. */
555 neigh_event_send(neigh, NULL);
556 rhashtable_remove_fast(&priv->neigh_table,
560 nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
565 list_del(&nn_entry->list_head);
567 } else if (nn_entry && !neigh_invalid) {
568 struct nfp_tun_neigh *common;
569 u8 dst_addr[ETH_ALEN];
573 struct nfp_tun_neigh_v6 *payload;
575 payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
576 common = &payload->common;
577 mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
579 struct nfp_tun_neigh_v4 *payload;
581 payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
582 common = &payload->common;
583 mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
586 ether_addr_copy(dst_addr, common->dst_addr);
587 neigh_ha_snapshot(common->dst_addr, neigh, netdev);
588 is_mac_change = !ether_addr_equal(dst_addr, common->dst_addr);
589 if (override || is_mac_change) {
590 if (is_mac_change && nn_entry->flow) {
591 list_del(&nn_entry->list_head);
592 nn_entry->flow = NULL;
594 nfp_tun_link_predt_entries(app, nn_entry);
595 nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
601 spin_unlock_bh(&priv->predt_lock);
606 spin_unlock_bh(&priv->predt_lock);
607 nfp_flower_cmsg_warn(app, "Neighbour configuration failed.\n");
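/* netevent notifier callback. On NETEVENT_NEIGH_UPDATE for a netdev the
 * app can offload, build a flowi4/flowi6 keyed on the neighbour's primary
 * key, perform a route lookup for valid neighbours to fill in the source
 * address, and program the result via nfp_tun_write_neigh().
 */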
611 nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
614 struct nfp_flower_priv *app_priv;
615 struct netevent_redirect *redir;
622 case NETEVENT_REDIRECT:
623 redir = (struct netevent_redirect *)ptr;
626 case NETEVENT_NEIGH_UPDATE:
627 n = (struct neighbour *)ptr;
633 neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead;
635 app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
638 if (!nfp_flower_get_port_id_from_netdev(app, n->dev))
641 #if IS_ENABLED(CONFIG_INET)
642 if (n->tbl->family == AF_INET6) {
643 #if IS_ENABLED(CONFIG_IPV6)
644 struct flowi6 flow6 = {};
646 flow6.daddr = *(struct in6_addr *)n->primary_key;
647 if (!neigh_invalid) {
648 struct dst_entry *dst;
649 /* Use ip6_dst_lookup_flow to populate flow6->saddr
650 * and other fields. This information is only needed
651 * for new entries, lookup can be skipped when an entry
652 * gets invalidated - as only the daddr is needed for
655 dst = ip6_dst_lookup_flow(dev_net(n->dev), NULL,
662 nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false);
665 #endif /* CONFIG_IPV6 */
667 struct flowi4 flow4 = {};
669 flow4.daddr = *(__be32 *)n->primary_key;
670 if (!neigh_invalid) {
672 /* Use ip_route_output_key to populate flow4->saddr and
673 * other fields. This information is only needed for
674 * new entries, lookup can be skipped when an entry
675 * gets invalidated - as only the daddr is needed for
678 rt = ip_route_output_key(dev_net(n->dev), &flow4);
679 err = PTR_ERR_OR_ZERO(rt);
685 nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false);
689 #endif /* CONFIG_INET */
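/* Handle a firmware request to resolve an IPv4 route: look up the route
 * in the namespace of the ingress port, fetch the neighbour entry for the
 * destination and reply through nfp_tun_write_neigh(). A warning is
 * logged if no route can be found.
 */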
694 void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
696 struct nfp_tun_req_route_ipv4 *payload;
697 struct net_device *netdev;
698 struct flowi4 flow = {};
703 payload = nfp_flower_cmsg_get_data(skb);
706 netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
708 goto fail_rcu_unlock;
710 flow.daddr = payload->ipv4_addr;
711 flow.flowi4_proto = IPPROTO_UDP;
713 #if IS_ENABLED(CONFIG_INET)
714 /* Do a route lookup on same namespace as ingress port. */
715 rt = ip_route_output_key(dev_net(netdev), &flow);
716 err = PTR_ERR_OR_ZERO(rt);
718 goto fail_rcu_unlock;
720 goto fail_rcu_unlock;
723 /* Get the neighbour entry for the lookup */
724 n = dst_neigh_lookup(&rt->dst, &flow.daddr);
727 goto fail_rcu_unlock;
728 nfp_tun_write_neigh(n->dev, app, &flow, n, false, true);
735 nfp_flower_cmsg_warn(app, "Requested route not found.\n");
738 void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
740 struct nfp_tun_req_route_ipv6 *payload;
741 struct net_device *netdev;
742 struct flowi6 flow = {};
743 struct dst_entry *dst;
746 payload = nfp_flower_cmsg_get_data(skb);
749 netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
751 goto fail_rcu_unlock;
753 flow.daddr = payload->ipv6_addr;
754 flow.flowi6_proto = IPPROTO_UDP;
756 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
757 dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
760 goto fail_rcu_unlock;
762 goto fail_rcu_unlock;
765 n = dst_neigh_lookup(dst, &flow.daddr);
768 goto fail_rcu_unlock;
770 nfp_tun_write_neigh(n->dev, app, &flow, n, true, true);
777 nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
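/* Rebuild the full list of offloaded IPv4 tunnel endpoint addresses under
 * the ipv4_off_lock and push it to the firmware as a single TUN_IPS cmsg.
 */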
780 static void nfp_tun_write_ipv4_list(struct nfp_app *app)
782 struct nfp_flower_priv *priv = app->priv;
783 struct nfp_ipv4_addr_entry *entry;
784 struct nfp_tun_ipv4_addr payload;
785 struct list_head *ptr, *storage;
788 memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
789 mutex_lock(&priv->tun.ipv4_off_lock);
791 list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
792 if (count >= NFP_FL_IPV4_ADDRS_MAX) {
793 mutex_unlock(&priv->tun.ipv4_off_lock);
794 nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
797 entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
798 payload.ipv4_addr[count++] = entry->ipv4_addr;
800 payload.count = cpu_to_be32(count);
801 mutex_unlock(&priv->tun.ipv4_off_lock);
803 nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
804 sizeof(struct nfp_tun_ipv4_addr),
805 &payload, GFP_KERNEL);
808 void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
810 struct nfp_flower_priv *priv = app->priv;
811 struct nfp_ipv4_addr_entry *entry;
812 struct list_head *ptr, *storage;
814 mutex_lock(&priv->tun.ipv4_off_lock);
815 list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
816 entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
817 if (entry->ipv4_addr == ipv4) {
819 mutex_unlock(&priv->tun.ipv4_off_lock);
824 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
826 mutex_unlock(&priv->tun.ipv4_off_lock);
827 nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
830 entry->ipv4_addr = ipv4;
831 entry->ref_count = 1;
832 list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
833 mutex_unlock(&priv->tun.ipv4_off_lock);
835 nfp_tun_write_ipv4_list(app);
838 void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
840 struct nfp_flower_priv *priv = app->priv;
841 struct nfp_ipv4_addr_entry *entry;
842 struct list_head *ptr, *storage;
844 mutex_lock(&priv->tun.ipv4_off_lock);
845 list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
846 entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
847 if (entry->ipv4_addr == ipv4) {
849 if (!entry->ref_count) {
850 list_del(&entry->list);
856 mutex_unlock(&priv->tun.ipv4_off_lock);
858 nfp_tun_write_ipv4_list(app);
861 static void nfp_tun_write_ipv6_list(struct nfp_app *app)
863 struct nfp_flower_priv *priv = app->priv;
864 struct nfp_ipv6_addr_entry *entry;
865 struct nfp_tun_ipv6_addr payload;
868 memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
869 mutex_lock(&priv->tun.ipv6_off_lock);
870 list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
871 if (count >= NFP_FL_IPV6_ADDRS_MAX) {
872 nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
875 payload.ipv6_addr[count++] = entry->ipv6_addr;
877 mutex_unlock(&priv->tun.ipv6_off_lock);
878 payload.count = cpu_to_be32(count);
880 nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
881 sizeof(struct nfp_tun_ipv6_addr),
882 &payload, GFP_KERNEL);
885 struct nfp_ipv6_addr_entry *
886 nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
888 struct nfp_flower_priv *priv = app->priv;
889 struct nfp_ipv6_addr_entry *entry;
891 mutex_lock(&priv->tun.ipv6_off_lock);
892 list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
893 if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
895 mutex_unlock(&priv->tun.ipv6_off_lock);
899 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
901 mutex_unlock(&priv->tun.ipv6_off_lock);
902 nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
905 entry->ipv6_addr = *ipv6;
906 entry->ref_count = 1;
907 list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
908 mutex_unlock(&priv->tun.ipv6_off_lock);
910 nfp_tun_write_ipv6_list(app);
916 nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
918 struct nfp_flower_priv *priv = app->priv;
921 mutex_lock(&priv->tun.ipv6_off_lock);
922 if (!--entry->ref_count) {
923 list_del(&entry->list);
927 mutex_unlock(&priv->tun.ipv6_off_lock);
930 nfp_tun_write_ipv6_list(app);
934 __nfp_tunnel_offload_mac(struct nfp_app *app, const u8 *mac, u16 idx, bool del)
936 struct nfp_tun_mac_addr_offload payload;
938 memset(&payload, 0, sizeof(payload));
941 payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);
943 /* FW supports multiple MACs per cmsg but we restrict to a single one. */
944 payload.count = cpu_to_be16(1);
945 payload.index = cpu_to_be16(idx);
946 ether_addr_copy(payload.addr, mac);
948 return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
949 sizeof(struct nfp_tun_mac_addr_offload),
950 &payload, GFP_KERNEL);
953 static bool nfp_tunnel_port_is_phy_repr(int port)
955 if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
956 NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
962 static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
964 return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
967 static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
969 return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
972 static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
974 return nfp_mac_idx >> 8;
977 static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
979 return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
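/* Offloaded MAC indexes encode the owner in the upper byte and the port
 * type in the lower byte, e.g. a physical port id of 2 maps to
 * (2 << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT), while non-repr devices
 * use an ida-allocated id combined with
 * NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT.
 */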
982 static struct nfp_tun_offloaded_mac *
983 nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, const u8 *mac)
985 struct nfp_flower_priv *priv = app->priv;
987 return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
988 offloaded_macs_params);
992 nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
993 struct net_device *netdev, bool mod)
995 if (nfp_netdev_is_nfp_repr(netdev)) {
996 struct nfp_flower_repr_priv *repr_priv;
997 struct nfp_repr *repr;
999 repr = netdev_priv(netdev);
1000 repr_priv = repr->app_priv;
1002 /* If modifying MAC, remove repr from old list first. */
1004 list_del(&repr_priv->mac_list);
1006 list_add_tail(&repr_priv->mac_list, &entry->repr_list);
1007 } else if (nfp_flower_is_supported_bridge(netdev)) {
1008 entry->bridge_count++;
1015 nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
1018 struct nfp_flower_priv *priv = app->priv;
1019 struct nfp_tun_offloaded_mac *entry;
1020 int ida_idx = -1, err;
1021 u16 nfp_mac_idx = 0;
1023 entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
1024 if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
1025 if (entry->bridge_count ||
1026 !nfp_flower_is_supported_bridge(netdev)) {
1027 nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
1032 /* MAC is global but matches need to go to pre_tun table. */
1033 nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
1037 /* Assign a global index if non-repr or MAC is now shared. */
1038 if (entry || !port) {
1039 ida_idx = ida_alloc_max(&priv->tun.mac_off_ids,
1040 NFP_MAX_MAC_INDEX, GFP_KERNEL);
1045 nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
1047 if (nfp_flower_is_supported_bridge(netdev))
1048 nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;
1052 nfp_tunnel_get_mac_idx_from_phy_port_id(port);
1057 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1063 ether_addr_copy(entry->addr, netdev->dev_addr);
1064 INIT_LIST_HEAD(&entry->repr_list);
1066 if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
1068 offloaded_macs_params)) {
1070 goto err_free_entry;
1074 err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
1075 nfp_mac_idx, false);
1077 /* If not shared then free. */
1078 if (!entry->ref_count)
1079 goto err_remove_hash;
1083 entry->index = nfp_mac_idx;
1084 nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
1089 rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
1090 offloaded_macs_params);
1095 ida_free(&priv->tun.mac_off_ids, ida_idx);
1101 nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
1102 const u8 *mac, bool mod)
1104 struct nfp_flower_priv *priv = app->priv;
1105 struct nfp_flower_repr_priv *repr_priv;
1106 struct nfp_tun_offloaded_mac *entry;
1107 struct nfp_repr *repr;
1111 entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
1116 /* If del is part of a mod then mac_list is still in use elsewhere. */
1117 if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
1118 repr = netdev_priv(netdev);
1119 repr_priv = repr->app_priv;
1120 list_del(&repr_priv->mac_list);
1123 if (nfp_flower_is_supported_bridge(netdev)) {
1124 entry->bridge_count--;
1126 if (!entry->bridge_count && entry->ref_count) {
1127 nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
1128 if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
1130 nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
1131 netdev_name(netdev));
1135 entry->index = nfp_mac_idx;
1140 /* If MAC is now used by 1 repr set the offloaded MAC index to port. */
1141 if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
1144 repr_priv = list_first_entry(&entry->repr_list,
1145 struct nfp_flower_repr_priv,
1147 repr = repr_priv->nfp_repr;
1148 port = nfp_repr_get_port_id(repr->netdev);
1149 nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
1150 err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
1152 nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
1153 netdev_name(netdev));
1157 ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
1158 ida_free(&priv->tun.mac_off_ids, ida_idx);
1159 entry->index = nfp_mac_idx;
1163 if (entry->ref_count)
1166 WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
1168 offloaded_macs_params));
1170 if (nfp_flower_is_supported_bridge(netdev))
1171 nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
1173 nfp_mac_idx = entry->index;
1175 /* If MAC has global ID then extract and free the ida entry. */
1176 if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
1177 ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
1178 ida_free(&priv->tun.mac_off_ids, ida_idx);
1183 return __nfp_tunnel_offload_mac(app, mac, 0, true);
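/* Resolve the per-netdev offload state (repr or non-repr private data),
 * validate the interface MAC address and dispatch ADD/DEL/MOD commands to
 * the shared-MAC helpers, keeping the cached offloaded address in sync.
 */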
1187 nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
1188 enum nfp_flower_mac_offload_cmd cmd)
1190 struct nfp_flower_non_repr_priv *nr_priv = NULL;
1191 bool non_repr = false, *mac_offloaded;
1195 if (nfp_netdev_is_nfp_repr(netdev)) {
1196 struct nfp_flower_repr_priv *repr_priv;
1197 struct nfp_repr *repr;
1199 repr = netdev_priv(netdev);
1200 if (repr->app != app)
1203 repr_priv = repr->app_priv;
1204 if (repr_priv->on_bridge)
1207 mac_offloaded = &repr_priv->mac_offloaded;
1208 off_mac = &repr_priv->offloaded_mac_addr[0];
1209 port = nfp_repr_get_port_id(netdev);
1210 if (!nfp_tunnel_port_is_phy_repr(port))
1212 } else if (nfp_fl_is_netdev_to_offload(netdev)) {
1213 nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
1217 mac_offloaded = &nr_priv->mac_offloaded;
1218 off_mac = &nr_priv->offloaded_mac_addr[0];
1224 if (!is_valid_ether_addr(netdev->dev_addr)) {
1226 goto err_put_non_repr_priv;
1229 if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
1230 cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;
1233 case NFP_TUNNEL_MAC_OFFLOAD_ADD:
1234 err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
1236 goto err_put_non_repr_priv;
1239 __nfp_flower_non_repr_priv_get(nr_priv);
1241 *mac_offloaded = true;
1242 ether_addr_copy(off_mac, netdev->dev_addr);
1244 case NFP_TUNNEL_MAC_OFFLOAD_DEL:
1245 /* Only attempt delete if add was successful. */
1246 if (!*mac_offloaded)
1250 __nfp_flower_non_repr_priv_put(nr_priv);
1252 *mac_offloaded = false;
1254 err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
1257 goto err_put_non_repr_priv;
1260 case NFP_TUNNEL_MAC_OFFLOAD_MOD:
1261 /* Ignore if changing to the same address. */
1262 if (ether_addr_equal(netdev->dev_addr, off_mac))
1265 err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
1267 goto err_put_non_repr_priv;
1269 /* Delete the previous MAC address. */
1270 err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
1272 nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
1273 netdev_name(netdev));
1275 ether_addr_copy(off_mac, netdev->dev_addr);
1279 goto err_put_non_repr_priv;
1283 __nfp_flower_non_repr_priv_put(nr_priv);
1287 err_put_non_repr_priv:
1289 __nfp_flower_non_repr_priv_put(nr_priv);
1294 int nfp_tunnel_mac_event_handler(struct nfp_app *app,
1295 struct net_device *netdev,
1296 unsigned long event, void *ptr)
1300 if (event == NETDEV_DOWN) {
1301 err = nfp_tunnel_offload_mac(app, netdev,
1302 NFP_TUNNEL_MAC_OFFLOAD_DEL);
1304 nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
1305 netdev_name(netdev));
1306 } else if (event == NETDEV_UP) {
1307 err = nfp_tunnel_offload_mac(app, netdev,
1308 NFP_TUNNEL_MAC_OFFLOAD_ADD);
1310 nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
1311 netdev_name(netdev));
1312 } else if (event == NETDEV_CHANGEADDR) {
1313 /* Only offload addr change if netdev is already up. */
1314 if (!(netdev->flags & IFF_UP))
1317 err = nfp_tunnel_offload_mac(app, netdev,
1318 NFP_TUNNEL_MAC_OFFLOAD_MOD);
1320 nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
1321 netdev_name(netdev));
1322 } else if (event == NETDEV_CHANGEUPPER) {
1323 /* If a repr is attached to a bridge then tunnel packets
1324 * entering the physical port are directed through the bridge
1325 * datapath and cannot be directly detunneled. Therefore,
1326 * associated offloaded MACs and indexes should not be used
1327 * by fw for detunneling.
1329 struct netdev_notifier_changeupper_info *info = ptr;
1330 struct net_device *upper = info->upper_dev;
1331 struct nfp_flower_repr_priv *repr_priv;
1332 struct nfp_repr *repr;
1334 if (!nfp_netdev_is_nfp_repr(netdev) ||
1335 !nfp_flower_is_supported_bridge(upper))
1338 repr = netdev_priv(netdev);
1339 if (repr->app != app)
1342 repr_priv = repr->app_priv;
1344 if (info->linking) {
1345 if (nfp_tunnel_offload_mac(app, netdev,
1346 NFP_TUNNEL_MAC_OFFLOAD_DEL))
1347 nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
1348 netdev_name(netdev));
1349 repr_priv->on_bridge = true;
1351 repr_priv->on_bridge = false;
1353 if (!(netdev->flags & IFF_UP))
1356 if (nfp_tunnel_offload_mac(app, netdev,
1357 NFP_TUNNEL_MAC_OFFLOAD_ADD))
1358 nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
1359 netdev_name(netdev));
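/* Offload a pre-tunnel rule: look up the offloaded MAC index of the
 * internal egress device, set or clear the IPv6 bit in that index and
 * send a PRE_TUN_RULE cmsg. The index and VLAN are cached on the flow so
 * the rule can be deleted even if the device has gone away.
 */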
1365 int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
1366 struct nfp_fl_payload *flow)
1368 struct nfp_flower_priv *app_priv = app->priv;
1369 struct nfp_tun_offloaded_mac *mac_entry;
1370 struct nfp_flower_meta_tci *key_meta;
1371 struct nfp_tun_pre_tun_rule payload;
1372 struct net_device *internal_dev;
1375 if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
1378 memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
1380 internal_dev = flow->pre_tun_rule.dev;
1381 payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
1382 payload.host_ctx_id = flow->meta.host_ctx_id;
1384 /* Lookup MAC index for the pre-tunnel rule egress device.
1385 * Note that because the device is always an internal port, it will
1386 * have a constant global index so does not need to be tracked.
1388 mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
1389 internal_dev->dev_addr);
1393 /* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being
1394 * set/clear for port_idx.
1396 key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data;
1397 if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6)
1398 mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT;
1400 mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT;
1402 payload.port_idx = cpu_to_be16(mac_entry->index);
1404 /* Copy mac id and vlan to flow - dev may not exist at delete time. */
1405 flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
1406 flow->pre_tun_rule.port_idx = payload.port_idx;
1408 err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
1409 sizeof(struct nfp_tun_pre_tun_rule),
1410 (unsigned char *)&payload, GFP_KERNEL);
1414 app_priv->pre_tun_rule_cnt++;
1419 int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
1420 struct nfp_fl_payload *flow)
1422 struct nfp_flower_priv *app_priv = app->priv;
1423 struct nfp_tun_pre_tun_rule payload;
1427 memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
1429 tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
1430 payload.flags = cpu_to_be32(tmp_flags);
1431 payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
1432 payload.port_idx = flow->pre_tun_rule.port_idx;
1434 err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
1435 sizeof(struct nfp_tun_pre_tun_rule),
1436 (unsigned char *)&payload, GFP_KERNEL);
1440 app_priv->pre_tun_rule_cnt--;
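/* Initialise the tunnel offload state: the offloaded-MAC rhashtable and
 * ida, the IPv4/IPv6 endpoint address lists with their locks, and the
 * netevent notifier used for neighbour updates.
 */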
1445 int nfp_tunnel_config_start(struct nfp_app *app)
1447 struct nfp_flower_priv *priv = app->priv;
1450 /* Initialise rhash for MAC offload tracking. */
1451 err = rhashtable_init(&priv->tun.offloaded_macs,
1452 &offloaded_macs_params);
1456 ida_init(&priv->tun.mac_off_ids);
1458 /* Initialise priv data for IPv4/v6 offloading. */
1459 mutex_init(&priv->tun.ipv4_off_lock);
1460 INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
1461 mutex_init(&priv->tun.ipv6_off_lock);
1462 INIT_LIST_HEAD(&priv->tun.ipv6_off_list);
1464 /* Initialise priv data for neighbour offloading. */
1465 priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
1467 err = register_netevent_notifier(&priv->tun.neigh_nb);
1469 rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
1470 nfp_check_rhashtable_empty, NULL);
1477 void nfp_tunnel_config_stop(struct nfp_app *app)
1479 struct nfp_flower_priv *priv = app->priv;
1480 struct nfp_ipv4_addr_entry *ip_entry;
1481 struct list_head *ptr, *storage;
1483 unregister_netevent_notifier(&priv->tun.neigh_nb);
1485 ida_destroy(&priv->tun.mac_off_ids);
1487 /* Free any memory that may be occupied by ipv4 list. */
1488 list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
1489 ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
1490 list_del(&ip_entry->list);
1494 mutex_destroy(&priv->tun.ipv6_off_lock);
1496 /* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
1497 rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
1498 nfp_check_rhashtable_empty, NULL);
1500 nfp_tun_cleanup_nn_entries(app);