drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES               32

#define NFP_TUN_PRE_TUN_RULE_LIMIT      32
#define NFP_TUN_PRE_TUN_RULE_DEL        0x1
#define NFP_TUN_PRE_TUN_IDX_BIT         0x8

/**
 * struct nfp_tun_pre_tun_rule - rule matched before decap
 * @flags:              options for the rule offload
 * @port_idx:           index of destination MAC address for the rule
 * @vlan_tci:           VLAN info associated with MAC
 * @host_ctx_id:        stats context of rule to update
 */
struct nfp_tun_pre_tun_rule {
        __be32 flags;
        __be16 port_idx;
        __be16 vlan_tci;
        __be32 host_ctx_id;
};

/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:                sequence number of the message
 * @count:              number of tunnels reported in message
 * @flags:              options part of the request
 * @tun_info.ipv4:              dest IPv4 address of active route
 * @tun_info.egress_port:       port the encapsulated packet egressed
 * @tun_info.extra:             reserved for future use
 * @tun_info:           tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns {
        __be32 seq;
        __be32 count;
        __be32 flags;
        struct route_ip_info {
                __be32 ipv4;
                __be32 egress_port;
                __be32 extra[2];
        } tun_info[];
};

/**
 * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
 * @seq:                sequence number of the message
 * @count:              number of tunnels reported in message
 * @flags:              options part of the request
 * @tun_info.ipv6:              dest IPv6 address of active route
 * @tun_info.egress_port:       port the encapsulated packet egressed
 * @tun_info.extra:             reserved for future use
 * @tun_info:           tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns_v6 {
        __be32 seq;
        __be32 count;
        __be32 flags;
        struct route_ip_info_v6 {
                struct in6_addr ipv6;
                __be32 egress_port;
                __be32 extra[2];
        } tun_info[];
};

/**
 * struct nfp_tun_neigh - neighbour/route entry on the NFP
 * @dst_ipv4:   destination IPv4 address
 * @src_ipv4:   source IPv4 address
 * @dst_addr:   destination MAC address
 * @src_addr:   source MAC address
 * @port_id:    NFP port to output packet on - associated with source IPv4
 */
struct nfp_tun_neigh {
        __be32 dst_ipv4;
        __be32 src_ipv4;
        u8 dst_addr[ETH_ALEN];
        u8 src_addr[ETH_ALEN];
        __be32 port_id;
};

/**
 * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP
 * @dst_ipv6:   destination IPv6 address
 * @src_ipv6:   source IPv6 address
 * @dst_addr:   destination MAC address
 * @src_addr:   source MAC address
 * @port_id:    NFP port to output packet on - associated with source IPv6
 */
struct nfp_tun_neigh_v6 {
        struct in6_addr dst_ipv6;
        struct in6_addr src_ipv6;
        u8 dst_addr[ETH_ALEN];
        u8 src_addr[ETH_ALEN];
        __be32 port_id;
};

/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:       ingress port of packet that signalled request
 * @ipv4_addr:          destination ipv4 address for route
 * @reserved:           reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
        __be32 ingress_port;
        __be32 ipv4_addr;
        __be32 reserved[2];
};

/**
 * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
 * @ingress_port:       ingress port of packet that signalled request
 * @ipv6_addr:          destination ipv6 address for route
 */
struct nfp_tun_req_route_ipv6 {
        __be32 ingress_port;
        struct in6_addr ipv6_addr;
};

/**
 * struct nfp_offloaded_route - routes that are offloaded to the NFP
 * @list:       list pointer
 * @ip_add:     destination of route - can be IPv4 or IPv6
 */
struct nfp_offloaded_route {
        struct list_head list;
        u8 ip_add[];
};

#define NFP_FL_IPV4_ADDRS_MAX        32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:      number of IPs populated in the array
 * @ipv4_addr:  array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
        __be32 count;
        __be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};

/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:  IP address
 * @ref_count:  number of rules currently using this IP
 * @list:       list pointer
 */
struct nfp_ipv4_addr_entry {
        __be32 ipv4_addr;
        int ref_count;
        struct list_head list;
};

#define NFP_FL_IPV6_ADDRS_MAX        4

/**
 * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
 * @count:      number of IPs populated in the array
 * @ipv6_addr:  array of IPV6_ADDRS_MAX 128 bit IPv6 addresses
 */
struct nfp_tun_ipv6_addr {
        __be32 count;
        struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
};

#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG    0x2

/**
 * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
 * @flags:      MAC address offload options
 * @count:      number of MAC addresses in the message (should be 1)
 * @index:      index of MAC address in the lookup table
 * @addr:       interface MAC address
 */
struct nfp_tun_mac_addr_offload {
        __be16 flags;
        __be16 count;
        __be16 index;
        u8 addr[ETH_ALEN];
};

enum nfp_flower_mac_offload_cmd {
        NFP_TUNNEL_MAC_OFFLOAD_ADD =            0,
        NFP_TUNNEL_MAC_OFFLOAD_DEL =            1,
        NFP_TUNNEL_MAC_OFFLOAD_MOD =            2,
};

#define NFP_MAX_MAC_INDEX       0xff

/**
 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
 * @ht_node:            Hashtable entry
 * @addr:               Offloaded MAC address
 * @index:              Offloaded index for given MAC address
 * @ref_count:          Number of devs using this MAC address
 * @repr_list:          List of reprs sharing this MAC address
 * @bridge_count:       Number of bridge/internal devs with MAC
 */
struct nfp_tun_offloaded_mac {
        struct rhash_head ht_node;
        u8 addr[ETH_ALEN];
        u16 index;
        int ref_count;
        struct list_head repr_list;
        int bridge_count;
};

static const struct rhashtable_params offloaded_macs_params = {
        .key_offset     = offsetof(struct nfp_tun_offloaded_mac, addr),
        .head_offset    = offsetof(struct nfp_tun_offloaded_mac, ht_node),
        .key_len        = ETH_ALEN,
        .automatic_shrinking    = true,
};

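/* Handle the firmware's periodic active-tunnel message by refreshing the
 * kernel neighbour entry for each reported IPv4 tunnel destination, so that
 * ARP entries still in use by the NFP do not expire.
 */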
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_tun_active_tuns *payload;
        struct net_device *netdev;
        int count, i, pay_len;
        struct neighbour *n;
        __be32 ipv4_addr;
        u32 port;

        payload = nfp_flower_cmsg_get_data(skb);
        count = be32_to_cpu(payload->count);
        if (count > NFP_FL_MAX_ROUTES) {
                nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
                return;
        }

        pay_len = nfp_flower_cmsg_get_data_len(skb);
        if (pay_len != struct_size(payload, tun_info, count)) {
                nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
                return;
        }

        rcu_read_lock();
        for (i = 0; i < count; i++) {
                ipv4_addr = payload->tun_info[i].ipv4;
                port = be32_to_cpu(payload->tun_info[i].egress_port);
                netdev = nfp_app_dev_get(app, port, NULL);
                if (!netdev)
                        continue;

                n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
                if (!n)
                        continue;

                /* Update the used timestamp of neighbour */
                neigh_event_send(n, NULL);
                neigh_release(n);
        }
        rcu_read_unlock();
}

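/* IPv6 counterpart of nfp_tunnel_keep_alive(); refreshes entries in the
 * neighbour discovery table rather than the ARP table.
 */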
void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
        struct nfp_tun_active_tuns_v6 *payload;
        struct net_device *netdev;
        int count, i, pay_len;
        struct neighbour *n;
        void *ipv6_add;
        u32 port;

        payload = nfp_flower_cmsg_get_data(skb);
        count = be32_to_cpu(payload->count);
        if (count > NFP_FL_IPV6_ADDRS_MAX) {
                nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
                return;
        }

        pay_len = nfp_flower_cmsg_get_data_len(skb);
        if (pay_len != struct_size(payload, tun_info, count)) {
                nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
                return;
        }

        rcu_read_lock();
        for (i = 0; i < count; i++) {
                ipv6_add = &payload->tun_info[i].ipv6;
                port = be32_to_cpu(payload->tun_info[i].egress_port);
                netdev = nfp_app_dev_get(app, port, NULL);
                if (!netdev)
                        continue;

                n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
                if (!n)
                        continue;

                /* Update the used timestamp of neighbour */
                neigh_event_send(n, NULL);
                neigh_release(n);
        }
        rcu_read_unlock();
#endif
}

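/* Allocate a control message of the given type and length, copy in the
 * payload and send it to the firmware over the control channel.
 */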
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
                         gfp_t flag)
{
        struct sk_buff *skb;
        unsigned char *msg;

        skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
        if (!skb)
                return -ENOMEM;

        msg = nfp_flower_cmsg_get_data(skb);
        memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

        nfp_ctrl_tx(app->ctrl, skb);
        return 0;
}

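/* The helpers below maintain a host-side cache of routes offloaded to the
 * NFP, keyed by destination IP. Addresses are compared as raw bytes so the
 * same code serves both the IPv4 and IPv6 lists.
 */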
static bool
__nfp_tun_has_route(struct list_head *route_list, spinlock_t *list_lock,
                    void *add, int add_len)
{
        struct nfp_offloaded_route *entry;

        spin_lock_bh(list_lock);
        list_for_each_entry(entry, route_list, list)
                if (!memcmp(entry->ip_add, add, add_len)) {
                        spin_unlock_bh(list_lock);
                        return true;
                }
        spin_unlock_bh(list_lock);
        return false;
}

static int
__nfp_tun_add_route_to_cache(struct list_head *route_list,
                             spinlock_t *list_lock, void *add, int add_len)
{
        struct nfp_offloaded_route *entry;

        spin_lock_bh(list_lock);
        list_for_each_entry(entry, route_list, list)
                if (!memcmp(entry->ip_add, add, add_len)) {
                        spin_unlock_bh(list_lock);
                        return 0;
                }

        entry = kmalloc(sizeof(*entry) + add_len, GFP_ATOMIC);
        if (!entry) {
                spin_unlock_bh(list_lock);
                return -ENOMEM;
        }

        memcpy(entry->ip_add, add, add_len);
        list_add_tail(&entry->list, route_list);
        spin_unlock_bh(list_lock);

        return 0;
}

static void
__nfp_tun_del_route_from_cache(struct list_head *route_list,
                               spinlock_t *list_lock, void *add, int add_len)
{
        struct nfp_offloaded_route *entry;

        spin_lock_bh(list_lock);
        list_for_each_entry(entry, route_list, list)
                if (!memcmp(entry->ip_add, add, add_len)) {
                        list_del(&entry->list);
                        kfree(entry);
                        break;
                }
        spin_unlock_bh(list_lock);
}

static bool nfp_tun_has_route_v4(struct nfp_app *app, __be32 *ipv4_addr)
{
        struct nfp_flower_priv *priv = app->priv;

        return __nfp_tun_has_route(&priv->tun.neigh_off_list_v4,
                                   &priv->tun.neigh_off_lock_v4, ipv4_addr,
                                   sizeof(*ipv4_addr));
}

static bool
nfp_tun_has_route_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
{
        struct nfp_flower_priv *priv = app->priv;

        return __nfp_tun_has_route(&priv->tun.neigh_off_list_v6,
                                   &priv->tun.neigh_off_lock_v6, ipv6_addr,
                                   sizeof(*ipv6_addr));
}

static void
nfp_tun_add_route_to_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
{
        struct nfp_flower_priv *priv = app->priv;

        __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v4,
                                     &priv->tun.neigh_off_lock_v4, ipv4_addr,
                                     sizeof(*ipv4_addr));
}

static void
nfp_tun_add_route_to_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
{
        struct nfp_flower_priv *priv = app->priv;

        __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v6,
                                     &priv->tun.neigh_off_lock_v6, ipv6_addr,
                                     sizeof(*ipv6_addr));
}

static void
nfp_tun_del_route_from_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
{
        struct nfp_flower_priv *priv = app->priv;

        __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v4,
                                       &priv->tun.neigh_off_lock_v4, ipv4_addr,
                                       sizeof(*ipv4_addr));
}

static void
nfp_tun_del_route_from_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
{
        struct nfp_flower_priv *priv = app->priv;

        __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v6,
                                       &priv->tun.neigh_off_lock_v6, ipv6_addr,
                                       sizeof(*ipv6_addr));
}

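/* Offload an IPv4 neighbour entry to the firmware. If the neighbour is no
 * longer valid only the destination IP is sent, with all other fields zero,
 * which removes the entry on the NFP.
 */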
static void
nfp_tun_write_neigh_v4(struct net_device *netdev, struct nfp_app *app,
                       struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
        struct nfp_tun_neigh payload;
        u32 port_id;

        port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
        if (!port_id)
                return;

        memset(&payload, 0, sizeof(struct nfp_tun_neigh));
        payload.dst_ipv4 = flow->daddr;

        /* If entry has expired send dst IP with all other fields 0. */
        if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
                nfp_tun_del_route_from_cache_v4(app, &payload.dst_ipv4);
                /* Trigger ARP to verify invalid neighbour state. */
                neigh_event_send(neigh, NULL);
                goto send_msg;
        }

        /* Have a valid neighbour so populate rest of entry. */
        payload.src_ipv4 = flow->saddr;
        ether_addr_copy(payload.src_addr, netdev->dev_addr);
        neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
        payload.port_id = cpu_to_be32(port_id);
        /* Add destination of new route to NFP cache. */
        nfp_tun_add_route_to_cache_v4(app, &payload.dst_ipv4);

send_msg:
        nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
                                 sizeof(struct nfp_tun_neigh),
                                 (unsigned char *)&payload, flag);
}

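/* IPv6 counterpart of nfp_tun_write_neigh_v4(). */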
static void
nfp_tun_write_neigh_v6(struct net_device *netdev, struct nfp_app *app,
                       struct flowi6 *flow, struct neighbour *neigh, gfp_t flag)
{
        struct nfp_tun_neigh_v6 payload;
        u32 port_id;

        port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
        if (!port_id)
                return;

        memset(&payload, 0, sizeof(struct nfp_tun_neigh_v6));
        payload.dst_ipv6 = flow->daddr;

        /* If entry has expired send dst IP with all other fields 0. */
        if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
                nfp_tun_del_route_from_cache_v6(app, &payload.dst_ipv6);
                /* Trigger probe to verify invalid neighbour state. */
                neigh_event_send(neigh, NULL);
                goto send_msg;
        }

        /* Have a valid neighbour so populate rest of entry. */
        payload.src_ipv6 = flow->saddr;
        ether_addr_copy(payload.src_addr, netdev->dev_addr);
        neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
        payload.port_id = cpu_to_be32(port_id);
        /* Add destination of new route to NFP cache. */
        nfp_tun_add_route_to_cache_v6(app, &payload.dst_ipv6);

send_msg:
        nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
                                 sizeof(struct nfp_tun_neigh_v6),
                                 (unsigned char *)&payload, flag);
}

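/* Netevent notifier: on neighbour updates or redirects that touch a route
 * already offloaded to the NFP, re-run the route lookup and push the
 * refreshed neighbour state to the firmware.
 */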
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
                            void *ptr)
{
        struct nfp_flower_priv *app_priv;
        struct netevent_redirect *redir;
        struct flowi4 flow4 = {};
        struct flowi6 flow6 = {};
        struct neighbour *n;
        struct nfp_app *app;
        struct rtable *rt;
        bool ipv6 = false;
        int err;

        switch (event) {
        case NETEVENT_REDIRECT:
                redir = (struct netevent_redirect *)ptr;
                n = redir->neigh;
                break;
        case NETEVENT_NEIGH_UPDATE:
                n = (struct neighbour *)ptr;
                break;
        default:
                return NOTIFY_DONE;
        }

        if (n->tbl->family == AF_INET6)
                ipv6 = true;

        if (ipv6)
                flow6.daddr = *(struct in6_addr *)n->primary_key;
        else
                flow4.daddr = *(__be32 *)n->primary_key;

        app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
        app = app_priv->app;

        if (!nfp_netdev_is_nfp_repr(n->dev) &&
            !nfp_flower_internal_port_can_offload(app, n->dev))
                return NOTIFY_DONE;

        /* Only concerned with changes to routes already added to NFP. */
        if ((ipv6 && !nfp_tun_has_route_v6(app, &flow6.daddr)) ||
            (!ipv6 && !nfp_tun_has_route_v4(app, &flow4.daddr)))
                return NOTIFY_DONE;

#if IS_ENABLED(CONFIG_INET)
        if (ipv6) {
#if IS_ENABLED(CONFIG_IPV6)
                struct dst_entry *dst;

                dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(n->dev), NULL,
                                                      &flow6, NULL);
                if (IS_ERR(dst))
                        return NOTIFY_DONE;

                dst_release(dst);
                flow6.flowi6_proto = IPPROTO_UDP;
                nfp_tun_write_neigh_v6(n->dev, app, &flow6, n, GFP_ATOMIC);
#else
                return NOTIFY_DONE;
#endif /* CONFIG_IPV6 */
        } else {
                /* Do a route lookup to populate flow data. */
                rt = ip_route_output_key(dev_net(n->dev), &flow4);
                err = PTR_ERR_OR_ZERO(rt);
                if (err)
                        return NOTIFY_DONE;

                ip_rt_put(rt);

                flow4.flowi4_proto = IPPROTO_UDP;
                nfp_tun_write_neigh_v4(n->dev, app, &flow4, n, GFP_ATOMIC);
        }
#else
        return NOTIFY_DONE;
#endif /* CONFIG_INET */

        return NOTIFY_OK;
}

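/* Handle a firmware request to resolve an IPv4 route: look up the route and
 * neighbour in the namespace of the ingress port and reply with a TUN_NEIGH
 * message.
 */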
void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_tun_req_route_ipv4 *payload;
        struct net_device *netdev;
        struct flowi4 flow = {};
        struct neighbour *n;
        struct rtable *rt;
        int err;

        payload = nfp_flower_cmsg_get_data(skb);

        rcu_read_lock();
        netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
        if (!netdev)
                goto fail_rcu_unlock;

        flow.daddr = payload->ipv4_addr;
        flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
        /* Do a route lookup on same namespace as ingress port. */
        rt = ip_route_output_key(dev_net(netdev), &flow);
        err = PTR_ERR_OR_ZERO(rt);
        if (err)
                goto fail_rcu_unlock;
#else
        goto fail_rcu_unlock;
#endif

        /* Get the neighbour entry for the lookup */
        n = dst_neigh_lookup(&rt->dst, &flow.daddr);
        ip_rt_put(rt);
        if (!n)
                goto fail_rcu_unlock;
        nfp_tun_write_neigh_v4(n->dev, app, &flow, n, GFP_ATOMIC);
        neigh_release(n);
        rcu_read_unlock();
        return;

fail_rcu_unlock:
        rcu_read_unlock();
        nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}

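/* IPv6 counterpart of nfp_tunnel_request_route_v4(). */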
void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_tun_req_route_ipv6 *payload;
        struct net_device *netdev;
        struct flowi6 flow = {};
        struct dst_entry *dst;
        struct neighbour *n;

        payload = nfp_flower_cmsg_get_data(skb);

        rcu_read_lock();
        netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
        if (!netdev)
                goto fail_rcu_unlock;

        flow.daddr = payload->ipv6_addr;
        flow.flowi6_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
        dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
                                              NULL);
        if (IS_ERR(dst))
                goto fail_rcu_unlock;
#else
        goto fail_rcu_unlock;
#endif

        n = dst_neigh_lookup(dst, &flow.daddr);
        dst_release(dst);
        if (!n)
                goto fail_rcu_unlock;

        nfp_tun_write_neigh_v6(n->dev, app, &flow, n, GFP_ATOMIC);
        neigh_release(n);
        rcu_read_unlock();
        return;

fail_rcu_unlock:
        rcu_read_unlock();
        nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
}

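/* Push the full list of offloaded IPv4 tunnel endpoint addresses to the
 * firmware; the complete list is resent on every change.
 */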
static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *entry;
        struct nfp_tun_ipv4_addr payload;
        struct list_head *ptr, *storage;
        int count;

        memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
        mutex_lock(&priv->tun.ipv4_off_lock);
        count = 0;
        list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
                if (count >= NFP_FL_IPV4_ADDRS_MAX) {
                        mutex_unlock(&priv->tun.ipv4_off_lock);
                        nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
                        return;
                }
                entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                payload.ipv4_addr[count++] = entry->ipv4_addr;
        }
        payload.count = cpu_to_be32(count);
        mutex_unlock(&priv->tun.ipv4_off_lock);

        nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
                                 sizeof(struct nfp_tun_ipv4_addr),
                                 &payload, GFP_KERNEL);
}

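/* Take a reference on an IPv4 tunnel endpoint address, creating a new list
 * entry if the address is not yet offloaded, then sync the list to firmware.
 */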
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *entry;
        struct list_head *ptr, *storage;

        mutex_lock(&priv->tun.ipv4_off_lock);
        list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                if (entry->ipv4_addr == ipv4) {
                        entry->ref_count++;
                        mutex_unlock(&priv->tun.ipv4_off_lock);
                        return;
                }
        }

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                mutex_unlock(&priv->tun.ipv4_off_lock);
                nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
                return;
        }
        entry->ipv4_addr = ipv4;
        entry->ref_count = 1;
        list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
        mutex_unlock(&priv->tun.ipv4_off_lock);

        nfp_tun_write_ipv4_list(app);
}

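/* Drop a reference on an IPv4 tunnel endpoint address and resync the list. */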
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *entry;
        struct list_head *ptr, *storage;

        mutex_lock(&priv->tun.ipv4_off_lock);
        list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                if (entry->ipv4_addr == ipv4) {
                        entry->ref_count--;
                        if (!entry->ref_count) {
                                list_del(&entry->list);
                                kfree(entry);
                        }
                        break;
                }
        }
        mutex_unlock(&priv->tun.ipv4_off_lock);

        nfp_tun_write_ipv4_list(app);
}

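/* IPv6 counterpart of nfp_tun_write_ipv4_list(). */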
static void nfp_tun_write_ipv6_list(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv6_addr_entry *entry;
        struct nfp_tun_ipv6_addr payload;
        int count = 0;

        memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
        mutex_lock(&priv->tun.ipv6_off_lock);
        list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
                if (count >= NFP_FL_IPV6_ADDRS_MAX) {
                        nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
                        break;
                }
                payload.ipv6_addr[count++] = entry->ipv6_addr;
        }
        mutex_unlock(&priv->tun.ipv6_off_lock);
        payload.count = cpu_to_be32(count);

        nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
                                 sizeof(struct nfp_tun_ipv6_addr),
                                 &payload, GFP_KERNEL);
}

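/* Take a reference on an IPv6 tunnel endpoint address. Unlike the IPv4 case
 * the entry is returned so that callers can later release it through
 * nfp_tunnel_put_ipv6_off().
 */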
struct nfp_ipv6_addr_entry *
nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv6_addr_entry *entry;

        mutex_lock(&priv->tun.ipv6_off_lock);
        list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
                if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
                        entry->ref_count++;
                        mutex_unlock(&priv->tun.ipv6_off_lock);
                        return entry;
                }

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                mutex_unlock(&priv->tun.ipv6_off_lock);
                nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
                return NULL;
        }
        entry->ipv6_addr = *ipv6;
        entry->ref_count = 1;
        list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
        mutex_unlock(&priv->tun.ipv6_off_lock);

        nfp_tun_write_ipv6_list(app);

        return entry;
}

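/* Release a reference taken by nfp_tunnel_add_ipv6_off(), resyncing the
 * firmware list if the last user freed the entry.
 */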
void
nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
{
        struct nfp_flower_priv *priv = app->priv;
        bool freed = false;

        mutex_lock(&priv->tun.ipv6_off_lock);
        if (!--entry->ref_count) {
                list_del(&entry->list);
                kfree(entry);
                freed = true;
        }
        mutex_unlock(&priv->tun.ipv6_off_lock);

        if (freed)
                nfp_tun_write_ipv6_list(app);
}

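/* Send a single MAC address add or delete to the firmware lookup table. */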
static int
__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
{
        struct nfp_tun_mac_addr_offload payload;

        memset(&payload, 0, sizeof(payload));

        if (del)
                payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);

        /* FW supports multiple MACs per cmsg but restrict to single. */
        payload.count = cpu_to_be16(1);
        payload.index = cpu_to_be16(idx);
        ether_addr_copy(payload.addr, mac);

        return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
                                        sizeof(struct nfp_tun_mac_addr_offload),
                                        &payload, GFP_KERNEL);
}

static bool nfp_tunnel_port_is_phy_repr(int port)
{
        if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
            NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
                return true;

        return false;
}

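/* Offloaded MAC indexes encode their owner in the low byte: physical port
 * reprs use (port << 8) | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT, while shared
 * or non-repr MACs use an ida-allocated id with the OTHER_PORT type.
 */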
static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
{
        return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
}

static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
{
        return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
{
        return nfp_mac_idx >> 8;
}

static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
{
        return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static struct nfp_tun_offloaded_mac *
nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
{
        struct nfp_flower_priv *priv = app->priv;

        return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
                                      offloaded_macs_params);
}

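/* Take a reference on an offloaded MAC entry and, for reprs, link the repr
 * into the entry's sharing list (moving it off its old list on modify).
 */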
static void
nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
                                           struct net_device *netdev, bool mod)
{
        if (nfp_netdev_is_nfp_repr(netdev)) {
                struct nfp_flower_repr_priv *repr_priv;
                struct nfp_repr *repr;

                repr = netdev_priv(netdev);
                repr_priv = repr->app_priv;

                /* If modifying MAC, remove repr from old list first. */
                if (mod)
                        list_del(&repr_priv->mac_list);

                list_add_tail(&repr_priv->mac_list, &entry->repr_list);
        } else if (nfp_flower_is_supported_bridge(netdev)) {
                entry->bridge_count++;
        }

        entry->ref_count++;
}

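/* Offload a netdev's MAC, sharing an existing table index when the address
 * is already offloaded. Physical reprs can use a port-based index; shared
 * and non-repr MACs are assigned a global index from the ida.
 */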
static int
nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
                          int port, bool mod)
{
        struct nfp_flower_priv *priv = app->priv;
        int ida_idx = NFP_MAX_MAC_INDEX, err;
        struct nfp_tun_offloaded_mac *entry;
        u16 nfp_mac_idx = 0;

        entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
        if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
                if (entry->bridge_count ||
                    !nfp_flower_is_supported_bridge(netdev)) {
                        nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
                                                                   netdev, mod);
                        return 0;
                }

                /* MAC is global but matches need to go to pre_tun table. */
                nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
        }

        if (!nfp_mac_idx) {
                /* Assign a global index if non-repr or MAC is now shared. */
                if (entry || !port) {
                        ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
                                                 NFP_MAX_MAC_INDEX, GFP_KERNEL);
                        if (ida_idx < 0)
                                return ida_idx;

                        nfp_mac_idx =
                                nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);

                        if (nfp_flower_is_supported_bridge(netdev))
                                nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;

                } else {
                        nfp_mac_idx =
                                nfp_tunnel_get_mac_idx_from_phy_port_id(port);
                }
        }

        if (!entry) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry) {
                        err = -ENOMEM;
                        goto err_free_ida;
                }

                ether_addr_copy(entry->addr, netdev->dev_addr);
                INIT_LIST_HEAD(&entry->repr_list);

                if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
                                           &entry->ht_node,
                                           offloaded_macs_params)) {
                        err = -ENOMEM;
                        goto err_free_entry;
                }
        }

        err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
                                       nfp_mac_idx, false);
        if (err) {
                /* If not shared then free. */
                if (!entry->ref_count)
                        goto err_remove_hash;
                goto err_free_ida;
        }

        entry->index = nfp_mac_idx;
        nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);

        return 0;

err_remove_hash:
        rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
                               offloaded_macs_params);
err_free_entry:
        kfree(entry);
err_free_ida:
        if (ida_idx != NFP_MAX_MAC_INDEX)
                ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);

        return err;
}

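/* Release a reference on an offloaded MAC. Depending on the remaining users
 * the table index may be rewritten (last bridge gone, or back to a single
 * repr) or the entry removed from the firmware table entirely.
 */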
static int
nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
                          u8 *mac, bool mod)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_tun_offloaded_mac *entry;
        struct nfp_repr *repr;
        int ida_idx;

        entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
        if (!entry)
                return 0;

        entry->ref_count--;
        /* If del is part of a mod then mac_list is still in use elsewhere. */
        if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
                repr = netdev_priv(netdev);
                repr_priv = repr->app_priv;
                list_del(&repr_priv->mac_list);
        }

        if (nfp_flower_is_supported_bridge(netdev)) {
                entry->bridge_count--;

                if (!entry->bridge_count && entry->ref_count) {
                        u16 nfp_mac_idx;

                        nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
                        if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
                                                     false)) {
                                nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
                                                     netdev_name(netdev));
                                return 0;
                        }

                        entry->index = nfp_mac_idx;
                        return 0;
                }
        }

        /* If MAC is now used by 1 repr set the offloaded MAC index to port. */
        if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
                u16 nfp_mac_idx;
                int port, err;

                repr_priv = list_first_entry(&entry->repr_list,
                                             struct nfp_flower_repr_priv,
                                             mac_list);
                repr = repr_priv->nfp_repr;
                port = nfp_repr_get_port_id(repr->netdev);
                nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
                err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
                if (err) {
                        nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
                                             netdev_name(netdev));
                        return 0;
                }

                ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
                ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
                entry->index = nfp_mac_idx;
                return 0;
        }

        if (entry->ref_count)
                return 0;

        WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
                                            &entry->ht_node,
                                            offloaded_macs_params));
        /* If MAC has global ID then extract and free the ida entry. */
        if (nfp_tunnel_is_mac_idx_global(entry->index)) {
                ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
                ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
        }

        kfree(entry);

        return __nfp_tunnel_offload_mac(app, mac, 0, true);
}

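/* Apply a MAC offload add, delete or modify for a repr or other offloadable
 * netdev, keeping the cached offload state in the (non-)repr private data in
 * sync with the firmware table.
 */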
static int
nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
                       enum nfp_flower_mac_offload_cmd cmd)
{
        struct nfp_flower_non_repr_priv *nr_priv = NULL;
        bool non_repr = false, *mac_offloaded;
        u8 *off_mac = NULL;
        int err, port = 0;

        if (nfp_netdev_is_nfp_repr(netdev)) {
                struct nfp_flower_repr_priv *repr_priv;
                struct nfp_repr *repr;

                repr = netdev_priv(netdev);
                if (repr->app != app)
                        return 0;

                repr_priv = repr->app_priv;
                if (repr_priv->on_bridge)
                        return 0;

                mac_offloaded = &repr_priv->mac_offloaded;
                off_mac = &repr_priv->offloaded_mac_addr[0];
                port = nfp_repr_get_port_id(netdev);
                if (!nfp_tunnel_port_is_phy_repr(port))
                        return 0;
        } else if (nfp_fl_is_netdev_to_offload(netdev)) {
                nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
                if (!nr_priv)
                        return -ENOMEM;

                mac_offloaded = &nr_priv->mac_offloaded;
                off_mac = &nr_priv->offloaded_mac_addr[0];
                non_repr = true;
        } else {
                return 0;
        }

        if (!is_valid_ether_addr(netdev->dev_addr)) {
                err = -EINVAL;
                goto err_put_non_repr_priv;
        }

        if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
                cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;

        switch (cmd) {
        case NFP_TUNNEL_MAC_OFFLOAD_ADD:
                err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
                if (err)
                        goto err_put_non_repr_priv;

                if (non_repr)
                        __nfp_flower_non_repr_priv_get(nr_priv);

                *mac_offloaded = true;
                ether_addr_copy(off_mac, netdev->dev_addr);
                break;
        case NFP_TUNNEL_MAC_OFFLOAD_DEL:
                /* Only attempt delete if add was successful. */
                if (!*mac_offloaded)
                        break;

                if (non_repr)
                        __nfp_flower_non_repr_priv_put(nr_priv);

                *mac_offloaded = false;

                err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
                                                false);
                if (err)
                        goto err_put_non_repr_priv;

                break;
        case NFP_TUNNEL_MAC_OFFLOAD_MOD:
                /* Ignore if changing to the same address. */
                if (ether_addr_equal(netdev->dev_addr, off_mac))
                        break;

                err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
                if (err)
                        goto err_put_non_repr_priv;

                /* Delete the previous MAC address. */
                err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
                if (err)
                        nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
                                             netdev_name(netdev));

                ether_addr_copy(off_mac, netdev->dev_addr);
                break;
        default:
                err = -EINVAL;
                goto err_put_non_repr_priv;
        }

        if (non_repr)
                __nfp_flower_non_repr_priv_put(nr_priv);

        return 0;

err_put_non_repr_priv:
        if (non_repr)
                __nfp_flower_non_repr_priv_put(nr_priv);

        return err;
}

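/* Netdev notifier: translate UP, DOWN, CHANGEADDR and CHANGEUPPER events
 * into MAC offload adds, deletes and modifies.
 */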
int nfp_tunnel_mac_event_handler(struct nfp_app *app,
                                 struct net_device *netdev,
                                 unsigned long event, void *ptr)
{
        int err;

        if (event == NETDEV_DOWN) {
                err = nfp_tunnel_offload_mac(app, netdev,
                                             NFP_TUNNEL_MAC_OFFLOAD_DEL);
                if (err)
                        nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
                                             netdev_name(netdev));
        } else if (event == NETDEV_UP) {
                err = nfp_tunnel_offload_mac(app, netdev,
                                             NFP_TUNNEL_MAC_OFFLOAD_ADD);
                if (err)
                        nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
                                             netdev_name(netdev));
        } else if (event == NETDEV_CHANGEADDR) {
                /* Only offload addr change if netdev is already up. */
                if (!(netdev->flags & IFF_UP))
                        return NOTIFY_OK;

                err = nfp_tunnel_offload_mac(app, netdev,
                                             NFP_TUNNEL_MAC_OFFLOAD_MOD);
                if (err)
                        nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
                                             netdev_name(netdev));
        } else if (event == NETDEV_CHANGEUPPER) {
                /* If a repr is attached to a bridge then tunnel packets
                 * entering the physical port are directed through the bridge
                 * datapath and cannot be directly detunneled. Therefore,
                 * associated offloaded MACs and indexes should not be used
                 * by fw for detunneling.
                 */
                struct netdev_notifier_changeupper_info *info = ptr;
                struct net_device *upper = info->upper_dev;
                struct nfp_flower_repr_priv *repr_priv;
                struct nfp_repr *repr;

                if (!nfp_netdev_is_nfp_repr(netdev) ||
                    !nfp_flower_is_supported_bridge(upper))
                        return NOTIFY_OK;

                repr = netdev_priv(netdev);
                if (repr->app != app)
                        return NOTIFY_OK;

                repr_priv = repr->app_priv;

                if (info->linking) {
                        if (nfp_tunnel_offload_mac(app, netdev,
                                                   NFP_TUNNEL_MAC_OFFLOAD_DEL))
                                nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
                                                     netdev_name(netdev));
                        repr_priv->on_bridge = true;
                } else {
                        repr_priv->on_bridge = false;

                        if (!(netdev->flags & IFF_UP))
                                return NOTIFY_OK;

                        if (nfp_tunnel_offload_mac(app, netdev,
                                                   NFP_TUNNEL_MAC_OFFLOAD_ADD))
                                nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
                                                     netdev_name(netdev));
                }
        }
        return NOTIFY_OK;
}

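/* Offload a pre-tunnel rule, binding the flow to the MAC table index of its
 * internal port egress device.
 */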
int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
                                 struct nfp_fl_payload *flow)
{
        struct nfp_flower_priv *app_priv = app->priv;
        struct nfp_tun_offloaded_mac *mac_entry;
        struct nfp_tun_pre_tun_rule payload;
        struct net_device *internal_dev;
        int err;

        if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
                return -ENOSPC;

        memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

        internal_dev = flow->pre_tun_rule.dev;
        payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
        payload.host_ctx_id = flow->meta.host_ctx_id;

        /* Lookup MAC index for the pre-tunnel rule egress device.
         * Note that because the device is always an internal port, it will
         * have a constant global index so does not need to be tracked.
         */
        mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
                                                     internal_dev->dev_addr);
        if (!mac_entry)
                return -ENOENT;

        payload.port_idx = cpu_to_be16(mac_entry->index);

        /* Copy mac id and vlan to flow - dev may not exist at delete time. */
        flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
        flow->pre_tun_rule.port_idx = payload.port_idx;

        err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
                                       sizeof(struct nfp_tun_pre_tun_rule),
                                       (unsigned char *)&payload, GFP_KERNEL);
        if (err)
                return err;

        app_priv->pre_tun_rule_cnt++;

        return 0;
}

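/* Remove a previously offloaded pre-tunnel rule. */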
int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
                                     struct nfp_fl_payload *flow)
{
        struct nfp_flower_priv *app_priv = app->priv;
        struct nfp_tun_pre_tun_rule payload;
        u32 tmp_flags = 0;
        int err;

        memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

        tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
        payload.flags = cpu_to_be32(tmp_flags);
        payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
        payload.port_idx = flow->pre_tun_rule.port_idx;

        err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
                                       sizeof(struct nfp_tun_pre_tun_rule),
                                       (unsigned char *)&payload, GFP_KERNEL);
        if (err)
                return err;

        app_priv->pre_tun_rule_cnt--;

        return 0;
}

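/* Initialise tunnel offload state (MAC hashtable, IP lists, route caches)
 * and register the netevent notifier.
 */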
int nfp_tunnel_config_start(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        int err;

        /* Initialise rhash for MAC offload tracking. */
        err = rhashtable_init(&priv->tun.offloaded_macs,
                              &offloaded_macs_params);
        if (err)
                return err;

        ida_init(&priv->tun.mac_off_ids);

        /* Initialise priv data for IPv4/v6 offloading. */
        mutex_init(&priv->tun.ipv4_off_lock);
        INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
        mutex_init(&priv->tun.ipv6_off_lock);
        INIT_LIST_HEAD(&priv->tun.ipv6_off_list);

        /* Initialise priv data for neighbour offloading. */
        spin_lock_init(&priv->tun.neigh_off_lock_v4);
        INIT_LIST_HEAD(&priv->tun.neigh_off_list_v4);
        spin_lock_init(&priv->tun.neigh_off_lock_v6);
        INIT_LIST_HEAD(&priv->tun.neigh_off_list_v6);
        priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

        err = register_netevent_notifier(&priv->tun.neigh_nb);
        if (err) {
                rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
                                            nfp_check_rhashtable_empty, NULL);
                return err;
        }

        return 0;
}

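/* Tear down tunnel offload state, removing any routes still cached on the
 * firmware.
 */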
void nfp_tunnel_config_stop(struct nfp_app *app)
{
        struct nfp_offloaded_route *route_entry, *temp;
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *ip_entry;
        struct nfp_tun_neigh_v6 ipv6_route;
        struct nfp_tun_neigh ipv4_route;
        struct list_head *ptr, *storage;

        unregister_netevent_notifier(&priv->tun.neigh_nb);

        ida_destroy(&priv->tun.mac_off_ids);

        /* Free any memory that may be occupied by ipv4 list. */
        list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
                ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                list_del(&ip_entry->list);
                kfree(ip_entry);
        }

        mutex_destroy(&priv->tun.ipv6_off_lock);

        /* Free memory in the route list and remove entries from fw cache. */
        list_for_each_entry_safe(route_entry, temp,
                                 &priv->tun.neigh_off_list_v4, list) {
                memset(&ipv4_route, 0, sizeof(ipv4_route));
                memcpy(&ipv4_route.dst_ipv4, &route_entry->ip_add,
                       sizeof(ipv4_route.dst_ipv4));
                list_del(&route_entry->list);
                kfree(route_entry);

                nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
                                         sizeof(struct nfp_tun_neigh),
                                         (unsigned char *)&ipv4_route,
                                         GFP_KERNEL);
        }

        list_for_each_entry_safe(route_entry, temp,
                                 &priv->tun.neigh_off_list_v6, list) {
                memset(&ipv6_route, 0, sizeof(ipv6_route));
                memcpy(&ipv6_route.dst_ipv6, &route_entry->ip_add,
                       sizeof(ipv6_route.dst_ipv6));
                list_del(&route_entry->list);
                kfree(route_entry);

                nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
                                         sizeof(struct nfp_tun_neigh_v6),
                                         (unsigned char *)&ipv6_route,
                                         GFP_KERNEL);
        }

        /* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
        rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
                                    nfp_check_rhashtable_empty, NULL);
}