drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES               32

#define NFP_TUN_PRE_TUN_RULE_LIMIT      32
#define NFP_TUN_PRE_TUN_RULE_DEL        BIT(0)
#define NFP_TUN_PRE_TUN_IDX_BIT         BIT(3)
#define NFP_TUN_PRE_TUN_IPV6_BIT        BIT(7)

/**
 * struct nfp_tun_pre_tun_rule - rule matched before decap
 * @flags:              options for the rule offload
 * @port_idx:           index of destination MAC address for the rule
 * @vlan_tci:           VLAN info associated with MAC
 * @host_ctx_id:        stats context of rule to update
 */
struct nfp_tun_pre_tun_rule {
        __be32 flags;
        __be16 port_idx;
        __be16 vlan_tci;
        __be32 host_ctx_id;
};

/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:                sequence number of the message
 * @count:              number of tunnels reported in the message
 * @flags:              options part of the request
 * @tun_info.ipv4:              dest IPv4 address of active route
 * @tun_info.egress_port:       port the encapsulated packet egressed
 * @tun_info.extra:             reserved for future use
 * @tun_info:           tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns {
        __be32 seq;
        __be32 count;
        __be32 flags;
        struct route_ip_info {
                __be32 ipv4;
                __be32 egress_port;
                __be32 extra[2];
        } tun_info[];
};
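
/* Illustrative sizing: the payload ends in a flexible array, so the
 * expected message length follows from struct_size(). For count == 2,
 * the cmsg data must be exactly
 *   sizeof(struct nfp_tun_active_tuns) + 2 * sizeof(struct route_ip_info)
 *   = 12 + 2 * 16 = 44 bytes,
 * which is the check nfp_tunnel_keep_alive() enforces below.
 */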

/**
 * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
 * @seq:                sequence number of the message
 * @count:              number of tunnels reported in the message
 * @flags:              options part of the request
 * @tun_info.ipv6:              dest IPv6 address of active route
 * @tun_info.egress_port:       port the encapsulated packet egressed
 * @tun_info.extra:             reserved for future use
 * @tun_info:           tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns_v6 {
        __be32 seq;
        __be32 count;
        __be32 flags;
        struct route_ip_info_v6 {
                struct in6_addr ipv6;
                __be32 egress_port;
                __be32 extra[2];
        } tun_info[];
};

/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:       ingress port of packet that signalled request
 * @ipv4_addr:          destination ipv4 address for route
 * @reserved:           reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
        __be32 ingress_port;
        __be32 ipv4_addr;
        __be32 reserved[2];
};

/**
 * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
 * @ingress_port:       ingress port of packet that signalled request
 * @ipv6_addr:          destination ipv6 address for route
 */
struct nfp_tun_req_route_ipv6 {
        __be32 ingress_port;
        struct in6_addr ipv6_addr;
};

/**
 * struct nfp_offloaded_route - routes that are offloaded to the NFP
 * @list:       list pointer
 * @ip_add:     destination of route - can be IPv4 or IPv6
 */
struct nfp_offloaded_route {
        struct list_head list;
        u8 ip_add[];
};

#define NFP_FL_IPV4_ADDRS_MAX        32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:      number of IPs populated in the array
 * @ipv4_addr:  array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
        __be32 count;
        __be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};
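
/* The list is not updated incrementally: nfp_tun_write_ipv4_list() below
 * rebuilds the full array from the cached entries and sends a complete
 * snapshot to the NFP on every add or delete.
 */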

/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:  IP address
 * @ref_count:  number of rules currently using this IP
 * @list:       list pointer
 */
struct nfp_ipv4_addr_entry {
        __be32 ipv4_addr;
        int ref_count;
        struct list_head list;
};

#define NFP_FL_IPV6_ADDRS_MAX        4

/**
 * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
 * @count:      number of IPs populated in the array
 * @ipv6_addr:  array of IPV6_ADDRS_MAX 128 bit IPv6 addresses
 */
struct nfp_tun_ipv6_addr {
        __be32 count;
        struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
};

#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG    0x2

/**
 * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
 * @flags:      MAC address offload options
 * @count:      number of MAC addresses in the message (should be 1)
 * @index:      index of MAC address in the lookup table
 * @addr:       interface MAC address
 */
struct nfp_tun_mac_addr_offload {
        __be16 flags;
        __be16 count;
        __be16 index;
        u8 addr[ETH_ALEN];
};

enum nfp_flower_mac_offload_cmd {
        NFP_TUNNEL_MAC_OFFLOAD_ADD =            0,
        NFP_TUNNEL_MAC_OFFLOAD_DEL =            1,
        NFP_TUNNEL_MAC_OFFLOAD_MOD =            2,
};

#define NFP_MAX_MAC_INDEX       0xff

/**
 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
 * @ht_node:            Hashtable entry
 * @addr:               Offloaded MAC address
 * @index:              Offloaded index for given MAC address
 * @ref_count:          Number of devs using this MAC address
 * @repr_list:          List of reprs sharing this MAC address
 * @bridge_count:       Number of bridge/internal devs with MAC
 */
struct nfp_tun_offloaded_mac {
        struct rhash_head ht_node;
        u8 addr[ETH_ALEN];
        u16 index;
        int ref_count;
        struct list_head repr_list;
        int bridge_count;
};

static const struct rhashtable_params offloaded_macs_params = {
        .key_offset     = offsetof(struct nfp_tun_offloaded_mac, addr),
        .head_offset    = offsetof(struct nfp_tun_offloaded_mac, ht_node),
        .key_len        = ETH_ALEN,
        .automatic_shrinking    = true,
};

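/* The firmware periodically reports which tunnel destinations are still
 * passing traffic. For each reported address, look up the neighbour entry
 * on the egress netdev and poke it with neigh_event_send() so the kernel
 * does not age out an entry that the hardware datapath is still using.
 */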
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_tun_active_tuns *payload;
        struct net_device *netdev;
        int count, i, pay_len;
        struct neighbour *n;
        __be32 ipv4_addr;
        u32 port;

        payload = nfp_flower_cmsg_get_data(skb);
        count = be32_to_cpu(payload->count);
        if (count > NFP_FL_MAX_ROUTES) {
                nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
                return;
        }

        pay_len = nfp_flower_cmsg_get_data_len(skb);
        if (pay_len != struct_size(payload, tun_info, count)) {
                nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
                return;
        }

        rcu_read_lock();
        for (i = 0; i < count; i++) {
                ipv4_addr = payload->tun_info[i].ipv4;
                port = be32_to_cpu(payload->tun_info[i].egress_port);
                netdev = nfp_app_dev_get(app, port, NULL);
                if (!netdev)
                        continue;

                n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
                if (!n)
                        continue;

                /* Update the used timestamp of neighbour */
                neigh_event_send(n, NULL);
                neigh_release(n);
        }
        rcu_read_unlock();
}

void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
        struct nfp_tun_active_tuns_v6 *payload;
        struct net_device *netdev;
        int count, i, pay_len;
        struct neighbour *n;
        void *ipv6_add;
        u32 port;

        payload = nfp_flower_cmsg_get_data(skb);
        count = be32_to_cpu(payload->count);
        if (count > NFP_FL_IPV6_ADDRS_MAX) {
                nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
                return;
        }

        pay_len = nfp_flower_cmsg_get_data_len(skb);
        if (pay_len != struct_size(payload, tun_info, count)) {
                nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
                return;
        }

        rcu_read_lock();
        for (i = 0; i < count; i++) {
                ipv6_add = &payload->tun_info[i].ipv6;
                port = be32_to_cpu(payload->tun_info[i].egress_port);
                netdev = nfp_app_dev_get(app, port, NULL);
                if (!netdev)
                        continue;

                n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
                if (!n)
                        continue;

                /* Update the used timestamp of neighbour */
                neigh_event_send(n, NULL);
                neigh_release(n);
        }
        rcu_read_unlock();
#endif
}

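/* Build a tunnel-config cmsg and hand it to the control channel. For the
 * neighbour message types, the optional ext and lag trailers are dropped
 * by shortening plen when the firmware does not advertise the matching
 * NFP_FL_FEATS_DECAP_V2 / NFP_FL_FEATS_TUNNEL_NEIGH_LAG feature bits, so
 * older firmware only ever sees the payload length it expects.
 */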
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
                         gfp_t flag)
{
        struct nfp_flower_priv *priv = app->priv;
        struct sk_buff *skb;
        unsigned char *msg;

        if (!(priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) &&
            (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
             mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6))
                plen -= sizeof(struct nfp_tun_neigh_ext);

        if (!(priv->flower_ext_feats & NFP_FL_FEATS_TUNNEL_NEIGH_LAG) &&
            (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
             mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6))
                plen -= sizeof(struct nfp_tun_neigh_lag);

        skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
        if (!skb)
                return -ENOMEM;

        msg = nfp_flower_cmsg_get_data(skb);
        memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

        nfp_ctrl_tx(app->ctrl, skb);
        return 0;
}

static void
nfp_tun_mutual_link(struct nfp_predt_entry *predt,
                    struct nfp_neigh_entry *neigh)
{
        struct nfp_fl_payload *flow_pay = predt->flow_pay;
        struct nfp_tun_neigh_ext *ext;
        struct nfp_tun_neigh *common;

        if (flow_pay->pre_tun_rule.is_ipv6 != neigh->is_ipv6)
                return;

        /* In the case of bonding it is possible that there might already
         * be a flow linked (as the MAC address gets shared). If a flow
         * is already linked just return.
         */
        if (neigh->flow)
                return;

        common = neigh->is_ipv6 ?
                 &((struct nfp_tun_neigh_v6 *)neigh->payload)->common :
                 &((struct nfp_tun_neigh_v4 *)neigh->payload)->common;
        ext = neigh->is_ipv6 ?
                 &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
                 &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;

        if (memcmp(flow_pay->pre_tun_rule.loc_mac,
                   common->src_addr, ETH_ALEN) ||
            memcmp(flow_pay->pre_tun_rule.rem_mac,
                   common->dst_addr, ETH_ALEN))
                return;

        list_add(&neigh->list_head, &predt->nn_list);
        neigh->flow = predt;
        ext->host_ctx = flow_pay->meta.host_ctx_id;
        ext->vlan_tci = flow_pay->pre_tun_rule.vlan_tci;
        ext->vlan_tpid = flow_pay->pre_tun_rule.vlan_tpid;
}

static void
nfp_tun_link_predt_entries(struct nfp_app *app,
                           struct nfp_neigh_entry *nn_entry)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_predt_entry *predt, *tmp;

        list_for_each_entry_safe(predt, tmp, &priv->predt_list, list_head) {
                nfp_tun_mutual_link(predt, nn_entry);
        }
}

void nfp_tun_link_and_update_nn_entries(struct nfp_app *app,
                                        struct nfp_predt_entry *predt)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_neigh_entry *nn_entry;
        struct rhashtable_iter iter;
        size_t neigh_size;
        u8 type;

        rhashtable_walk_enter(&priv->neigh_table, &iter);
        rhashtable_walk_start(&iter);
        while ((nn_entry = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(nn_entry))
                        continue;
                nfp_tun_mutual_link(predt, nn_entry);
                neigh_size = nn_entry->is_ipv6 ?
                             sizeof(struct nfp_tun_neigh_v6) :
                             sizeof(struct nfp_tun_neigh_v4);
                type = nn_entry->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
                                           NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
                nfp_flower_xmit_tun_conf(app, type, neigh_size,
                                         nn_entry->payload,
                                         GFP_ATOMIC);
        }
        rhashtable_walk_stop(&iter);
        rhashtable_walk_exit(&iter);
}

static void nfp_tun_cleanup_nn_entries(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_neigh_entry *neigh;
        struct nfp_tun_neigh_ext *ext;
        struct rhashtable_iter iter;
        size_t neigh_size;
        u8 type;

        rhashtable_walk_enter(&priv->neigh_table, &iter);
        rhashtable_walk_start(&iter);
        while ((neigh = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(neigh))
                        continue;
                ext = neigh->is_ipv6 ?
                         &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
                         &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
                ext->host_ctx = cpu_to_be32(U32_MAX);
                ext->vlan_tpid = cpu_to_be16(U16_MAX);
                ext->vlan_tci = cpu_to_be16(U16_MAX);

                neigh_size = neigh->is_ipv6 ?
                             sizeof(struct nfp_tun_neigh_v6) :
                             sizeof(struct nfp_tun_neigh_v4);
                type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
                                           NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
                nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload,
                                         GFP_ATOMIC);

                rhashtable_remove_fast(&priv->neigh_table, &neigh->ht_node,
                                       neigh_table_params);
                if (neigh->flow)
                        list_del(&neigh->list_head);
                kfree(neigh);
        }
        rhashtable_walk_stop(&iter);
        rhashtable_walk_exit(&iter);
}

void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app,
                                          struct nfp_predt_entry *predt)
{
        struct nfp_neigh_entry *neigh, *tmp;
        struct nfp_tun_neigh_ext *ext;
        size_t neigh_size;
        u8 type;

        list_for_each_entry_safe(neigh, tmp, &predt->nn_list, list_head) {
                ext = neigh->is_ipv6 ?
                         &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
                         &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
                neigh->flow = NULL;
                ext->host_ctx = cpu_to_be32(U32_MAX);
                ext->vlan_tpid = cpu_to_be16(U16_MAX);
                ext->vlan_tci = cpu_to_be16(U16_MAX);
                list_del(&neigh->list_head);
                neigh_size = neigh->is_ipv6 ?
                             sizeof(struct nfp_tun_neigh_v6) :
                             sizeof(struct nfp_tun_neigh_v4);
                type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
                                           NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
                nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload,
                                         GFP_ATOMIC);
        }
}

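/* Sync one neighbour into the offloaded state. Three cases are handled:
 * no cached entry and a valid neighbour allocates an entry, fills the
 * payload from the flow and neighbour, links any matching pre-tunnel
 * rules and pushes the config to the firmware; a cached entry whose
 * neighbour has become invalid zeroes the payload (keeping only the
 * destination address), sends the delete and frees the entry; a cached
 * entry that is still valid refreshes the destination MAC and resends
 * only if it changed or @override was set.
 */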
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
                    void *flow, struct neighbour *neigh, bool is_ipv6,
                    bool override)
{
        bool neigh_invalid = !(neigh->nud_state & NUD_VALID) || neigh->dead;
        size_t neigh_size = is_ipv6 ? sizeof(struct nfp_tun_neigh_v6) :
                            sizeof(struct nfp_tun_neigh_v4);
        unsigned long cookie = (unsigned long)neigh;
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_tun_neigh_lag lag_info;
        struct nfp_neigh_entry *nn_entry;
        u32 port_id;
        u8 mtype;

        port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
        if (!port_id)
                return;

        if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT) {
                memset(&lag_info, 0, sizeof(struct nfp_tun_neigh_lag));
                nfp_flower_lag_get_info_from_netdev(app, netdev, &lag_info);
        }

        spin_lock_bh(&priv->predt_lock);
        nn_entry = rhashtable_lookup_fast(&priv->neigh_table, &cookie,
                                          neigh_table_params);
        if (!nn_entry && !neigh_invalid) {
                struct nfp_tun_neigh_ext *ext;
                struct nfp_tun_neigh_lag *lag;
                struct nfp_tun_neigh *common;

                nn_entry = kzalloc(sizeof(*nn_entry) + neigh_size,
                                   GFP_ATOMIC);
                if (!nn_entry)
                        goto err;

                nn_entry->payload = (char *)&nn_entry[1];
                nn_entry->neigh_cookie = cookie;
                nn_entry->is_ipv6 = is_ipv6;
                nn_entry->flow = NULL;
                if (is_ipv6) {
                        struct flowi6 *flowi6 = (struct flowi6 *)flow;
                        struct nfp_tun_neigh_v6 *payload;

                        payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
                        payload->src_ipv6 = flowi6->saddr;
                        payload->dst_ipv6 = flowi6->daddr;
                        common = &payload->common;
                        ext = &payload->ext;
                        lag = &payload->lag;
                        mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
                } else {
                        struct flowi4 *flowi4 = (struct flowi4 *)flow;
                        struct nfp_tun_neigh_v4 *payload;

                        payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
                        payload->src_ipv4 = flowi4->saddr;
                        payload->dst_ipv4 = flowi4->daddr;
                        common = &payload->common;
                        ext = &payload->ext;
                        lag = &payload->lag;
                        mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
                }
                ext->host_ctx = cpu_to_be32(U32_MAX);
                ext->vlan_tpid = cpu_to_be16(U16_MAX);
                ext->vlan_tci = cpu_to_be16(U16_MAX);
                ether_addr_copy(common->src_addr, netdev->dev_addr);
                neigh_ha_snapshot(common->dst_addr, neigh, netdev);

                if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT)
                        memcpy(lag, &lag_info, sizeof(struct nfp_tun_neigh_lag));
                common->port_id = cpu_to_be32(port_id);

                if (rhashtable_insert_fast(&priv->neigh_table,
                                           &nn_entry->ht_node,
                                           neigh_table_params))
                        goto err;

                nfp_tun_link_predt_entries(app, nn_entry);
                nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
                                         nn_entry->payload,
                                         GFP_ATOMIC);
        } else if (nn_entry && neigh_invalid) {
                if (is_ipv6) {
                        struct flowi6 *flowi6 = (struct flowi6 *)flow;
                        struct nfp_tun_neigh_v6 *payload;

                        payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
                        memset(payload, 0, sizeof(struct nfp_tun_neigh_v6));
                        payload->dst_ipv6 = flowi6->daddr;
                        mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
                } else {
                        struct flowi4 *flowi4 = (struct flowi4 *)flow;
                        struct nfp_tun_neigh_v4 *payload;

                        payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
                        memset(payload, 0, sizeof(struct nfp_tun_neigh_v4));
                        payload->dst_ipv4 = flowi4->daddr;
                        mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
                }
                /* Trigger ARP to verify invalid neighbour state. */
                neigh_event_send(neigh, NULL);
                rhashtable_remove_fast(&priv->neigh_table,
                                       &nn_entry->ht_node,
                                       neigh_table_params);

                nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
                                         nn_entry->payload,
                                         GFP_ATOMIC);

                if (nn_entry->flow)
                        list_del(&nn_entry->list_head);
                kfree(nn_entry);
        } else if (nn_entry && !neigh_invalid) {
                struct nfp_tun_neigh *common;
                u8 dst_addr[ETH_ALEN];
                bool is_mac_change;

                if (is_ipv6) {
                        struct nfp_tun_neigh_v6 *payload;

                        payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
                        common = &payload->common;
                        mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
                } else {
                        struct nfp_tun_neigh_v4 *payload;

                        payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
                        common = &payload->common;
                        mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
                }

                ether_addr_copy(dst_addr, common->dst_addr);
                neigh_ha_snapshot(common->dst_addr, neigh, netdev);
                is_mac_change = !ether_addr_equal(dst_addr, common->dst_addr);
                if (override || is_mac_change) {
                        if (is_mac_change && nn_entry->flow) {
                                list_del(&nn_entry->list_head);
                                nn_entry->flow = NULL;
                        }
                        nfp_tun_link_predt_entries(app, nn_entry);
                        nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
                                                 nn_entry->payload,
                                                 GFP_ATOMIC);
                }
        }

        spin_unlock_bh(&priv->predt_lock);
        return;

err:
        kfree(nn_entry);
        spin_unlock_bh(&priv->predt_lock);
        nfp_flower_cmsg_warn(app, "Neighbour configuration failed.\n");
}

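/* Netevent notifier: on NETEVENT_NEIGH_UPDATE or NETEVENT_REDIRECT for a
 * netdev known to the app, do a route lookup to recover the source
 * address (only needed for new entries) and let nfp_tun_write_neigh()
 * reconcile the firmware's view of the neighbour.
 */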
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
                            void *ptr)
{
        struct nfp_flower_priv *app_priv;
        struct netevent_redirect *redir;
        struct neighbour *n;
        struct nfp_app *app;
        bool neigh_invalid;
        int err;

        switch (event) {
        case NETEVENT_REDIRECT:
                redir = (struct netevent_redirect *)ptr;
                n = redir->neigh;
                break;
        case NETEVENT_NEIGH_UPDATE:
                n = (struct neighbour *)ptr;
                break;
        default:
                return NOTIFY_DONE;
        }

        neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead;

        app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
        app = app_priv->app;

        if (!nfp_flower_get_port_id_from_netdev(app, n->dev))
                return NOTIFY_DONE;

#if IS_ENABLED(CONFIG_INET)
        if (n->tbl->family == AF_INET6) {
#if IS_ENABLED(CONFIG_IPV6)
                struct flowi6 flow6 = {};

                flow6.daddr = *(struct in6_addr *)n->primary_key;
                if (!neigh_invalid) {
                        struct dst_entry *dst;
                        /* Use ip6_dst_lookup_flow() to populate flow6.saddr
                         * and other fields. This information is only needed
                         * for new entries; the lookup can be skipped when an
                         * entry gets invalidated, as only the daddr is needed
                         * for deleting.
                         */
                        dst = ip6_dst_lookup_flow(dev_net(n->dev), NULL,
                                                  &flow6, NULL);
                        if (IS_ERR(dst))
                                return NOTIFY_DONE;

                        dst_release(dst);
                }
                nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false);
#else
                return NOTIFY_DONE;
#endif /* CONFIG_IPV6 */
        } else {
                struct flowi4 flow4 = {};

                flow4.daddr = *(__be32 *)n->primary_key;
                if (!neigh_invalid) {
                        struct rtable *rt;
                        /* Use ip_route_output_key() to populate flow4.saddr
                         * and other fields. This information is only needed
                         * for new entries; the lookup can be skipped when an
                         * entry gets invalidated, as only the daddr is needed
                         * for deleting.
                         */
                        rt = ip_route_output_key(dev_net(n->dev), &flow4);
                        err = PTR_ERR_OR_ZERO(rt);
                        if (err)
                                return NOTIFY_DONE;

                        ip_rt_put(rt);
                }
                nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false);
        }
#else
        return NOTIFY_DONE;
#endif /* CONFIG_INET */

        return NOTIFY_OK;
}

void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_tun_req_route_ipv4 *payload;
        struct net_device *netdev;
        struct flowi4 flow = {};
        struct neighbour *n;
        struct rtable *rt;
        int err;

        payload = nfp_flower_cmsg_get_data(skb);

        rcu_read_lock();
        netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
        if (!netdev)
                goto fail_rcu_unlock;

        flow.daddr = payload->ipv4_addr;
        flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
        /* Do a route lookup in the same namespace as the ingress port. */
        rt = ip_route_output_key(dev_net(netdev), &flow);
        err = PTR_ERR_OR_ZERO(rt);
        if (err)
                goto fail_rcu_unlock;
#else
        goto fail_rcu_unlock;
#endif

        /* Get the neighbour entry for the lookup */
        n = dst_neigh_lookup(&rt->dst, &flow.daddr);
        ip_rt_put(rt);
        if (!n)
                goto fail_rcu_unlock;
        nfp_tun_write_neigh(n->dev, app, &flow, n, false, true);
        neigh_release(n);
        rcu_read_unlock();
        return;

fail_rcu_unlock:
        rcu_read_unlock();
        nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}

void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_tun_req_route_ipv6 *payload;
        struct net_device *netdev;
        struct flowi6 flow = {};
        struct dst_entry *dst;
        struct neighbour *n;

        payload = nfp_flower_cmsg_get_data(skb);

        rcu_read_lock();
        netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
        if (!netdev)
                goto fail_rcu_unlock;

        flow.daddr = payload->ipv6_addr;
        flow.flowi6_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
        dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
                                              NULL);
        if (IS_ERR(dst))
                goto fail_rcu_unlock;
#else
        goto fail_rcu_unlock;
#endif

        n = dst_neigh_lookup(dst, &flow.daddr);
        dst_release(dst);
        if (!n)
                goto fail_rcu_unlock;

        nfp_tun_write_neigh(n->dev, app, &flow, n, true, true);
        neigh_release(n);
        rcu_read_unlock();
        return;

fail_rcu_unlock:
        rcu_read_unlock();
        nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
}

static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *entry;
        struct nfp_tun_ipv4_addr payload;
        struct list_head *ptr, *storage;
        int count;

        memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
        mutex_lock(&priv->tun.ipv4_off_lock);
        count = 0;
        list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
                if (count >= NFP_FL_IPV4_ADDRS_MAX) {
                        mutex_unlock(&priv->tun.ipv4_off_lock);
                        nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
                        return;
                }
                entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                payload.ipv4_addr[count++] = entry->ipv4_addr;
        }
        payload.count = cpu_to_be32(count);
        mutex_unlock(&priv->tun.ipv4_off_lock);

        nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
                                 sizeof(struct nfp_tun_ipv4_addr),
                                 &payload, GFP_KERNEL);
}

void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *entry;
        struct list_head *ptr, *storage;

        mutex_lock(&priv->tun.ipv4_off_lock);
        list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                if (entry->ipv4_addr == ipv4) {
                        entry->ref_count++;
                        mutex_unlock(&priv->tun.ipv4_off_lock);
                        return;
                }
        }

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                mutex_unlock(&priv->tun.ipv4_off_lock);
                nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
                return;
        }
        entry->ipv4_addr = ipv4;
        entry->ref_count = 1;
        list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
        mutex_unlock(&priv->tun.ipv4_off_lock);

        nfp_tun_write_ipv4_list(app);
}

void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *entry;
        struct list_head *ptr, *storage;

        mutex_lock(&priv->tun.ipv4_off_lock);
        list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
                entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                if (entry->ipv4_addr == ipv4) {
                        entry->ref_count--;
                        if (!entry->ref_count) {
                                list_del(&entry->list);
                                kfree(entry);
                        }
                        break;
                }
        }
        mutex_unlock(&priv->tun.ipv4_off_lock);

        nfp_tun_write_ipv4_list(app);
}

static void nfp_tun_write_ipv6_list(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv6_addr_entry *entry;
        struct nfp_tun_ipv6_addr payload;
        int count = 0;

        memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
        mutex_lock(&priv->tun.ipv6_off_lock);
        list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
                if (count >= NFP_FL_IPV6_ADDRS_MAX) {
                        nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
                        break;
                }
                payload.ipv6_addr[count++] = entry->ipv6_addr;
        }
        mutex_unlock(&priv->tun.ipv6_off_lock);
        payload.count = cpu_to_be32(count);

        nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
                                 sizeof(struct nfp_tun_ipv6_addr),
                                 &payload, GFP_KERNEL);
}

struct nfp_ipv6_addr_entry *
nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv6_addr_entry *entry;

        mutex_lock(&priv->tun.ipv6_off_lock);
        list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
                if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
                        entry->ref_count++;
                        mutex_unlock(&priv->tun.ipv6_off_lock);
                        return entry;
                }

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                mutex_unlock(&priv->tun.ipv6_off_lock);
                nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
                return NULL;
        }
        entry->ipv6_addr = *ipv6;
        entry->ref_count = 1;
        list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
        mutex_unlock(&priv->tun.ipv6_off_lock);

        nfp_tun_write_ipv6_list(app);

        return entry;
}

void
nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
{
        struct nfp_flower_priv *priv = app->priv;
        bool freed = false;

        mutex_lock(&priv->tun.ipv6_off_lock);
        if (!--entry->ref_count) {
                list_del(&entry->list);
                kfree(entry);
                freed = true;
        }
        mutex_unlock(&priv->tun.ipv6_off_lock);

        if (freed)
                nfp_tun_write_ipv6_list(app);
}

static int
__nfp_tunnel_offload_mac(struct nfp_app *app, const u8 *mac, u16 idx, bool del)
{
        struct nfp_tun_mac_addr_offload payload;

        memset(&payload, 0, sizeof(payload));

        if (del)
                payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);

        /* FW supports multiple MACs per cmsg but restrict to single. */
        payload.count = cpu_to_be16(1);
        payload.index = cpu_to_be16(idx);
        ether_addr_copy(payload.addr, mac);

        return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
                                        sizeof(struct nfp_tun_mac_addr_offload),
                                        &payload, GFP_KERNEL);
}

static bool nfp_tunnel_port_is_phy_repr(int port)
{
        if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
            NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
                return true;

        return false;
}

static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
{
        return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
}

static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
{
        return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
{
        return nfp_mac_idx >> 8;
}

static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
{
        return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}
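
/* Illustrative encoding: the 16-bit MAC index carries the owner type in
 * its low byte and the identifier in its high byte, e.g. with a physical
 * port id of 3 and an ida id of 5:
 *   phy repr:  3 << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT
 *   global:    5 << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT
 * so nfp_tunnel_is_mac_idx_global() only needs to test the low byte and
 * nfp_tunnel_get_ida_from_global_mac_idx() recovers the id with a shift.
 */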

static struct nfp_tun_offloaded_mac *
nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, const u8 *mac)
{
        struct nfp_flower_priv *priv = app->priv;

        return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
                                      offloaded_macs_params);
}

static void
nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
                                           struct net_device *netdev, bool mod)
{
        if (nfp_netdev_is_nfp_repr(netdev)) {
                struct nfp_flower_repr_priv *repr_priv;
                struct nfp_repr *repr;

                repr = netdev_priv(netdev);
                repr_priv = repr->app_priv;

                /* If modifying MAC, remove repr from old list first. */
                if (mod)
                        list_del(&repr_priv->mac_list);

                list_add_tail(&repr_priv->mac_list, &entry->repr_list);
        } else if (nfp_flower_is_supported_bridge(netdev)) {
                entry->bridge_count++;
        }

        entry->ref_count++;
}

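/* Select an index for the MAC and offload it. A MAC owned by a single
 * physical repr derives its index from the port id; a MAC that is shared,
 * non-repr or used by a bridge device gets a global index from the ida,
 * with NFP_TUN_PRE_TUN_IDX_BIT directing matches to the pre-tunnel table.
 */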
static int
nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
                          int port, bool mod)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_tun_offloaded_mac *entry;
        int ida_idx = -1, err;
        u16 nfp_mac_idx = 0;

        entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
        if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
                if (entry->bridge_count ||
                    !nfp_flower_is_supported_bridge(netdev)) {
                        nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
                                                                   netdev, mod);
                        return 0;
                }

                /* MAC is global but matches need to go to pre_tun table. */
                nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
        }

        if (!nfp_mac_idx) {
                /* Assign a global index if non-repr or MAC is now shared. */
                if (entry || !port) {
                        ida_idx = ida_alloc_max(&priv->tun.mac_off_ids,
                                                NFP_MAX_MAC_INDEX, GFP_KERNEL);
                        if (ida_idx < 0)
                                return ida_idx;

                        nfp_mac_idx =
                                nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);

                        if (nfp_flower_is_supported_bridge(netdev))
                                nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;

                } else {
                        nfp_mac_idx =
                                nfp_tunnel_get_mac_idx_from_phy_port_id(port);
                }
        }

        if (!entry) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry) {
                        err = -ENOMEM;
                        goto err_free_ida;
                }

                ether_addr_copy(entry->addr, netdev->dev_addr);
                INIT_LIST_HEAD(&entry->repr_list);

                if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
                                           &entry->ht_node,
                                           offloaded_macs_params)) {
                        err = -ENOMEM;
                        goto err_free_entry;
                }
        }

        err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
                                       nfp_mac_idx, false);
        if (err) {
                /* If not shared then free. */
                if (!entry->ref_count)
                        goto err_remove_hash;
                goto err_free_ida;
        }

        entry->index = nfp_mac_idx;
        nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);

        return 0;

err_remove_hash:
        rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
                               offloaded_macs_params);
err_free_entry:
        kfree(entry);
err_free_ida:
        if (ida_idx != -1)
                ida_free(&priv->tun.mac_off_ids, ida_idx);

        return err;
}

static int
nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
                          const u8 *mac, bool mod)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_tun_offloaded_mac *entry;
        struct nfp_repr *repr;
        u16 nfp_mac_idx;
        int ida_idx;

        entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
        if (!entry)
                return 0;

        entry->ref_count--;
        /* If del is part of a mod then mac_list is still in use elsewhere. */
        if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
                repr = netdev_priv(netdev);
                repr_priv = repr->app_priv;
                list_del(&repr_priv->mac_list);
        }

        if (nfp_flower_is_supported_bridge(netdev)) {
                entry->bridge_count--;

                if (!entry->bridge_count && entry->ref_count) {
                        nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
                        if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
                                                     false)) {
                                nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
                                                     netdev_name(netdev));
                                return 0;
                        }

                        entry->index = nfp_mac_idx;
                        return 0;
                }
        }

        /* If MAC is now used by 1 repr set the offloaded MAC index to port. */
        if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
                int port, err;

                repr_priv = list_first_entry(&entry->repr_list,
                                             struct nfp_flower_repr_priv,
                                             mac_list);
                repr = repr_priv->nfp_repr;
                port = nfp_repr_get_port_id(repr->netdev);
                nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
                err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
                if (err) {
                        nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
                                             netdev_name(netdev));
                        return 0;
                }

                ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
                ida_free(&priv->tun.mac_off_ids, ida_idx);
                entry->index = nfp_mac_idx;
                return 0;
        }

        if (entry->ref_count)
                return 0;

        WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
                                            &entry->ht_node,
                                            offloaded_macs_params));

        if (nfp_flower_is_supported_bridge(netdev))
                nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
        else
                nfp_mac_idx = entry->index;

        /* If MAC has global ID then extract and free the ida entry. */
        if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
                ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
                ida_free(&priv->tun.mac_off_ids, ida_idx);
        }

        kfree(entry);

        return __nfp_tunnel_offload_mac(app, mac, 0, true);
}

static int
nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
                       enum nfp_flower_mac_offload_cmd cmd)
{
        struct nfp_flower_non_repr_priv *nr_priv = NULL;
        bool non_repr = false, *mac_offloaded;
        u8 *off_mac = NULL;
        int err, port = 0;

        if (nfp_netdev_is_nfp_repr(netdev)) {
                struct nfp_flower_repr_priv *repr_priv;
                struct nfp_repr *repr;

                repr = netdev_priv(netdev);
                if (repr->app != app)
                        return 0;

                repr_priv = repr->app_priv;
                if (repr_priv->on_bridge)
                        return 0;

                mac_offloaded = &repr_priv->mac_offloaded;
                off_mac = &repr_priv->offloaded_mac_addr[0];
                port = nfp_repr_get_port_id(netdev);
                if (!nfp_tunnel_port_is_phy_repr(port))
                        return 0;
        } else if (nfp_fl_is_netdev_to_offload(netdev)) {
                nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
                if (!nr_priv)
                        return -ENOMEM;

                mac_offloaded = &nr_priv->mac_offloaded;
                off_mac = &nr_priv->offloaded_mac_addr[0];
                non_repr = true;
        } else {
                return 0;
        }

        if (!is_valid_ether_addr(netdev->dev_addr)) {
                err = -EINVAL;
                goto err_put_non_repr_priv;
        }

        if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
                cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;

        switch (cmd) {
        case NFP_TUNNEL_MAC_OFFLOAD_ADD:
                err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
                if (err)
                        goto err_put_non_repr_priv;

                if (non_repr)
                        __nfp_flower_non_repr_priv_get(nr_priv);

                *mac_offloaded = true;
                ether_addr_copy(off_mac, netdev->dev_addr);
                break;
        case NFP_TUNNEL_MAC_OFFLOAD_DEL:
                /* Only attempt delete if add was successful. */
                if (!*mac_offloaded)
                        break;

                if (non_repr)
                        __nfp_flower_non_repr_priv_put(nr_priv);

                *mac_offloaded = false;

                err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
                                                false);
                if (err)
                        goto err_put_non_repr_priv;

                break;
        case NFP_TUNNEL_MAC_OFFLOAD_MOD:
                /* Ignore if changing to the same address. */
                if (ether_addr_equal(netdev->dev_addr, off_mac))
                        break;

                err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
                if (err)
                        goto err_put_non_repr_priv;

                /* Delete the previous MAC address. */
                err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
                if (err)
                        nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
                                             netdev_name(netdev));

                ether_addr_copy(off_mac, netdev->dev_addr);
                break;
        default:
                err = -EINVAL;
                goto err_put_non_repr_priv;
        }

        if (non_repr)
                __nfp_flower_non_repr_priv_put(nr_priv);

        return 0;

err_put_non_repr_priv:
        if (non_repr)
                __nfp_flower_non_repr_priv_put(nr_priv);

        return err;
}

int nfp_tunnel_mac_event_handler(struct nfp_app *app,
                                 struct net_device *netdev,
                                 unsigned long event, void *ptr)
{
        int err;

        if (event == NETDEV_DOWN) {
                err = nfp_tunnel_offload_mac(app, netdev,
                                             NFP_TUNNEL_MAC_OFFLOAD_DEL);
                if (err)
                        nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
                                             netdev_name(netdev));
        } else if (event == NETDEV_UP) {
                err = nfp_tunnel_offload_mac(app, netdev,
                                             NFP_TUNNEL_MAC_OFFLOAD_ADD);
                if (err)
                        nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
                                             netdev_name(netdev));
        } else if (event == NETDEV_CHANGEADDR) {
                /* Only offload addr change if netdev is already up. */
                if (!(netdev->flags & IFF_UP))
                        return NOTIFY_OK;

                err = nfp_tunnel_offload_mac(app, netdev,
                                             NFP_TUNNEL_MAC_OFFLOAD_MOD);
                if (err)
                        nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
                                             netdev_name(netdev));
        } else if (event == NETDEV_CHANGEUPPER) {
                /* If a repr is attached to a bridge then tunnel packets
                 * entering the physical port are directed through the bridge
                 * datapath and cannot be directly detunneled. Therefore,
                 * associated offloaded MACs and indexes should not be used
                 * by fw for detunneling.
                 */
                struct netdev_notifier_changeupper_info *info = ptr;
                struct net_device *upper = info->upper_dev;
                struct nfp_flower_repr_priv *repr_priv;
                struct nfp_repr *repr;

                if (!nfp_netdev_is_nfp_repr(netdev) ||
                    !nfp_flower_is_supported_bridge(upper))
                        return NOTIFY_OK;

                repr = netdev_priv(netdev);
                if (repr->app != app)
                        return NOTIFY_OK;

                repr_priv = repr->app_priv;

                if (info->linking) {
                        if (nfp_tunnel_offload_mac(app, netdev,
                                                   NFP_TUNNEL_MAC_OFFLOAD_DEL))
                                nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
                                                     netdev_name(netdev));
                        repr_priv->on_bridge = true;
                } else {
                        repr_priv->on_bridge = false;

                        if (!(netdev->flags & IFF_UP))
                                return NOTIFY_OK;

                        if (nfp_tunnel_offload_mac(app, netdev,
                                                   NFP_TUNNEL_MAC_OFFLOAD_ADD))
                                nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
                                                     netdev_name(netdev));
                }
        }
        return NOTIFY_OK;
}

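/* Offload a rule that is matched before decap. At most
 * NFP_TUN_PRE_TUN_RULE_LIMIT such rules can be offloaded at a time, and
 * the egress device is referenced by the MAC index of the internal port
 * rather than by the netdev itself, which may be gone by delete time.
 */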
int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
                                 struct nfp_fl_payload *flow)
{
        struct nfp_flower_priv *app_priv = app->priv;
        struct nfp_tun_offloaded_mac *mac_entry;
        struct nfp_flower_meta_tci *key_meta;
        struct nfp_tun_pre_tun_rule payload;
        struct net_device *internal_dev;
        int err;

        if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
                return -ENOSPC;

        memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

        internal_dev = flow->pre_tun_rule.dev;
        payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
        payload.host_ctx_id = flow->meta.host_ctx_id;

        /* Lookup MAC index for the pre-tunnel rule egress device.
         * Note that because the device is always an internal port, it will
         * have a constant global index so does not need to be tracked.
         */
        mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
                                                     internal_dev->dev_addr);
        if (!mac_entry)
                return -ENOENT;

        /* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being
         * set/clear for port_idx.
         */
        key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data;
        if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6)
                mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT;
        else
                mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT;

        payload.port_idx = cpu_to_be16(mac_entry->index);

        /* Copy mac id and vlan to flow - dev may not exist at delete time. */
        flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
        flow->pre_tun_rule.port_idx = payload.port_idx;

        err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
                                       sizeof(struct nfp_tun_pre_tun_rule),
                                       (unsigned char *)&payload, GFP_KERNEL);
        if (err)
                return err;

        app_priv->pre_tun_rule_cnt++;

        return 0;
}

int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
                                     struct nfp_fl_payload *flow)
{
        struct nfp_flower_priv *app_priv = app->priv;
        struct nfp_tun_pre_tun_rule payload;
        u32 tmp_flags = 0;
        int err;

        memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

        tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
        payload.flags = cpu_to_be32(tmp_flags);
        payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
        payload.port_idx = flow->pre_tun_rule.port_idx;

        err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
                                       sizeof(struct nfp_tun_pre_tun_rule),
                                       (unsigned char *)&payload, GFP_KERNEL);
        if (err)
                return err;

        app_priv->pre_tun_rule_cnt--;

        return 0;
}

int nfp_tunnel_config_start(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        int err;

        /* Initialise rhash for MAC offload tracking. */
        err = rhashtable_init(&priv->tun.offloaded_macs,
                              &offloaded_macs_params);
        if (err)
                return err;

        ida_init(&priv->tun.mac_off_ids);

        /* Initialise priv data for IPv4/v6 offloading. */
        mutex_init(&priv->tun.ipv4_off_lock);
        INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
        mutex_init(&priv->tun.ipv6_off_lock);
        INIT_LIST_HEAD(&priv->tun.ipv6_off_list);

        /* Initialise priv data for neighbour offloading. */
        priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

        err = register_netevent_notifier(&priv->tun.neigh_nb);
        if (err) {
                rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
                                            nfp_check_rhashtable_empty, NULL);
                return err;
        }

        return 0;
}

void nfp_tunnel_config_stop(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_ipv4_addr_entry *ip_entry;
        struct list_head *ptr, *storage;

        unregister_netevent_notifier(&priv->tun.neigh_nb);

        ida_destroy(&priv->tun.mac_off_ids);

        /* Free any memory that may be occupied by ipv4 list. */
        list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
                ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
                list_del(&ip_entry->list);
                kfree(ip_entry);
        }

        mutex_destroy(&priv->tun.ipv6_off_lock);

        /* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
        rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
                                    nfp_check_rhashtable_empty, NULL);

        nfp_tun_cleanup_nn_entries(app);
}