// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies. */

#include <net/dst_metadata.h>
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include "tc.h"
#include "neigh.h"
#include "en_rep.h"
#include "eswitch.h"
#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#include "en/mapping.h"
#include "en/tc_tun.h"
#include "lib/port_tun.h"
#include "en/tc/sample.h"

struct mlx5e_rep_indr_block_priv {
        struct net_device *netdev;
        struct mlx5e_rep_priv *rpriv;

        struct list_head list;
};

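/* Attach an encap entry to the neigh hash entry tracking its next hop:
 * take a tunnel entropy reference for the encap reformat type, look up
 * (or create) the neigh hash entry for @m_neigh, and link the encap
 * entry onto that entry's RCU list under encap_list_lock.
 */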
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e,
                                 struct mlx5e_neigh *m_neigh,
                                 struct net_device *neigh_dev)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
        struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
        struct mlx5e_neigh_hash_entry *nhe;
        int err;

        err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
        if (err)
                return err;

        mutex_lock(&rpriv->neigh_update.encap_lock);
        nhe = mlx5e_rep_neigh_entry_lookup(priv, m_neigh);
        if (!nhe) {
                err = mlx5e_rep_neigh_entry_create(priv, m_neigh, neigh_dev, &nhe);
                if (err) {
                        mutex_unlock(&rpriv->neigh_update.encap_lock);
                        mlx5_tun_entropy_refcount_dec(tun_entropy,
                                                      e->reformat_type);
                        return err;
                }
        }

        e->nhe = nhe;
        spin_lock(&nhe->encap_list_lock);
        list_add_rcu(&e->encap_list, &nhe->encap_list);
        spin_unlock(&nhe->encap_list_lock);

        mutex_unlock(&rpriv->neigh_update.encap_lock);

        return 0;
}

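/* Undo mlx5e_rep_encap_entry_attach(): unlink the encap entry from its
 * neigh hash entry, release the neigh entry reference and drop the
 * tunnel entropy refcount. A never-attached entry (e->nhe == NULL) is
 * a no-op.
 */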
void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
                                  struct mlx5e_encap_entry *e)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
        struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;

        if (!e->nhe)
                return;

        spin_lock(&e->nhe->encap_list_lock);
        list_del_rcu(&e->encap_list);
        spin_unlock(&e->nhe->encap_list_lock);

        mlx5e_rep_neigh_entry_release(e->nhe);
        e->nhe = NULL;
        mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}

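/* Called (under RTNL) when the neighbour state for an encap entry changes.
 * If the connected state or destination MAC differs from what is cached,
 * offloaded flows using this encap are removed and, once the neighbour is
 * connected, re-added with the refreshed encap header (dest and source MAC).
 */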
void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
                            struct mlx5e_encap_entry *e,
                            bool neigh_connected,
                            unsigned char ha[ETH_ALEN])
{
        struct ethhdr *eth = (struct ethhdr *)e->encap_header;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        bool encap_connected;
        LIST_HEAD(flow_list);

        ASSERT_RTNL();

        mutex_lock(&esw->offloads.encap_tbl_lock);
        encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
        if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
                goto unlock;

        mlx5e_take_all_encap_flows(e, &flow_list);

        if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
            (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
                mlx5e_tc_encap_flows_del(priv, e, &flow_list);

        if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
                struct net_device *route_dev;

                ether_addr_copy(e->h_dest, ha);
                ether_addr_copy(eth->h_dest, ha);
                /* Update the encap source mac, in case the flows are
                 * deleted later while the encap source mac has changed.
                 */
                route_dev = __dev_get_by_index(dev_net(priv->netdev), e->route_dev_ifindex);
                if (route_dev)
                        ether_addr_copy(eth->h_source, route_dev->dev_addr);

                mlx5e_tc_encap_flows_add(priv, e, &flow_list);
        }
unlock:
        mutex_unlock(&esw->offloads.encap_tbl_lock);
        mlx5e_put_flow_list(priv, &flow_list);
}

static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
                              struct flow_cls_offload *cls_flower, int flags)
{
        switch (cls_flower->command) {
        case FLOW_CLS_REPLACE:
                return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
                                              flags);
        case FLOW_CLS_DESTROY:
                return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
                                           flags);
        case FLOW_CLS_STATS:
                return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
                                          flags);
        default:
                return -EOPNOTSUPP;
        }
}

static
int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
                                    struct tc_cls_matchall_offload *ma)
{
        switch (ma->command) {
        case TC_CLSMATCHALL_REPLACE:
                return mlx5e_tc_configure_matchall(priv, ma);
        case TC_CLSMATCHALL_DESTROY:
                return mlx5e_tc_delete_matchall(priv, ma);
        case TC_CLSMATCHALL_STATS:
                mlx5e_tc_stats_matchall(priv, ma);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
        struct mlx5e_priv *priv = cb_priv;

        if (!priv->netdev || !netif_device_present(priv->netdev))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
        case TC_SETUP_CLSMATCHALL:
                return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

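/* Block callback for TC_SETUP_FT: offload flow table (FT) rules by
 * remapping them onto the reserved ft chain and reusing the regular tc
 * cls_flower path (see the normalization comment in the CLSFLOWER case).
 */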
static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        struct flow_cls_offload tmp, *f = type_data;
        struct mlx5e_priv *priv = cb_priv;
        struct mlx5_eswitch *esw;
        unsigned long flags;
        int err;

        flags = MLX5_TC_FLAG(INGRESS) |
                MLX5_TC_FLAG(ESW_OFFLOAD) |
                MLX5_TC_FLAG(FT_OFFLOAD);
        esw = priv->mdev->priv.eswitch;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                memcpy(&tmp, f, sizeof(*f));

                if (!mlx5_chains_prios_supported(esw_chains(esw)))
                        return -EOPNOTSUPP;

                /* Re-use tc offload path by moving the ft flow to the
                 * reserved ft chain.
                 *
                 * FT offload can use prio range [0, INT_MAX], so we normalize
                 * it to range [1, mlx5_chains_get_prio_range(esw_chains(esw))]
                 * as with tc, where prio 0 isn't supported.
                 *
                 * We only support chain 0 of FT offload.
                 */
                if (tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)))
                        return -EOPNOTSUPP;
                if (tmp.common.chain_index != 0)
                        return -EOPNOTSUPP;

                tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
                tmp.common.prio++;
                err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
                memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
                return err;
        default:
                return -EOPNOTSUPP;
        }
}

static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
static LIST_HEAD(mlx5e_rep_block_ft_cb_list);
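
/* ndo_setup_tc entry point for representor netdevs: binds tc and ft flow
 * blocks to the callbacks above, using the unlocked (rtnl-free) driver
 * callback path.
 */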
int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
                       void *type_data)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct flow_block_offload *f = type_data;

        f->unlocked_driver_cb = true;

        switch (type) {
        case TC_SETUP_BLOCK:
                return flow_block_cb_setup_simple(type_data,
                                                  &mlx5e_rep_block_tc_cb_list,
                                                  mlx5e_rep_setup_tc_cb,
                                                  priv, priv, true);
        case TC_SETUP_FT:
                return flow_block_cb_setup_simple(type_data,
                                                  &mlx5e_rep_block_ft_cb_list,
                                                  mlx5e_rep_setup_ft_cb,
                                                  priv, priv, true);
        default:
                return -EOPNOTSUPP;
        }
}

int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
        int err;

        mutex_init(&uplink_priv->unready_flows_lock);
        INIT_LIST_HEAD(&uplink_priv->unready_flows);

        /* init shared tc flow table */
        err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
        return err;
}

void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv)
{
        /* delete shared tc flow table */
        mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
        mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}

void mlx5e_rep_tc_enable(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
                  mlx5e_tc_reoffload_flows_work);
}

void mlx5e_rep_tc_disable(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
}

int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);

        return NOTIFY_OK;
}

static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
                                 struct net_device *netdev)
{
        struct mlx5e_rep_indr_block_priv *cb_priv;

        /* All callback list access should be protected by RTNL. */
        ASSERT_RTNL();

        list_for_each_entry(cb_priv,
                            &rpriv->uplink_priv.tc_indr_block_priv_list,
                            list)
                if (cb_priv->netdev == netdev)
                        return cb_priv;

        return NULL;
}

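/* Apply a cls_flower command received through an indirect block (e.g. a
 * tunnel or vlan device) on behalf of the uplink representor.
 */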
static int
mlx5e_rep_indr_offload(struct net_device *netdev,
                       struct flow_cls_offload *flower,
                       struct mlx5e_rep_indr_block_priv *indr_priv,
                       unsigned long flags)
{
        struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
        int err = 0;

        if (!netif_device_present(indr_priv->rpriv->netdev))
                return -EOPNOTSUPP;

        switch (flower->command) {
        case FLOW_CLS_REPLACE:
                err = mlx5e_configure_flower(netdev, priv, flower, flags);
                break;
        case FLOW_CLS_DESTROY:
                err = mlx5e_delete_flower(netdev, priv, flower, flags);
                break;
        case FLOW_CLS_STATS:
                err = mlx5e_stats_flower(netdev, priv, flower, flags);
                break;
        default:
                err = -EOPNOTSUPP;
        }

        return err;
}

static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
                                      void *type_data, void *indr_priv)
{
        unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
        struct mlx5e_rep_indr_block_priv *priv = indr_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
                                              flags);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
                                      void *type_data, void *indr_priv)
{
        struct mlx5e_rep_indr_block_priv *priv = indr_priv;
        struct flow_cls_offload *f = type_data;
        struct flow_cls_offload tmp;
        struct mlx5e_priv *mpriv;
        struct mlx5_eswitch *esw;
        unsigned long flags;
        int err;

        mpriv = netdev_priv(priv->rpriv->netdev);
        esw = mpriv->mdev->priv.eswitch;

        flags = MLX5_TC_FLAG(EGRESS) |
                MLX5_TC_FLAG(ESW_OFFLOAD) |
                MLX5_TC_FLAG(FT_OFFLOAD);

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                memcpy(&tmp, f, sizeof(*f));

                /* Re-use tc offload path by moving the ft flow to the
                 * reserved ft chain.
                 *
                 * FT offload can use prio range [0, INT_MAX], so we normalize
                 * it to range [1, mlx5_chains_get_prio_range(esw_chains(esw))]
                 * as with tc, where prio 0 isn't supported.
                 *
                 * We only support chain 0 of FT offload.
                 */
                if (!mlx5_chains_prios_supported(esw_chains(esw)) ||
                    tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)) ||
                    tmp.common.chain_index)
                        return -EOPNOTSUPP;

                tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
                tmp.common.prio++;
                err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
                memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
                return err;
        default:
                return -EOPNOTSUPP;
        }
}

static void mlx5e_rep_indr_block_unbind(void *cb_priv)
{
        struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;

        list_del(&indr_priv->list);
        kfree(indr_priv);
}

static LIST_HEAD(mlx5e_block_cb_list);

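/* Bind or unbind an indirect flow block on a foreign netdev. Only tunnel
 * devices that can be offloaded and vlan devices stacked on the uplink
 * representor are accepted, and only for the clsact ingress binder type.
 */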
static int
mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
                           struct mlx5e_rep_priv *rpriv,
                           struct flow_block_offload *f,
                           flow_setup_cb_t *setup_cb,
                           void *data,
                           void (*cleanup)(struct flow_block_cb *block_cb))
{
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        struct mlx5e_rep_indr_block_priv *indr_priv;
        struct flow_block_cb *block_cb;

        if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
            !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
                return -EOPNOTSUPP;

        if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        f->unlocked_driver_cb = true;
        f->driver_block_list = &mlx5e_block_cb_list;

        switch (f->command) {
        case FLOW_BLOCK_BIND:
                indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
                if (indr_priv)
                        return -EEXIST;

                indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
                if (!indr_priv)
                        return -ENOMEM;

                indr_priv->netdev = netdev;
                indr_priv->rpriv = rpriv;
                list_add(&indr_priv->list,
                         &rpriv->uplink_priv.tc_indr_block_priv_list);

                block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
                                                    mlx5e_rep_indr_block_unbind,
                                                    f, netdev, sch, data, rpriv,
                                                    cleanup);
                if (IS_ERR(block_cb)) {
                        list_del(&indr_priv->list);
                        kfree(indr_priv);
                        return PTR_ERR(block_cb);
                }
                flow_block_cb_add(block_cb, f);
                list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);

                return 0;
        case FLOW_BLOCK_UNBIND:
                indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
                if (!indr_priv)
                        return -ENOENT;

                block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv);
                if (!block_cb)
                        return -ENOENT;

                flow_indr_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
                            enum tc_setup_type type, void *type_data,
                            void *data,
                            void (*cleanup)(struct flow_block_cb *block_cb))
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
                                                  mlx5e_rep_indr_setup_tc_cb,
                                                  data, cleanup);
        case TC_SETUP_FT:
                return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
                                                  mlx5e_rep_indr_setup_ft_cb,
                                                  data, cleanup);
        default:
                return -EOPNOTSUPP;
        }
}

int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;

        /* init indirect block notifications */
        INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);

        return flow_indr_dev_register(mlx5e_rep_indr_setup_cb, rpriv);
}

void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
{
        flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv,
                                 mlx5e_rep_indr_block_unbind);
}

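/* Rebuild tunnel metadata for a packet that missed in hardware after
 * decapsulation: map the tunnel id recovered from the CQE back to the
 * original tunnel match key and encap options, attach a metadata dst to
 * the skb and redirect skb->dev to the tunnel device.
 */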
static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
                                 struct mlx5e_tc_update_priv *tc_priv,
                                 u32 tunnel_id)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct tunnel_match_enc_opts enc_opts = {};
        struct mlx5_rep_uplink_priv *uplink_priv;
        struct mlx5e_rep_priv *uplink_rpriv;
        struct metadata_dst *tun_dst;
        struct tunnel_match_key key;
        u32 tun_id, enc_opts_id;
        struct net_device *dev;
        int err;

        enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
        tun_id = tunnel_id >> ENC_OPTS_BITS;

        if (!tun_id)
                return true;

        uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
        uplink_priv = &uplink_rpriv->uplink_priv;

        err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
        if (err) {
                WARN_ON_ONCE(true);
                netdev_dbg(priv->netdev,
                           "Couldn't find tunnel for tun_id: %d, err: %d\n",
                           tun_id, err);
                return false;
        }

        if (enc_opts_id) {
                err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
                                   enc_opts_id, &enc_opts);
                if (err) {
                        netdev_dbg(priv->netdev,
                                   "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
                                   enc_opts_id, err);
                        return false;
                }
        }

        if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
                                           key.enc_ip.tos, key.enc_ip.ttl,
                                           key.enc_tp.dst, TUNNEL_KEY,
                                           key32_to_tunnel_id(key.enc_key_id.keyid),
                                           enc_opts.key.len);
        } else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
                                             key.enc_ip.tos, key.enc_ip.ttl,
                                             key.enc_tp.dst, 0, TUNNEL_KEY,
                                             key32_to_tunnel_id(key.enc_key_id.keyid),
                                             enc_opts.key.len);
        } else {
                netdev_dbg(priv->netdev,
                           "Couldn't restore tunnel, unsupported addr_type: %d\n",
                           key.enc_control.addr_type);
                return false;
        }

        if (!tun_dst) {
                netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
                return false;
        }

        tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;

        if (enc_opts.key.len)
                ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
                                        enc_opts.key.data,
                                        enc_opts.key.len,
                                        enc_opts.key.dst_opt_type);

        skb_dst_set(skb, (struct dst_entry *)tun_dst);
        dev = dev_get_by_index(&init_net, key.filter_ifindex);
        if (!dev) {
                netdev_dbg(priv->netdev,
                           "Couldn't find tunnel device with ifindex: %d\n",
                           key.filter_ifindex);
                return false;
        }

        /* Set tun_dev so we do dev_put() after datapath */
        tc_priv->tun_dev = dev;

        skb->dev = dev;

        return true;
}

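/* Restore the tc chain (via the tc skb extension, when enabled), the
 * conntrack state and the tunnel metadata for a packet handed to software
 * after a partial hardware miss.
 */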
static bool mlx5e_restore_skb_chain(struct sk_buff *skb, u32 chain, u32 reg_c1,
                                    struct mlx5e_tc_update_priv *tc_priv)
{
        struct mlx5e_priv *priv = netdev_priv(skb->dev);
        u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
        if (chain) {
                struct mlx5_rep_uplink_priv *uplink_priv;
                struct mlx5e_rep_priv *uplink_rpriv;
                struct tc_skb_ext *tc_skb_ext;
                struct mlx5_eswitch *esw;
                u32 zone_restore_id;

                tc_skb_ext = tc_skb_ext_alloc(skb);
                if (!tc_skb_ext) {
                        WARN_ON(1);
                        return false;
                }
                tc_skb_ext->chain = chain;
                zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
                esw = priv->mdev->priv.eswitch;
                uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
                uplink_priv = &uplink_rpriv->uplink_priv;
                if (!mlx5e_tc_ct_restore_flow(uplink_priv->ct_priv, skb,
                                              zone_restore_id))
                        return false;
        }
#endif /* CONFIG_NET_TC_SKB_EXT */

        return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
}

static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
                                     struct mlx5_mapped_obj *mapped_obj,
                                     struct mlx5e_tc_update_priv *tc_priv)
{
        if (!mlx5e_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) {
                netdev_dbg(priv->netdev,
                           "Failed to restore tunnel info for sampled packet\n");
                return;
        }
#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
        mlx5e_tc_sample_skb(skb, mapped_obj);
#endif /* CONFIG_MLX5_TC_SAMPLE */
        mlx5_rep_tc_post_napi_receive(tc_priv);
}

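/* Called from the representor RX path for CQEs carrying a non-default
 * reg_c0 mapping. Decode the mapped object (tc chain or sample) and
 * restore the corresponding software state on the skb. Returns false when
 * restore fails or the packet was consumed (sampled), so the caller can
 * drop it instead of passing it up the stack.
 */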
bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
                             struct sk_buff *skb,
                             struct mlx5e_tc_update_priv *tc_priv)
{
        struct mlx5_mapped_obj mapped_obj;
        struct mlx5_eswitch *esw;
        struct mlx5e_priv *priv;
        u32 reg_c0;
        int err;

        reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
        if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
                return true;

        /* If reg_c0 is not equal to the default flow tag then skb->mark
         * is not supported and must be reset back to 0.
         */
        skb->mark = 0;

        priv = netdev_priv(skb->dev);
        esw = priv->mdev->priv.eswitch;
        err = mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0, &mapped_obj);
        if (err) {
                netdev_dbg(priv->netdev,
                           "Couldn't find mapped object for reg_c0: %d, err: %d\n",
                           reg_c0, err);
                return false;
        }

        if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
                u32 reg_c1 = be32_to_cpu(cqe->ft_metadata);

                return mlx5e_restore_skb_chain(skb, mapped_obj.chain, reg_c1, tc_priv);
        } else if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) {
                mlx5e_restore_skb_sample(priv, skb, &mapped_obj, tc_priv);
                return false;
        } else {
                netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
                return false;
        }

        return true;
}

void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
{
        if (tc_priv->tun_dev)
                dev_put(tc_priv->tun_dev);
}