bpf: devmap memory usage
kernel/bpf/devmap.c (platform/kernel/linux-starfive.git)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
3  */
4
5 /* A devmap's primary use is as a backend map for the XDP BPF helper call
6  * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
7  * have spent some effort to ensure the datapath with redirect maps does not
8  * use any locking. This is a quick note on the details.
9  *
10  * We have three possible paths to get into the devmap control plane: bpf
11  * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
12  * will invoke an update, delete, or lookup operation. To ensure updates and
13  * deletes appear atomic from the datapath side, xchg() is used to modify the
14  * netdev_map array. Then, because the datapath does a lookup into the
15  * netdev_map array (read-only) from an RCU critical section, we use call_rcu()
16  * to wait for an RCU grace period before freeing the old data structures. This
17  * ensures the datapath always has a valid copy. However, the datapath does a
18  * "flush" operation that pushes any pending packets in the driver outside the
19  * RCU critical section. Each bpf_dtab_netdev tracks these pending operations
20  * using a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed
21  * until this list is empty, indicating that all flush operations have completed.
22  *
23  * BPF syscalls may race with BPF program calls on any of the update, delete
24  * or lookup operations. As noted above, the xchg() operation also keeps the
25  * netdev_map consistent in this case. From the devmap side, BPF programs
26  * calling into these operations are the same as multiple user space threads
27  * making system calls.
28  *
29  * Finally, any of the above may race with a netdev_unregister notifier. The
30  * unregister notifier must search the map structure for entries that contain
31  * a reference to the net device and remove them. This is a two step process:
32  * (a) dereference the bpf_dtab_netdev object in netdev_map and (b) check
33  * whether the ifindex is the same as that of the net_device being removed.
34  * When removing the dev, a cmpxchg() is used to ensure the correct dev is
35  * removed; in the case of a concurrent update or delete operation it is
36  * possible that the initially referenced dev is no longer in the map. As the
37  * notifier hook walks the map, we know that new dev references cannot be
38  * added by the user because core infrastructure ensures dev_get_by_index()
39  * calls will fail at this point.
40  *
41  * The devmap_hash type is a map type that interprets keys as ifindexes and
42  * indexes these using a hashmap. This allows maps that use ifindex as the key
43  * to be densely packed instead of having holes in the lookup array for unused
44  * ifindexes. The setup and packet enqueue/send code is shared between the two
45  * types of devmap; only the lookup and insertion are different.
46  */
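
/* Example: the typical consumer of a devmap is an XDP program calling
 * bpf_redirect_map(). A minimal sketch (the map layout, names and slot key
 * are illustrative, not taken from this file):
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_DEVMAP);
 *           __uint(key_size, sizeof(__u32));
 *           __uint(value_size, sizeof(__u32));   // 4-byte value: ifindex
 *           __uint(max_entries, 64);
 *   } tx_ports SEC(".maps");
 *
 *   SEC("xdp")
 *   int xdp_redirect_example(struct xdp_md *ctx)
 *   {
 *           // Returns XDP_REDIRECT on success; the frame is then queued on
 *           // a per-CPU bulk queue and sent when the driver flushes.
 *           return bpf_redirect_map(&tx_ports, 0, 0);
 *   }
 */
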
47 #include <linux/bpf.h>
48 #include <net/xdp.h>
49 #include <linux/filter.h>
50 #include <trace/events/xdp.h>
51 #include <linux/btf_ids.h>
52
53 #define DEV_CREATE_FLAG_MASK \
54         (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
55
56 struct xdp_dev_bulk_queue {
57         struct xdp_frame *q[DEV_MAP_BULK_SIZE];
58         struct list_head flush_node;
59         struct net_device *dev;
60         struct net_device *dev_rx;
61         struct bpf_prog *xdp_prog;
62         unsigned int count;
63 };
64
65 struct bpf_dtab_netdev {
66         struct net_device *dev; /* must be first member, due to tracepoint */
67         struct hlist_node index_hlist;
68         struct bpf_dtab *dtab;
69         struct bpf_prog *xdp_prog;
70         struct rcu_head rcu;
71         unsigned int idx;
72         struct bpf_devmap_val val;
73 };
74
75 struct bpf_dtab {
76         struct bpf_map map;
77         struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
78         struct list_head list;
79
80         /* these are only used for DEVMAP_HASH type maps */
81         struct hlist_head *dev_index_head;
82         spinlock_t index_lock;
83         unsigned int items;
84         u32 n_buckets;
85 };
86
87 static DEFINE_PER_CPU(struct list_head, dev_flush_list);
88 static DEFINE_SPINLOCK(dev_map_lock);
89 static LIST_HEAD(dev_map_list);
90
91 static struct hlist_head *dev_map_create_hash(unsigned int entries,
92                                               int numa_node)
93 {
94         int i;
95         struct hlist_head *hash;
96
97         hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
98         if (hash != NULL)
99                 for (i = 0; i < entries; i++)
100                         INIT_HLIST_HEAD(&hash[i]);
101
102         return hash;
103 }
104
105 static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
106                                                     int idx)
107 {
108         return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
109 }
110
111 static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
112 {
113         u32 valsize = attr->value_size;
114
115         /* Check sanity of attributes. Two value sizes are supported:
116          * 4 bytes: ifindex
117          * 8 bytes: ifindex + prog fd
118          */
119         if (attr->max_entries == 0 || attr->key_size != 4 ||
120             (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
121              valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
122             attr->map_flags & ~DEV_CREATE_FLAG_MASK)
123                 return -EINVAL;
124
125         /* Lookup returns a pointer straight to dev->ifindex, so make sure the
126          * verifier prevents writes from the BPF side
127          */
128         attr->map_flags |= BPF_F_RDONLY_PROG;
129
130
131         bpf_map_init_from_attr(&dtab->map, attr);
132
133         if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
134                 dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
135
136                 if (!dtab->n_buckets) /* Overflow check */
137                         return -EINVAL;
138         }
139
140         if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
141                 dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
142                                                            dtab->map.numa_node);
143                 if (!dtab->dev_index_head)
144                         return -ENOMEM;
145
146                 spin_lock_init(&dtab->index_lock);
147         } else {
148                 dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
149                                                       sizeof(struct bpf_dtab_netdev *),
150                                                       dtab->map.numa_node);
151                 if (!dtab->netdev_map)
152                         return -ENOMEM;
153         }
154
155         return 0;
156 }
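
/* Example: exercising the two value layouts accepted above from user space
 * via libbpf (a sketch; map_fd, the ifindex and the prog fd are
 * illustrative):
 *
 *   __u32 key = 0, ifindex = 4;                  // 4-byte layout
 *   bpf_map_update_elem(map_fd, &key, &ifindex, 0);
 *
 *   struct bpf_devmap_val val = {                // 8-byte layout
 *           .ifindex = 4,
 *           .bpf_prog.fd = devmap_prog_fd,       // BPF_XDP_DEVMAP program
 *   };
 *   bpf_map_update_elem(map_fd, &key, &val, 0);
 */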
157
158 static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
159 {
160         struct bpf_dtab *dtab;
161         int err;
162
163         if (!capable(CAP_NET_ADMIN))
164                 return ERR_PTR(-EPERM);
165
166         dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
167         if (!dtab)
168                 return ERR_PTR(-ENOMEM);
169
170         err = dev_map_init_map(dtab, attr);
171         if (err) {
172                 bpf_map_area_free(dtab);
173                 return ERR_PTR(err);
174         }
175
176         spin_lock(&dev_map_lock);
177         list_add_tail_rcu(&dtab->list, &dev_map_list);
178         spin_unlock(&dev_map_lock);
179
180         return &dtab->map;
181 }
182
183 static void dev_map_free(struct bpf_map *map)
184 {
185         struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
186         int i;
187
188         /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
189          * so the programs (there can be more than one that used this map) have
190          * been disconnected from events. The following synchronize_rcu() waits
191          * both for RCU read-side critical sections to complete and for
192          * preempt-disable regions (NAPI being the relevant context here), so we
193          * are certain there will be no further reads against the netdev_map and
194          * that all flush operations are complete. Flush operations can only be
195          * done from NAPI context for this reason.
196          */
197
198         spin_lock(&dev_map_lock);
199         list_del_rcu(&dtab->list);
200         spin_unlock(&dev_map_lock);
201
202         bpf_clear_redirect_map(map);
203         synchronize_rcu();
204
205         /* Make sure prior __dev_map_entry_free() calls have completed. */
206         rcu_barrier();
207
208         if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
209                 for (i = 0; i < dtab->n_buckets; i++) {
210                         struct bpf_dtab_netdev *dev;
211                         struct hlist_head *head;
212                         struct hlist_node *next;
213
214                         head = dev_map_index_hash(dtab, i);
215
216                         hlist_for_each_entry_safe(dev, next, head, index_hlist) {
217                                 hlist_del_rcu(&dev->index_hlist);
218                                 if (dev->xdp_prog)
219                                         bpf_prog_put(dev->xdp_prog);
220                                 dev_put(dev->dev);
221                                 kfree(dev);
222                         }
223                 }
224
225                 bpf_map_area_free(dtab->dev_index_head);
226         } else {
227                 for (i = 0; i < dtab->map.max_entries; i++) {
228                         struct bpf_dtab_netdev *dev;
229
230                         dev = rcu_dereference_raw(dtab->netdev_map[i]);
231                         if (!dev)
232                                 continue;
233
234                         if (dev->xdp_prog)
235                                 bpf_prog_put(dev->xdp_prog);
236                         dev_put(dev->dev);
237                         kfree(dev);
238                 }
239
240                 bpf_map_area_free(dtab->netdev_map);
241         }
242
243         bpf_map_area_free(dtab);
244 }
245
246 static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
247 {
248         struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
249         u32 index = key ? *(u32 *)key : U32_MAX;
250         u32 *next = next_key;
251
252         if (index >= dtab->map.max_entries) {
253                 *next = 0;
254                 return 0;
255         }
256
257         if (index == dtab->map.max_entries - 1)
258                 return -ENOENT;
259         *next = index + 1;
260         return 0;
261 }
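
/* Example: the contract above, driven from user space. A NULL (or any
 * out-of-range) key restarts iteration at index 0, and -ENOENT marks the
 * end (a sketch using libbpf):
 *
 *   __u32 key, next;
 *   int err;
 *
 *   for (err = bpf_map_get_next_key(map_fd, NULL, &next); !err;
 *        err = bpf_map_get_next_key(map_fd, &key, &next)) {
 *           key = next;
 *           // ... look up or otherwise process 'key' ...
 *   }
 */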
262
263 /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
264  * by local_bh_disable() (from XDP calls inside NAPI). The
265  * rcu_read_lock_bh_held() below makes lockdep accept both.
266  */
267 static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
268 {
269         struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
270         struct hlist_head *head = dev_map_index_hash(dtab, key);
271         struct bpf_dtab_netdev *dev;
272
273         hlist_for_each_entry_rcu(dev, head, index_hlist,
274                                  lockdep_is_held(&dtab->index_lock))
275                 if (dev->idx == key)
276                         return dev;
277
278         return NULL;
279 }
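
/* Example: the two read-side contexts the comment above refers to (sketch):
 *
 *   // Syscall path: a plain RCU read-side critical section.
 *   rcu_read_lock();
 *   dev = __dev_map_hash_lookup_elem(map, key);
 *   rcu_read_unlock();
 *
 *   // XDP path: NAPI runs with bottom halves disabled, which also counts
 *   // as an RCU read-side critical section for these lookups.
 *   local_bh_disable();
 *   dev = __dev_map_hash_lookup_elem(map, key);
 *   local_bh_enable();
 */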
280
281 static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
282                                     void *next_key)
283 {
284         struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
285         u32 idx, *next = next_key;
286         struct bpf_dtab_netdev *dev, *next_dev;
287         struct hlist_head *head;
288         int i = 0;
289
290         if (!key)
291                 goto find_first;
292
293         idx = *(u32 *)key;
294
295         dev = __dev_map_hash_lookup_elem(map, idx);
296         if (!dev)
297                 goto find_first;
298
299         next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
300                                     struct bpf_dtab_netdev, index_hlist);
301
302         if (next_dev) {
303                 *next = next_dev->idx;
304                 return 0;
305         }
306
307         i = idx & (dtab->n_buckets - 1);
308         i++;
309
310  find_first:
311         for (; i < dtab->n_buckets; i++) {
312                 head = dev_map_index_hash(dtab, i);
313
314                 next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
315                                             struct bpf_dtab_netdev,
316                                             index_hlist);
317                 if (next_dev) {
318                         *next = next_dev->idx;
319                         return 0;
320                 }
321         }
322
323         return -ENOENT;
324 }
325
326 static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
327                                 struct xdp_frame **frames, int n,
328                                 struct net_device *dev)
329 {
330         struct xdp_txq_info txq = { .dev = dev };
331         struct xdp_buff xdp;
332         int i, nframes = 0;
333
334         for (i = 0; i < n; i++) {
335                 struct xdp_frame *xdpf = frames[i];
336                 u32 act;
337                 int err;
338
339                 xdp_convert_frame_to_buff(xdpf, &xdp);
340                 xdp.txq = &txq;
341
342                 act = bpf_prog_run_xdp(xdp_prog, &xdp);
343                 switch (act) {
344                 case XDP_PASS:
345                         err = xdp_update_frame_from_buff(&xdp, xdpf);
346                         if (unlikely(err < 0))
347                                 xdp_return_frame_rx_napi(xdpf);
348                         else
349                                 frames[nframes++] = xdpf;
350                         break;
351                 default:
352                         bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
353                         fallthrough;
354                 case XDP_ABORTED:
355                         trace_xdp_exception(dev, xdp_prog, act);
356                         fallthrough;
357                 case XDP_DROP:
358                         xdp_return_frame_rx_napi(xdpf);
359                         break;
360                 }
361         }
362         return nframes; /* sent frames count */
363 }
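
/* Example: a program of the kind run above must be loaded with
 * expected_attach_type BPF_XDP_DEVMAP; a minimal sketch (the libbpf
 * section name reflects current convention):
 *
 *   SEC("xdp/devmap")
 *   int xdp_devmap_example(struct xdp_md *ctx)
 *   {
 *           // ctx->egress_ifindex identifies the target device; only
 *           // XDP_PASS lets the frame proceed to ndo_xdp_xmit().
 *           return XDP_PASS;
 *   }
 */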
364
365 static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
366 {
367         struct net_device *dev = bq->dev;
368         unsigned int cnt = bq->count;
369         int sent = 0, err = 0;
370         int to_send = cnt;
371         int i;
372
373         if (unlikely(!cnt))
374                 return;
375
376         for (i = 0; i < cnt; i++) {
377                 struct xdp_frame *xdpf = bq->q[i];
378
379                 prefetch(xdpf);
380         }
381
382         if (bq->xdp_prog) {
383                 to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
384                 if (!to_send)
385                         goto out;
386         }
387
388         sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
389         if (sent < 0) {
390                 /* If ndo_xdp_xmit fails with an errno, no frames have
391                  * been transmitted.
392                  */
393                 err = sent;
394                 sent = 0;
395         }
396
397         /* If not all frames have been transmitted, it is our
398          * responsibility to free them
399          */
400         for (i = sent; unlikely(i < to_send); i++)
401                 xdp_return_frame_rx_napi(bq->q[i]);
402
403 out:
404         bq->count = 0;
405         trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
406 }
407
408 /* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
409  * driver before returning from its napi->poll() routine. See the comment above
410  * xdp_do_flush() in filter.c.
411  */
412 void __dev_flush(void)
413 {
414         struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
415         struct xdp_dev_bulk_queue *bq, *tmp;
416
417         list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
418                 bq_xmit_all(bq, XDP_XMIT_FLUSH);
419                 bq->dev_rx = NULL;
420                 bq->xdp_prog = NULL;
421                 __list_del_clearprev(&bq->flush_node);
422         }
423 }
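
/* Example: how a driver satisfies the requirement above (a sketch; the
 * driver function names are hypothetical):
 *
 *   static int example_napi_poll(struct napi_struct *napi, int budget)
 *   {
 *           int done = 0;
 *
 *           while (done < budget && example_rx_one(napi))
 *                   done++;        // RX may run XDP and enqueue frames
 *
 *           xdp_do_flush();        // drains this CPU's dev_flush_list
 *           return done;
 *   }
 */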
424
425 /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
426  * by local_bh_disable() (from XDP calls inside NAPI). The
427  * rcu_read_lock_bh_held() below makes lockdep accept both.
428  */
429 static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
430 {
431         struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
432         struct bpf_dtab_netdev *obj;
433
434         if (key >= map->max_entries)
435                 return NULL;
436
437         obj = rcu_dereference_check(dtab->netdev_map[key],
438                                     rcu_read_lock_bh_held());
439         return obj;
440 }
441
442 /* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, percpu
443  * variable access is safe, and map elements stick around. See the comment
444  * above xdp_do_flush() in filter.c.
445  */
446 static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
447                        struct net_device *dev_rx, struct bpf_prog *xdp_prog)
448 {
449         struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
450         struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
451
452         if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
453                 bq_xmit_all(bq, 0);
454
455         /* Ingress dev_rx will be the same for all xdp_frames in the
456          * bulk_queue, because the bq is stored per-CPU and must be flushed
457          * from the net_device driver's NAPI func end.
458          *
459          * Do the same with xdp_prog and flush_list since these fields
460          * are only ever modified together.
461          */
462         if (!bq->dev_rx) {
463                 bq->dev_rx = dev_rx;
464                 bq->xdp_prog = xdp_prog;
465                 list_add(&bq->flush_node, flush_list);
466         }
467
468         bq->q[bq->count++] = xdpf;
469 }
470
471 static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
472                                 struct net_device *dev_rx,
473                                 struct bpf_prog *xdp_prog)
474 {
475         int err;
476
477         if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
478                 return -EOPNOTSUPP;
479
480         if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
481                      xdp_frame_has_frags(xdpf)))
482                 return -EOPNOTSUPP;
483
484         err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
485         if (unlikely(err))
486                 return err;
487
488         bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
489         return 0;
490 }
491
492 static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
493 {
494         struct xdp_txq_info txq = { .dev = dst->dev };
495         struct xdp_buff xdp;
496         u32 act;
497
498         if (!dst->xdp_prog)
499                 return XDP_PASS;
500
501         __skb_pull(skb, skb->mac_len);
502         xdp.txq = &txq;
503
504         act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
505         switch (act) {
506         case XDP_PASS:
507                 __skb_push(skb, skb->mac_len);
508                 break;
509         default:
510                 bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
511                 fallthrough;
512         case XDP_ABORTED:
513                 trace_xdp_exception(dst->dev, dst->xdp_prog, act);
514                 fallthrough;
515         case XDP_DROP:
516                 kfree_skb(skb);
517                 break;
518         }
519
520         return act;
521 }
522
523 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
524                     struct net_device *dev_rx)
525 {
526         return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
527 }
528
529 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
530                     struct net_device *dev_rx)
531 {
532         struct net_device *dev = dst->dev;
533
534         return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
535 }
536
537 static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
538 {
539         if (!obj)
540                 return false;
541
542         if (!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
543                 return false;
544
545         if (unlikely(!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
546                      xdp_frame_has_frags(xdpf)))
547                 return false;
548
549         if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
550                 return false;
551
552         return true;
553 }
554
555 static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
556                                  struct net_device *dev_rx,
557                                  struct xdp_frame *xdpf)
558 {
559         struct xdp_frame *nxdpf;
560
561         nxdpf = xdpf_clone(xdpf);
562         if (!nxdpf)
563                 return -ENOMEM;
564
565         bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);
566
567         return 0;
568 }
569
570 static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
571 {
572         while (num_excluded--) {
573                 if (ifindex == excluded[num_excluded])
574                         return true;
575         }
576         return false;
577 }
578
579 /* Get ifindex of each upper device. 'indexes' must be able to hold at
580  * least MAX_NEST_DEV elements.
581  * Returns the number of ifindexes added.
582  */
583 static int get_upper_ifindexes(struct net_device *dev, int *indexes)
584 {
585         struct net_device *upper;
586         struct list_head *iter;
587         int n = 0;
588
589         netdev_for_each_upper_dev_rcu(dev, upper, iter) {
590                 indexes[n++] = upper->ifindex;
591         }
592         return n;
593 }
594
595 int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
596                           struct bpf_map *map, bool exclude_ingress)
597 {
598         struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
599         struct bpf_dtab_netdev *dst, *last_dst = NULL;
600         int excluded_devices[1+MAX_NEST_DEV];
601         struct hlist_head *head;
602         int num_excluded = 0;
603         unsigned int i;
604         int err;
605
606         if (exclude_ingress) {
607                 num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
608                 excluded_devices[num_excluded++] = dev_rx->ifindex;
609         }
610
611         if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
612                 for (i = 0; i < map->max_entries; i++) {
613                         dst = rcu_dereference_check(dtab->netdev_map[i],
614                                                     rcu_read_lock_bh_held());
615                         if (!is_valid_dst(dst, xdpf))
616                                 continue;
617
618                         if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
619                                 continue;
620
621                         /* we only need n-1 clones; last_dst enqueued below */
622                         if (!last_dst) {
623                                 last_dst = dst;
624                                 continue;
625                         }
626
627                         err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
628                         if (err)
629                                 return err;
630
631                         last_dst = dst;
632                 }
633         } else { /* BPF_MAP_TYPE_DEVMAP_HASH */
634                 for (i = 0; i < dtab->n_buckets; i++) {
635                         head = dev_map_index_hash(dtab, i);
636                         hlist_for_each_entry_rcu(dst, head, index_hlist,
637                                                  lockdep_is_held(&dtab->index_lock)) {
638                                 if (!is_valid_dst(dst, xdpf))
639                                         continue;
640
641                                 if (is_ifindex_excluded(excluded_devices, num_excluded,
642                                                         dst->dev->ifindex))
643                                         continue;
644
645                                 /* we only need n-1 clones; last_dst enqueued below */
646                                 if (!last_dst) {
647                                         last_dst = dst;
648                                         continue;
649                                 }
650
651                                 err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
652                                 if (err)
653                                         return err;
654
655                                 last_dst = dst;
656                         }
657                 }
658         }
659
660         /* consume the last copy of the frame */
661         if (last_dst)
662                 bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
663         else
664                 xdp_return_frame_rx_napi(xdpf); /* dtab is empty */
665
666         return 0;
667 }
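
/* Example: the multicast path above is reached from an XDP program via the
 * broadcast flags; the key is ignored when BPF_F_BROADCAST is set (sketch,
 * reusing the illustrative tx_ports map from the header example):
 *
 *   return bpf_redirect_map(&tx_ports, 0,
 *                           BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
 */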
668
669 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
670                              struct bpf_prog *xdp_prog)
671 {
672         int err;
673
674         err = xdp_ok_fwd_dev(dst->dev, skb->len);
675         if (unlikely(err))
676                 return err;
677
678         /* Redirect has already succeeded semantically at this point, so we just
679          * return 0 even if the packet is dropped. The helper below takes care
680          * of freeing the skb.
681          */
682         if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
683                 return 0;
684
685         skb->dev = dst->dev;
686         generic_xdp_tx(skb, xdp_prog);
687
688         return 0;
689 }
690
691 static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
692                                   struct sk_buff *skb,
693                                   struct bpf_prog *xdp_prog)
694 {
695         struct sk_buff *nskb;
696         int err;
697
698         nskb = skb_clone(skb, GFP_ATOMIC);
699         if (!nskb)
700                 return -ENOMEM;
701
702         err = dev_map_generic_redirect(dst, nskb, xdp_prog);
703         if (unlikely(err)) {
704                 consume_skb(nskb);
705                 return err;
706         }
707
708         return 0;
709 }
710
711 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
712                            struct bpf_prog *xdp_prog, struct bpf_map *map,
713                            bool exclude_ingress)
714 {
715         struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
716         struct bpf_dtab_netdev *dst, *last_dst = NULL;
717         int excluded_devices[1+MAX_NEST_DEV];
718         struct hlist_head *head;
719         struct hlist_node *next;
720         int num_excluded = 0;
721         unsigned int i;
722         int err;
723
724         if (exclude_ingress) {
725                 num_excluded = get_upper_ifindexes(dev, excluded_devices);
726                 excluded_devices[num_excluded++] = dev->ifindex;
727         }
728
729         if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
730                 for (i = 0; i < map->max_entries; i++) {
731                         dst = rcu_dereference_check(dtab->netdev_map[i],
732                                                     rcu_read_lock_bh_held());
733                         if (!dst)
734                                 continue;
735
736                         if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
737                                 continue;
738
739                         /* we only need n-1 clones; last_dst enqueued below */
740                         if (!last_dst) {
741                                 last_dst = dst;
742                                 continue;
743                         }
744
745                         err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
746                         if (err)
747                                 return err;
748
749                         last_dst = dst;
750
751                 }
752         } else { /* BPF_MAP_TYPE_DEVMAP_HASH */
753                 for (i = 0; i < dtab->n_buckets; i++) {
754                         head = dev_map_index_hash(dtab, i);
755                         hlist_for_each_entry_safe(dst, next, head, index_hlist) {
756                                 if (!dst)
757                                         continue;
758
759                                 if (is_ifindex_excluded(excluded_devices, num_excluded,
760                                                         dst->dev->ifindex))
761                                         continue;
762
763                                 /* we only need n-1 clones; last_dst enqueued below */
764                                 if (!last_dst) {
765                                         last_dst = dst;
766                                         continue;
767                                 }
768
769                                 err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
770                                 if (err)
771                                         return err;
772
773                                 last_dst = dst;
774                         }
775                 }
776         }
777
778         /* consume the first skb and return */
779         if (last_dst)
780                 return dev_map_generic_redirect(last_dst, skb, xdp_prog);
781
782         /* dtab is empty */
783         consume_skb(skb);
784         return 0;
785 }
786
787 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
788 {
789         struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
790
791         return obj ? &obj->val : NULL;
792 }
793
794 static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
795 {
796         struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
797                                                                 *(u32 *)key);
798         return obj ? &obj->val : NULL;
799 }
800
801 static void __dev_map_entry_free(struct rcu_head *rcu)
802 {
803         struct bpf_dtab_netdev *dev;
804
805         dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
806         if (dev->xdp_prog)
807                 bpf_prog_put(dev->xdp_prog);
808         dev_put(dev->dev);
809         kfree(dev);
810 }
811
812 static int dev_map_delete_elem(struct bpf_map *map, void *key)
813 {
814         struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
815         struct bpf_dtab_netdev *old_dev;
816         int k = *(u32 *)key;
817
818         if (k >= map->max_entries)
819                 return -EINVAL;
820
821         old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
822         if (old_dev) {
823                 call_rcu(&old_dev->rcu, __dev_map_entry_free);
824                 atomic_dec((atomic_t *)&dtab->items);
825         }
826         return 0;
827 }
828
829 static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
830 {
831         struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
832         struct bpf_dtab_netdev *old_dev;
833         int k = *(u32 *)key;
834         unsigned long flags;
835         int ret = -ENOENT;
836
837         spin_lock_irqsave(&dtab->index_lock, flags);
838
839         old_dev = __dev_map_hash_lookup_elem(map, k);
840         if (old_dev) {
841                 dtab->items--;
842                 hlist_del_init_rcu(&old_dev->index_hlist);
843                 call_rcu(&old_dev->rcu, __dev_map_entry_free);
844                 ret = 0;
845         }
846         spin_unlock_irqrestore(&dtab->index_lock, flags);
847
848         return ret;
849 }
850
851 static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
852                                                     struct bpf_dtab *dtab,
853                                                     struct bpf_devmap_val *val,
854                                                     unsigned int idx)
855 {
856         struct bpf_prog *prog = NULL;
857         struct bpf_dtab_netdev *dev;
858
859         dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
860                                    GFP_NOWAIT | __GFP_NOWARN,
861                                    dtab->map.numa_node);
862         if (!dev)
863                 return ERR_PTR(-ENOMEM);
864
865         dev->dev = dev_get_by_index(net, val->ifindex);
866         if (!dev->dev)
867                 goto err_out;
868
869         if (val->bpf_prog.fd > 0) {
870                 prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
871                                              BPF_PROG_TYPE_XDP, false);
872                 if (IS_ERR(prog))
873                         goto err_put_dev;
874                 if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
875                     !bpf_prog_map_compatible(&dtab->map, prog))
876                         goto err_put_prog;
877         }
878
879         dev->idx = idx;
880         dev->dtab = dtab;
881         if (prog) {
882                 dev->xdp_prog = prog;
883                 dev->val.bpf_prog.id = prog->aux->id;
884         } else {
885                 dev->xdp_prog = NULL;
886                 dev->val.bpf_prog.id = 0;
887         }
888         dev->val.ifindex = val->ifindex;
889
890         return dev;
891 err_put_prog:
892         bpf_prog_put(prog);
893 err_put_dev:
894         dev_put(dev->dev);
895 err_out:
896         kfree(dev);
897         return ERR_PTR(-EINVAL);
898 }
899
900 static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
901                                  void *key, void *value, u64 map_flags)
902 {
903         struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
904         struct bpf_dtab_netdev *dev, *old_dev;
905         struct bpf_devmap_val val = {};
906         u32 i = *(u32 *)key;
907
908         if (unlikely(map_flags > BPF_EXIST))
909                 return -EINVAL;
910         if (unlikely(i >= dtab->map.max_entries))
911                 return -E2BIG;
912         if (unlikely(map_flags == BPF_NOEXIST))
913                 return -EEXIST;
914
915         /* already verified value_size <= sizeof val */
916         memcpy(&val, value, map->value_size);
917
918         if (!val.ifindex) {
919                 dev = NULL;
920                 /* cannot specify fd if ifindex is 0 */
921                 if (val.bpf_prog.fd > 0)
922                         return -EINVAL;
923         } else {
924                 dev = __dev_map_alloc_node(net, dtab, &val, i);
925                 if (IS_ERR(dev))
926                         return PTR_ERR(dev);
927         }
928
929         /* Use call_rcu() here to ensure RCU critical sections have completed.
930          * Remember that the driver-side flush operation will happen before the
931          * net device is removed.
932          */
933         old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
934         if (old_dev)
935                 call_rcu(&old_dev->rcu, __dev_map_entry_free);
936         else
937                 atomic_inc((atomic_t *)&dtab->items);
938
939         return 0;
940 }
941
942 static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
943                                u64 map_flags)
944 {
945         return __dev_map_update_elem(current->nsproxy->net_ns,
946                                      map, key, value, map_flags);
947 }
948
949 static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
950                                      void *key, void *value, u64 map_flags)
951 {
952         struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
953         struct bpf_dtab_netdev *dev, *old_dev;
954         struct bpf_devmap_val val = {};
955         u32 idx = *(u32 *)key;
956         unsigned long flags;
957         int err = -EEXIST;
958
959         /* already verified value_size <= sizeof val */
960         memcpy(&val, value, map->value_size);
961
962         if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
963                 return -EINVAL;
964
965         spin_lock_irqsave(&dtab->index_lock, flags);
966
967         old_dev = __dev_map_hash_lookup_elem(map, idx);
968         if (old_dev && (map_flags & BPF_NOEXIST))
969                 goto out_err;
970
971         dev = __dev_map_alloc_node(net, dtab, &val, idx);
972         if (IS_ERR(dev)) {
973                 err = PTR_ERR(dev);
974                 goto out_err;
975         }
976
977         if (old_dev) {
978                 hlist_del_rcu(&old_dev->index_hlist);
979         } else {
980                 if (dtab->items >= dtab->map.max_entries) {
981                         spin_unlock_irqrestore(&dtab->index_lock, flags);
982                         call_rcu(&dev->rcu, __dev_map_entry_free);
983                         return -E2BIG;
984                 }
985                 dtab->items++;
986         }
987
988         hlist_add_head_rcu(&dev->index_hlist,
989                            dev_map_index_hash(dtab, idx));
990         spin_unlock_irqrestore(&dtab->index_lock, flags);
991
992         if (old_dev)
993                 call_rcu(&old_dev->rcu, __dev_map_entry_free);
994
995         return 0;
996
997 out_err:
998         spin_unlock_irqrestore(&dtab->index_lock, flags);
999         return err;
1000 }
1001
1002 static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
1003                                    u64 map_flags)
1004 {
1005         return __dev_map_hash_update_elem(current->nsproxy->net_ns,
1006                                          map, key, value, map_flags);
1007 }
1008
1009 static int dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
1010 {
1011         return __bpf_xdp_redirect_map(map, ifindex, flags,
1012                                       BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
1013                                       __dev_map_lookup_elem);
1014 }
1015
1016 static int dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
1017 {
1018         return __bpf_xdp_redirect_map(map, ifindex, flags,
1019                                       BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
1020                                       __dev_map_hash_lookup_elem);
1021 }
1022
1023 static u64 dev_map_mem_usage(const struct bpf_map *map)
1024 {
1025         struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
1026         u64 usage = sizeof(struct bpf_dtab);
1027
1028         if (map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)
1029                 usage += (u64)dtab->n_buckets * sizeof(struct hlist_head);
1030         else
1031                 usage += (u64)map->max_entries * sizeof(struct bpf_dtab_netdev *);
1032         usage += atomic_read((atomic_t *)&dtab->items) *
1033                          (u64)sizeof(struct bpf_dtab_netdev);
1034         return usage;
1035 }
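
/* Example: for a DEVMAP with max_entries = 256 of which 64 slots are
 * populated, on a 64-bit kernel (struct sizes vary with kernel config, so
 * the per-entry figures are illustrative):
 *
 *   usage = sizeof(struct bpf_dtab)                      // the map itself
 *         + 256 * sizeof(struct bpf_dtab_netdev *)       // 256 * 8 bytes
 *         + 64 * sizeof(struct bpf_dtab_netdev);         // populated entries
 */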
1036
1037 BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
1038 const struct bpf_map_ops dev_map_ops = {
1039         .map_meta_equal = bpf_map_meta_equal,
1040         .map_alloc = dev_map_alloc,
1041         .map_free = dev_map_free,
1042         .map_get_next_key = dev_map_get_next_key,
1043         .map_lookup_elem = dev_map_lookup_elem,
1044         .map_update_elem = dev_map_update_elem,
1045         .map_delete_elem = dev_map_delete_elem,
1046         .map_check_btf = map_check_no_btf,
1047         .map_mem_usage = dev_map_mem_usage,
1048         .map_btf_id = &dev_map_btf_ids[0],
1049         .map_redirect = dev_map_redirect,
1050 };
1051
1052 const struct bpf_map_ops dev_map_hash_ops = {
1053         .map_meta_equal = bpf_map_meta_equal,
1054         .map_alloc = dev_map_alloc,
1055         .map_free = dev_map_free,
1056         .map_get_next_key = dev_map_hash_get_next_key,
1057         .map_lookup_elem = dev_map_hash_lookup_elem,
1058         .map_update_elem = dev_map_hash_update_elem,
1059         .map_delete_elem = dev_map_hash_delete_elem,
1060         .map_check_btf = map_check_no_btf,
1061         .map_mem_usage = dev_map_mem_usage,
1062         .map_btf_id = &dev_map_btf_ids[0],
1063         .map_redirect = dev_hash_map_redirect,
1064 };
1065
1066 static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
1067                                        struct net_device *netdev)
1068 {
1069         unsigned long flags;
1070         u32 i;
1071
1072         spin_lock_irqsave(&dtab->index_lock, flags);
1073         for (i = 0; i < dtab->n_buckets; i++) {
1074                 struct bpf_dtab_netdev *dev;
1075                 struct hlist_head *head;
1076                 struct hlist_node *next;
1077
1078                 head = dev_map_index_hash(dtab, i);
1079
1080                 hlist_for_each_entry_safe(dev, next, head, index_hlist) {
1081                         if (netdev != dev->dev)
1082                                 continue;
1083
1084                         dtab->items--;
1085                         hlist_del_rcu(&dev->index_hlist);
1086                         call_rcu(&dev->rcu, __dev_map_entry_free);
1087                 }
1088         }
1089         spin_unlock_irqrestore(&dtab->index_lock, flags);
1090 }
1091
1092 static int dev_map_notification(struct notifier_block *notifier,
1093                                 ulong event, void *ptr)
1094 {
1095         struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
1096         struct bpf_dtab *dtab;
1097         int i, cpu;
1098
1099         switch (event) {
1100         case NETDEV_REGISTER:
1101                 if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
1102                         break;
1103
1104                 /* will be freed in free_netdev() */
1105                 netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
1106                 if (!netdev->xdp_bulkq)
1107                         return NOTIFY_BAD;
1108
1109                 for_each_possible_cpu(cpu)
1110                         per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
1111                 break;
1112         case NETDEV_UNREGISTER:
1113                 /* This rcu_read_lock/unlock pair is needed because
1114                  * dev_map_list is an RCU list AND to ensure a delete
1115                  * operation does not free a netdev_map entry while we
1116                  * are comparing it against the netdev being unregistered.
1117                  */
1118                 rcu_read_lock();
1119                 list_for_each_entry_rcu(dtab, &dev_map_list, list) {
1120                         if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
1121                                 dev_map_hash_remove_netdev(dtab, netdev);
1122                                 continue;
1123                         }
1124
1125                         for (i = 0; i < dtab->map.max_entries; i++) {
1126                                 struct bpf_dtab_netdev *dev, *odev;
1127
1128                                 dev = rcu_dereference(dtab->netdev_map[i]);
1129                                 if (!dev || netdev != dev->dev)
1130                                         continue;
1131                                 odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
1132                                 if (dev == odev) {
1133                                         call_rcu(&dev->rcu,
1134                                                  __dev_map_entry_free);
1135                                         atomic_dec((atomic_t *)&dtab->items);
1136                                 }
1137                         }
1138                 }
1139                 rcu_read_unlock();
1140                 break;
1141         default:
1142                 break;
1143         }
1144         return NOTIFY_OK;
1145 }
1146
1147 static struct notifier_block dev_map_notifier = {
1148         .notifier_call = dev_map_notification,
1149 };
1150
1151 static int __init dev_map_init(void)
1152 {
1153         int cpu;
1154
1155         /* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
1156         BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
1157                      offsetof(struct _bpf_dtab_netdev, dev));
1158         register_netdevice_notifier(&dev_map_notifier);
1159
1160         for_each_possible_cpu(cpu)
1161                 INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
1162         return 0;
1163 }
1164
1165 subsys_initcall(dev_map_init);