Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 3b45c23..2546daf 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -57,6 +57,7 @@ struct xdp_dev_bulk_queue {
        struct list_head flush_node;
        struct net_device *dev;
        struct net_device *dev_rx;
+       struct bpf_prog *xdp_prog;
        unsigned int count;
 };
 
@@ -72,7 +73,7 @@ struct bpf_dtab_netdev {
 
 struct bpf_dtab {
        struct bpf_map map;
-       struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
+       struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
        struct list_head list;
 
        /* these are only used for DEVMAP_HASH type maps */
@@ -197,6 +198,7 @@ static void dev_map_free(struct bpf_map *map)
        list_del_rcu(&dtab->list);
        spin_unlock(&dev_map_lock);
 
+       bpf_clear_redirect_map(map);
        synchronize_rcu();
 
        /* Make sure prior __dev_map_entry_free() have completed. */
@@ -224,7 +226,7 @@ static void dev_map_free(struct bpf_map *map)
                for (i = 0; i < dtab->map.max_entries; i++) {
                        struct bpf_dtab_netdev *dev;
 
-                       dev = dtab->netdev_map[i];
+                       dev = rcu_dereference_raw(dtab->netdev_map[i]);
                        if (!dev)
                                continue;
 
@@ -257,6 +259,10 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
        return 0;
 }
 
+/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
+ * by local_bh_disable() (from XDP calls inside NAPI). The
+ * rcu_read_lock_bh_held() below makes lockdep accept both.
+ */
 static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
 {
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
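
For context, a minimal sketch (assumed surrounding code, not part of this diff) of the two calling contexts the comment above describes: the syscall path takes rcu_read_lock() explicitly, while the XDP path runs from NAPI with bottom halves already disabled, which the rcu_read_lock_bh_held() check lets lockdep accept.

        /* Syscall-side caller (assumed sketch): explicit RCU read section. */
        rcu_read_lock();
        obj = __dev_map_hash_lookup_elem(map, key);
        /* ... obj, if non-NULL, stays valid until rcu_read_unlock() ... */
        rcu_read_unlock();

        /* XDP path: the program runs from the driver's NAPI poll with BHs
         * disabled, so no explicit rcu_read_lock() is taken; the
         * rcu_read_lock_bh_held() condition covers this case for lockdep.
         */
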
@@ -326,22 +332,69 @@ bool dev_map_can_have_prog(struct bpf_map *map)
        return false;
 }
 
+static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
+                               struct xdp_frame **frames, int n,
+                               struct net_device *dev)
+{
+       struct xdp_txq_info txq = { .dev = dev };
+       struct xdp_buff xdp;
+       int i, nframes = 0;
+
+       for (i = 0; i < n; i++) {
+               struct xdp_frame *xdpf = frames[i];
+               u32 act;
+               int err;
+
+               xdp_convert_frame_to_buff(xdpf, &xdp);
+               xdp.txq = &txq;
+
+               act = bpf_prog_run_xdp(xdp_prog, &xdp);
+               switch (act) {
+               case XDP_PASS:
+                       err = xdp_update_frame_from_buff(&xdp, xdpf);
+                       if (unlikely(err < 0))
+                               xdp_return_frame_rx_napi(xdpf);
+                       else
+                               frames[nframes++] = xdpf;
+                       break;
+               default:
+                       bpf_warn_invalid_xdp_action(act);
+                       fallthrough;
+               case XDP_ABORTED:
+                       trace_xdp_exception(dev, xdp_prog, act);
+                       fallthrough;
+               case XDP_DROP:
+                       xdp_return_frame_rx_napi(xdpf);
+                       break;
+               }
+       }
+       return nframes; /* sent frames count */
+}
+
 static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
 {
        struct net_device *dev = bq->dev;
+       unsigned int cnt = bq->count;
        int sent = 0, err = 0;
+       int to_send = cnt;
        int i;
 
-       if (unlikely(!bq->count))
+       if (unlikely(!cnt))
                return;
 
-       for (i = 0; i < bq->count; i++) {
+       for (i = 0; i < cnt; i++) {
                struct xdp_frame *xdpf = bq->q[i];
 
                prefetch(xdpf);
        }
 
-       sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
+       if (bq->xdp_prog) {
+               to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
+               if (!to_send)
+                       goto out;
+       }
+
+       sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
        if (sent < 0) {
                /* If ndo_xdp_xmit fails with an errno, no frames have
                 * been xmit'ed.
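
The bq->xdp_prog run above is the per-entry program installed through the devmap value; a hedged userspace sketch of setting up such an entry (the fd, key, and helper name are assumptions for the example):

        #include <linux/bpf.h>
        #include <bpf/bpf.h>

        static int add_devmap_entry(int devmap_fd, __u32 key,
                                    int target_ifindex, int devmap_prog_fd)
        {
                struct bpf_devmap_val val = {
                        .ifindex     = target_ifindex,   /* egress device */
                        .bpf_prog.fd = devmap_prog_fd,   /* run by dev_map_bpf_prog_run() */
                };

                /* devmap_fd must be a BPF_MAP_TYPE_DEVMAP created with
                 * value_size == sizeof(struct bpf_devmap_val).
                 */
                return bpf_map_update_elem(devmap_fd, &key, &val, 0);
        }
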
@@ -353,37 +406,34 @@ static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
        /* If not all frames have been transmitted, it is our
         * responsibility to free them
         */
-       for (i = sent; unlikely(i < bq->count); i++)
+       for (i = sent; unlikely(i < to_send); i++)
                xdp_return_frame_rx_napi(bq->q[i]);
 
-       trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, bq->count - sent, err);
-       bq->dev_rx = NULL;
+out:
        bq->count = 0;
-       __list_del_clearprev(&bq->flush_node);
-}
-
-/* __dev_flush is called from xdp_do_flush() which _must_ be signaled
- * from the driver before returning from its napi->poll() routine. The poll()
- * routine is called either from busy_poll context or net_rx_action signaled
- * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
- * net device can be torn down. On devmap tear down we ensure the flush list
- * is empty before completing to ensure all flush operations have completed.
- * When drivers update the bpf program they may need to ensure any flush ops
- * are also complete. Using synchronize_rcu or call_rcu will suffice for this
- * because both wait for napi context to exit.
+       trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
+}
+
+/* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
+ * driver before returning from its napi->poll() routine. See the comment above
+ * xdp_do_flush() in filter.c.
  */
 void __dev_flush(void)
 {
        struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
        struct xdp_dev_bulk_queue *bq, *tmp;
 
-       list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
+       list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
                bq_xmit_all(bq, XDP_XMIT_FLUSH);
+               bq->dev_rx = NULL;
+               bq->xdp_prog = NULL;
+               __list_del_clearprev(&bq->flush_node);
+       }
 }
 
-/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
- * update happens in parallel here a dev_put wont happen until after reading the
- * ifindex.
+/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
+ * by local_bh_disable() (from XDP calls inside NAPI). The
+ * rcu_read_lock_bh_held() below makes lockdep accept both.
  */
 static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
 {
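
A minimal driver-side sketch (assumed code, not from this patch) of the contract described in the comment above __dev_flush(): the driver calls xdp_do_flush() before leaving its poll routine, so the per-CPU bulk queues are drained while BHs are still disabled.

        static int example_napi_poll(struct napi_struct *napi, int budget)
        {
                int work = 0;

                /* ... RX loop; XDP_REDIRECT into a devmap ends up in bq_enqueue() ... */

                xdp_do_flush();         /* drains dev_flush_list via __dev_flush() */

                if (work < budget)
                        napi_complete_done(napi, work);
                return work;
        }
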
@@ -393,15 +443,17 @@ static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
        if (key >= map->max_entries)
                return NULL;
 
-       obj = READ_ONCE(dtab->netdev_map[key]);
+       obj = rcu_dereference_check(dtab->netdev_map[key],
+                                   rcu_read_lock_bh_held());
        return obj;
 }
 
-/* Runs under RCU-read-side, plus in softirq under NAPI protection.
- * Thus, safe percpu variable access.
+/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu
+ * variable access, and map elements stick around. See comment above
+ * xdp_do_flush() in filter.c.
  */
 static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
-                      struct net_device *dev_rx)
+                      struct net_device *dev_rx, struct bpf_prog *xdp_prog)
 {
        struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
        struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
@@ -412,18 +464,22 @@ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
        /* Ingress dev_rx will be the same for all xdp_frames in the
         * bulk_queue, because the bq is stored per-CPU and must be flushed
         * at the end of the net_device driver's NAPI poll function.
+        *
+        * Do the same with xdp_prog and flush_list since these fields
+        * are only ever modified together.
         */
-       if (!bq->dev_rx)
+       if (!bq->dev_rx) {
                bq->dev_rx = dev_rx;
+               bq->xdp_prog = xdp_prog;
+               list_add(&bq->flush_node, flush_list);
+       }
 
        bq->q[bq->count++] = xdpf;
-
-       if (!bq->flush_node.prev)
-               list_add(&bq->flush_node, flush_list);
 }
 
 static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
-                              struct net_device *dev_rx)
+                               struct net_device *dev_rx,
+                               struct bpf_prog *xdp_prog)
 {
        struct xdp_frame *xdpf;
        int err;
@@ -439,55 +495,115 @@ static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
        if (unlikely(!xdpf))
                return -EOVERFLOW;
 
-       bq_enqueue(dev, xdpf, dev_rx);
+       bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
        return 0;
 }
 
-static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
-                                        struct xdp_buff *xdp,
-                                        struct bpf_prog *xdp_prog)
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+                   struct net_device *dev_rx)
 {
-       struct xdp_txq_info txq = { .dev = dev };
-       u32 act;
+       return __xdp_enqueue(dev, xdp, dev_rx, NULL);
+}
 
-       xdp_set_data_meta_invalid(xdp);
-       xdp->txq = &txq;
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+                   struct net_device *dev_rx)
+{
+       struct net_device *dev = dst->dev;
 
-       act = bpf_prog_run_xdp(xdp_prog, xdp);
-       switch (act) {
-       case XDP_PASS:
-               return xdp;
-       case XDP_DROP:
-               break;
-       default:
-               bpf_warn_invalid_xdp_action(act);
-               fallthrough;
-       case XDP_ABORTED:
-               trace_xdp_exception(dev, xdp_prog, act);
-               break;
-       }
+       return __xdp_enqueue(dev, xdp, dev_rx, dst->xdp_prog);
+}
 
-       xdp_return_buff(xdp);
-       return NULL;
+static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_buff *xdp,
+                        int exclude_ifindex)
+{
+       if (!obj || obj->dev->ifindex == exclude_ifindex ||
+           !obj->dev->netdev_ops->ndo_xdp_xmit)
+               return false;
+
+       if (xdp_ok_fwd_dev(obj->dev, xdp->data_end - xdp->data))
+               return false;
+
+       return true;
 }
 
-int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
-                   struct net_device *dev_rx)
+static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
+                                struct net_device *dev_rx,
+                                struct xdp_frame *xdpf)
 {
-       return __xdp_enqueue(dev, xdp, dev_rx);
+       struct xdp_frame *nxdpf;
+
+       nxdpf = xdpf_clone(xdpf);
+       if (!nxdpf)
+               return -ENOMEM;
+
+       bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);
+
+       return 0;
 }
 
-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
-                   struct net_device *dev_rx)
+int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
+                         struct bpf_map *map, bool exclude_ingress)
 {
-       struct net_device *dev = dst->dev;
+       struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+       int exclude_ifindex = exclude_ingress ? dev_rx->ifindex : 0;
+       struct bpf_dtab_netdev *dst, *last_dst = NULL;
+       struct hlist_head *head;
+       struct xdp_frame *xdpf;
+       unsigned int i;
+       int err;
 
-       if (dst->xdp_prog) {
-               xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);
-               if (!xdp)
-                       return 0;
+       xdpf = xdp_convert_buff_to_frame(xdp);
+       if (unlikely(!xdpf))
+               return -EOVERFLOW;
+
+       if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
+               for (i = 0; i < map->max_entries; i++) {
+                       dst = READ_ONCE(dtab->netdev_map[i]);
+                       if (!is_valid_dst(dst, xdp, exclude_ifindex))
+                               continue;
+
+                       /* we only need n-1 clones; last_dst enqueued below */
+                       if (!last_dst) {
+                               last_dst = dst;
+                               continue;
+                       }
+
+                       err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
+                       if (err)
+                               return err;
+
+                       last_dst = dst;
+               }
+       } else { /* BPF_MAP_TYPE_DEVMAP_HASH */
+               for (i = 0; i < dtab->n_buckets; i++) {
+                       head = dev_map_index_hash(dtab, i);
+                       hlist_for_each_entry_rcu(dst, head, index_hlist,
+                                                lockdep_is_held(&dtab->index_lock)) {
+                               if (!is_valid_dst(dst, xdp, exclude_ifindex))
+                                       continue;
+
+                               /* we only need n-1 clones; last_dst enqueued below */
+                               if (!last_dst) {
+                                       last_dst = dst;
+                                       continue;
+                               }
+
+                               err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
+                               if (err)
+                                       return err;
+
+                               last_dst = dst;
+                       }
+               }
        }
-       return __xdp_enqueue(dev, xdp, dev_rx);
+
+       /* consume the last copy of the frame */
+       if (last_dst)
+               bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
+       else
+               xdp_return_frame_rx_napi(xdpf); /* dtab is empty */
+
+       return 0;
 }
 
 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
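
dev_map_enqueue_multi() is what backs the broadcast mode of bpf_redirect_map(); a hedged BPF-program sketch (map name and sizes are assumptions) of how the new flags are used:

        #include <linux/bpf.h>
        #include <bpf/bpf_helpers.h>

        struct {
                __uint(type, BPF_MAP_TYPE_DEVMAP);
                __uint(max_entries, 32);
                __type(key, __u32);
                __type(value, __u32);
        } forward_map SEC(".maps");

        SEC("xdp")
        int xdp_broadcast(struct xdp_md *ctx)
        {
                /* In broadcast mode the key is ignored; every map entry except
                 * (optionally) the ingress device receives a clone of the frame.
                 */
                return bpf_redirect_map(&forward_map, 0,
                                        BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
        }

        char _license[] SEC("license") = "GPL";
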
@@ -504,6 +620,87 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
        return 0;
 }
 
+static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
+                                 struct sk_buff *skb,
+                                 struct bpf_prog *xdp_prog)
+{
+       struct sk_buff *nskb;
+       int err;
+
+       nskb = skb_clone(skb, GFP_ATOMIC);
+       if (!nskb)
+               return -ENOMEM;
+
+       err = dev_map_generic_redirect(dst, nskb, xdp_prog);
+       if (unlikely(err)) {
+               consume_skb(nskb);
+               return err;
+       }
+
+       return 0;
+}
+
+int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+                          struct bpf_prog *xdp_prog, struct bpf_map *map,
+                          bool exclude_ingress)
+{
+       struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+       int exclude_ifindex = exclude_ingress ? dev->ifindex : 0;
+       struct bpf_dtab_netdev *dst, *last_dst = NULL;
+       struct hlist_head *head;
+       struct hlist_node *next;
+       unsigned int i;
+       int err;
+
+       if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
+               for (i = 0; i < map->max_entries; i++) {
+                       dst = READ_ONCE(dtab->netdev_map[i]);
+                       if (!dst || dst->dev->ifindex == exclude_ifindex)
+                               continue;
+
+                       /* we only need n-1 clones; last_dst enqueued below */
+                       if (!last_dst) {
+                               last_dst = dst;
+                               continue;
+                       }
+
+                       err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
+                       if (err)
+                               return err;
+
+                       last_dst = dst;
+               }
+       } else { /* BPF_MAP_TYPE_DEVMAP_HASH */
+               for (i = 0; i < dtab->n_buckets; i++) {
+                       head = dev_map_index_hash(dtab, i);
+                       hlist_for_each_entry_safe(dst, next, head, index_hlist) {
+                               if (!dst || dst->dev->ifindex == exclude_ifindex)
+                                       continue;
+
+                               /* we only need n-1 clones; last_dst enqueued below */
+                               if (!last_dst) {
+                                       last_dst = dst;
+                                       continue;
+                               }
+
+                               err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
+                               if (err)
+                                       return err;
+
+                               last_dst = dst;
+                       }
+               }
+       }
+
+       /* consume the first skb and return */
+       if (last_dst)
+               return dev_map_generic_redirect(last_dst, skb, xdp_prog);
+
+       /* dtab is empty */
+       consume_skb(skb);
+       return 0;
+}
+
 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
 {
        struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
@@ -538,14 +735,7 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key)
        if (k >= map->max_entries)
                return -EINVAL;
 
-       /* Use call_rcu() here to ensure any rcu critical sections have
-        * completed as well as any flush operations because call_rcu
-        * will wait for preempt-disable region to complete, NAPI in this
-        * context.  And additionally, the driver tear down ensures all
-        * soft irqs are complete before removing the net device in the
-        * case of dev_put equals zero.
-        */
-       old_dev = xchg(&dtab->netdev_map[k], NULL);
+       old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
        if (old_dev)
                call_rcu(&old_dev->rcu, __dev_map_entry_free);
        return 0;
@@ -654,7 +844,7 @@ static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
         * Remembering the driver side flush operation will happen before the
         * net device is removed.
         */
-       old_dev = xchg(&dtab->netdev_map[i], dev);
+       old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
        if (old_dev)
                call_rcu(&old_dev->rcu, __dev_map_entry_free);
 
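
Both update and delete use the same sparse-clean idiom for atomic exchange on an __rcu-annotated slot; a minimal kernel-side sketch of the pattern (types and names are assumptions for illustration):

        struct item {
                struct rcu_head rcu;
        };
        static struct item __rcu *slot;

        static void publish_item(struct item *new)
        {
                struct item *old;

                /* RCU_INITIALIZER() casts the plain pointer into the __rcu
                 * address space for the store; unrcu_pointer() strips the
                 * annotation from the value xchg() returns. xchg() itself is
                 * fully ordered, so no extra barrier is needed.
                 */
                old = unrcu_pointer(xchg(&slot, RCU_INITIALIZER(new)));
                if (old)
                        kfree_rcu(old, rcu);
        }
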
@@ -730,12 +920,16 @@ static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
 
 static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
 {
-       return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_lookup_elem);
+       return __bpf_xdp_redirect_map(map, ifindex, flags,
+                                     BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
+                                     __dev_map_lookup_elem);
 }
 
 static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
 {
-       return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_hash_lookup_elem);
+       return __bpf_xdp_redirect_map(map, ifindex, flags,
+                                     BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
+                                     __dev_map_hash_lookup_elem);
 }
 
 static int dev_map_btf_id;
@@ -830,10 +1024,10 @@ static int dev_map_notification(struct notifier_block *notifier,
                        for (i = 0; i < dtab->map.max_entries; i++) {
                                struct bpf_dtab_netdev *dev, *odev;
 
-                               dev = READ_ONCE(dtab->netdev_map[i]);
+                               dev = rcu_dereference(dtab->netdev_map[i]);
                                if (!dev || netdev != dev->dev)
                                        continue;
-                               odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
+                               odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
                                if (dev == odev)
                                        call_rcu(&dev->rcu,
                                                 __dev_map_entry_free);