/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths into the devmap control plane: bpf syscalls,
 * bpf programs, and driver side xmit/flush operations. A bpf syscall will
 * invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use call_rcu()
 * to wait for an rcu grace period before freeing the old data structures. This
 * ensures the datapath always has a valid copy. However, the datapath does a
 * "flush" operation that pushes any pending packets in the driver outside the
 * RCU critical section. Each bpf_dtab_netdev tracks these pending operations
 * using an atomic per-cpu bitmap. The bpf_dtab_netdev object will not be
 * destroyed until all bits are cleared, indicating all outstanding flush
 * operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structures for entries that contain
 * a reference to the net device and remove them. This is a two step process:
 * (a) dereference the bpf_dtab_netdev object in netdev_map and (b) check
 * whether its ifindex is the same as that of the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 */
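
/* For orientation, a minimal sketch of how a devmap is typically driven from
 * an XDP program (illustrative only, not part of this file; the map and
 * program names are hypothetical, modeled on samples/bpf usage):
 *
 *	struct bpf_map_def SEC("maps") tx_port = {
 *		.type		= BPF_MAP_TYPE_DEVMAP,
 *		.key_size	= sizeof(__u32),  // slot index in netdev_map
 *		.value_size	= sizeof(__u32),  // ifindex of target device
 *		.max_entries	= 64,
 *	};
 *
 *	SEC("xdp")
 *	int xdp_redirect_prog(struct xdp_md *ctx)
 *	{
 *		// Redirect every frame to the device stored at slot 0.
 *		return bpf_redirect_map(&tx_port, 0, 0);
 *	}
 */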
#include <linux/bpf.h>
#include <linux/filter.h>
struct bpf_dtab_netdev {
	struct net_device *dev;
	struct bpf_dtab *dtab;
	unsigned int bit;
	struct rcu_head rcu;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map;
	unsigned long __percpu *flush_needed;
	struct list_head list;
};
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);
static u64 dev_map_bitmap_size(const union bpf_attr *attr)
{
	return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
}
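
/* Worked example for dev_map_bitmap_size() (assuming 64-bit longs): with
 * max_entries == 100, BITS_TO_LONGS(100) == 2, so each cpu gets a 16-byte
 * bitmap, one bit per possible map slot.
 */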
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err = -EINVAL;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	dtab->map.map_type = attr->map_type;
	dtab->map.key_size = attr->key_size;
	dtab->map.value_size = attr->value_size;
	dtab->map.max_entries = attr->max_entries;
	dtab->map.map_flags = attr->map_flags;
	dtab->map.numa_node = bpf_map_attr_numa_node(attr);

	/* make sure page count doesn't overflow */
	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_dtab;

	dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(dtab->map.pages);
	if (err)
		goto free_dtab;

	err = -ENOMEM;

	/* A per cpu bitfield with a bit per possible net device */
	dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
						__alignof__(unsigned long),
						GFP_KERNEL | __GFP_NOWARN);
	if (!dtab->flush_needed)
		goto free_dtab;

	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
					      sizeof(struct bpf_dtab_netdev *),
					      dtab->map.numa_node);
	if (!dtab->netdev_map)
		goto free_dtab;

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
free_dtab:
	free_percpu(dtab->flush_needed);
	kfree(dtab);
	return ERR_PTR(err);
}
static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i, cpu;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete. The rcu critical section only guarantees
	 * no further reads against netdev_map. It does __not__ ensure pending
	 * flush operations (if any) are complete.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	synchronize_rcu();

	/* To ensure all pending flush operations have completed wait for flush
	 * bitmap to indicate all flush_needed bits to be zero on _all_ cpus.
	 * Because the above synchronize_rcu() ensures the map is disconnected
	 * from the program we can assume no new bits will be set.
	 */
	for_each_online_cpu(cpu) {
		unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);

		while (!bitmap_empty(bitmap, dtab->map.max_entries))
			cond_resched();
	}

	for (i = 0; i < dtab->map.max_entries; i++) {
		struct bpf_dtab_netdev *dev;

		dev = dtab->netdev_map[i];
		if (!dev)
			continue;

		dev_put(dev->dev);
		kfree(dev);
	}

	free_percpu(dtab->flush_needed);
	bpf_map_area_free(dtab->netdev_map);
	kfree(dtab);
}
static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);

	__set_bit(bit, bitmap);
}
/* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled
 * from the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the ctx bitmap
 * is zeroed before completing to ensure all flush operations have completed.
 */
void __dev_map_flush(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
	u32 bit;

	for_each_set_bit(bit, bitmap, map->max_entries) {
		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
		struct net_device *netdev;

		/* This is possible if the dev entry is removed by user space
		 * between xdp redirect and flush op.
		 */
		if (unlikely(!dev))
			continue;

		__clear_bit(bit, bitmap);
		netdev = dev->dev;
		if (likely(netdev->netdev_ops->ndo_xdp_flush))
			netdev->netdev_ops->ndo_xdp_flush(netdev);
	}
}
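
/* Illustrative driver-side ordering (sketch only; the driver function names
 * are hypothetical). Frames processed in poll() may queue redirects via
 * bpf_redirect_map(), and the driver signals the flush before returning:
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done;
 *
 *		done = example_rx_ring_clean(napi, budget); // runs the XDP
 *							    // program per frame
 *		xdp_do_flush_map();	// ends up in __dev_map_flush()
 *		return done;
 *	}
 */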
/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
 * update happens in parallel here a dev_put won't happen until after reading
 * the ifindex.
 */
struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev;

	if (key >= map->max_entries)
		return NULL;

	dev = READ_ONCE(dtab->netdev_map[key]);
	return dev ? dev->dev : NULL;
}
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct net_device *dev = __dev_map_lookup_elem(map, *(u32 *)key);

	return dev ? &dev->ifindex : NULL;
}
static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
	if (dev->dev->netdev_ops->ndo_xdp_flush) {
		struct net_device *fl = dev->dev;
		unsigned long *bitmap;
		int cpu;

		for_each_online_cpu(cpu) {
			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
			__clear_bit(dev->bit, bitmap);

			fl->netdev_ops->ndo_xdp_flush(dev->dev);
		}
	}
}
static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);
	dev_put(dev->dev);
	kfree(dev);
}
static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed, but this does not guarantee a flush has happened yet,
	 * because the driver side rcu_read_lock/unlock only protects the
	 * running XDP program. However, for pending flush operations the
	 * dev and ctx are stored in another per cpu map. Additionally,
	 * the driver tear down ensures all soft irqs are complete before
	 * the net device is removed once its reference count drops to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}
static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct net *net = current->nsproxy->net_ns;
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 i = *(u32 *)key;
	u32 ifindex = *(u32 *)value;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN,
				   map->numa_node);
		if (!dev)
			return -ENOMEM;

		dev->dev = dev_get_by_index(net, ifindex);
		if (!dev->dev) {
			kfree(dev);
			return -EINVAL;
		}

		dev->bit = i;
		dev->dtab = dtab;
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}
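
/* Illustrative user-space update (sketch only; assumes a libbpf-style
 * bpf_map_update_elem() wrapper around the bpf(2) syscall and a map_fd
 * obtained elsewhere):
 *
 *	__u32 key = 0;				  // slot in the devmap
 *	__u32 ifindex = if_nametoindex("eth0");	  // egress device to install
 *
 *	bpf_map_update_elem(map_fd, &key, &ifindex, BPF_ANY);
 */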
const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
};
static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev ||
				    dev->dev->ifindex != netdev->ifindex)
					continue;

				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	register_netdevice_notifier(&dev_map_notifier);
	return 0;
}

subsys_initcall(dev_map_init);