// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
	      struct bpf_local_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}
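
/* Note: the selem pointer itself is the hash key. Since the pointer stays
 * stable for the element's lifetime, bpf_selem_link_map() and
 * bpf_selem_unlink_map() always resolve to the same bucket without having
 * to record the bucket index anywhere.
 */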
static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
	struct bpf_map *map = &smap->map;

	if (!map->ops->map_local_storage_charge)
		return 0;

	return map->ops->map_local_storage_charge(smap, owner, size);
}
static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
			 u32 size)
{
	struct bpf_map *map = &smap->map;

	if (map->ops->map_local_storage_uncharge)
		map->ops->map_local_storage_uncharge(smap, owner, size);
}
static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
	struct bpf_map *map = &smap->map;

	return map->ops->map_owner_storage_ptr(owner);
}
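
/* Each storage flavor wires these three ops up in its bpf_map_ops. As an
 * illustration (see bpf_sk_storage.c for the actual sk-storage hookup):
 *
 *	.map_local_storage_charge   = bpf_sk_storage_charge,
 *	.map_local_storage_uncharge = bpf_sk_storage_uncharge,
 *	.map_owner_storage_ptr      = bpf_sk_storage_ptr,
 *
 * where the owner_storage_ptr op returns &sk->sk_bpf_storage for a socket
 * owner.
 */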
static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->snode);
}

static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->map_node);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}
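
/* The _lockless variants above are only used for a best-effort pre-check
 * before taking local_storage->lock or the bucket lock; the result is
 * re-validated with the locked variant once the lock is held (see
 * bpf_selem_unlink_storage() and bpf_selem_unlink_map() below).
 */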
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
		void *value, bool charge_mem, gfp_t gfp_flags)
{
	struct bpf_local_storage_elem *selem;

	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
		return NULL;

	if (smap->bpf_ma) {
		migrate_disable();
		selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
		migrate_enable();
		if (selem)
			/* Keep the original bpf_map_kzalloc behavior
			 * from before bpf_mem_cache_alloc was adopted.
			 *
			 * No need to use zero_map_value. bpf_selem_free()
			 * only does bpf_mem_cache_free() when no other
			 * bpf prog is using the selem.
			 */
			memset(SDATA(selem)->data, 0, smap->map.value_size);
	} else {
		selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
					gfp_flags | __GFP_NOWARN);
	}

	if (selem) {
		if (value)
			copy_map_value(&smap->map, SDATA(selem)->data, value);
		/* No need to call check_and_init_map_value as memory is zero init */
		return selem;
	}

	if (charge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	return NULL;
}
static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	kfree(local_storage);
}
static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
{
	/* If RCU Tasks Trace grace period implies RCU grace period, do
	 * kfree(), else do kfree_rcu().
	 */
	if (rcu_trace_implies_rcu_gp())
		bpf_local_storage_free_rcu(rcu);
	else
		call_rcu(rcu, bpf_local_storage_free_rcu);
}
static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
				   bool reuse_now)
{
	if (!reuse_now)
		call_rcu_tasks_trace(&local_storage->rcu,
				     bpf_local_storage_free_trace_rcu);
	else
		call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
}
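
/* Free-path summary: with reuse_now == false, the storage must survive both
 * an RCU Tasks Trace grace period (sleepable progs) and a regular RCU grace
 * period (non-sleepable progs) before kfree(). When
 * rcu_trace_implies_rcu_gp() is true, the tasks-trace grace period already
 * implies a regular one, so chaining a second call_rcu() is skipped.
 */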
/* rcu tasks trace callback for bpf_ma == false */
static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	if (rcu_trace_implies_rcu_gp())
		kfree(selem);
	else
		kfree_rcu(selem, rcu);
}
/* Handle bpf_ma == false */
static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
			     bool vanilla_rcu)
{
	if (vanilla_rcu)
		kfree_rcu(selem, rcu);
	else
		call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu);
}
static void bpf_selem_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	bpf_mem_cache_raw_free(selem);
}
static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_selem_free_rcu(rcu);
	else
		call_rcu(rcu, bpf_selem_free_rcu);
}
void bpf_selem_free(struct bpf_local_storage_elem *selem,
		    struct bpf_local_storage_map *smap,
		    bool reuse_now)
{
	bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);

	if (!smap->bpf_ma) {
		__bpf_selem_free(selem, reuse_now);
		return;
	}

	if (!reuse_now) {
		call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
	} else {
		/* Instead of using the vanilla call_rcu(),
		 * bpf_mem_cache_free will be able to reuse selem
		 * immediately.
		 */
		migrate_disable();
		bpf_mem_cache_free(&smap->selem_ma, selem);
		migrate_enable();
	}
}
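
/* The selem free path mirrors the two allocator regimes chosen in
 * bpf_local_storage_map_alloc(): bpf_ma == false goes through
 * kfree_rcu()/call_rcu_tasks_trace(), while bpf_ma == true frees through
 * bpf_mem_cache_free() so the memory can be reused immediately when
 * reuse_now allows it.
 */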
/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
					    struct bpf_local_storage_elem *selem,
					    bool uncharge_mem, bool reuse_now)
{
	struct bpf_local_storage_map *smap;
	bool free_local_storage;
	void *owner;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	owner = local_storage->owner;

	/* All uncharging on the owner must be done first.
	 * The owner may be freed once the last selem is unlinked
	 * from local_storage.
	 */
	if (uncharge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	free_local_storage = hlist_is_singular_node(&selem->snode,
						    &local_storage->list);
	if (free_local_storage) {
		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
		local_storage->owner = NULL;

		/* After this RCU_INIT, owner may be freed and cannot be used */
		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

		/* local_storage is not freed now. local_storage->lock is
		 * still held and raw_spin_unlock_bh(&local_storage->lock)
		 * will be done by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if the freeing of the storage is done
		 * after the raw_spin_unlock_bh(&local_storage->lock).
		 *
		 * Hence, a "bool free_local_storage" is returned
		 * to the caller, which then frees the storage after
		 * all the RCU grace periods have expired.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

	bpf_selem_free(selem, smap, reuse_now);

	if (rcu_access_pointer(local_storage->smap) == smap)
		RCU_INIT_POINTER(local_storage->smap, NULL);

	return free_local_storage;
}
static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
				     bool reuse_now)
{
	struct bpf_local_storage *local_storage;
	bool free_local_storage = false;
	unsigned long flags;

	if (unlikely(!selem_linked_to_storage_lockless(selem)))
		/* selem has already been unlinked from sk */
		return;

	local_storage = rcu_dereference_check(selem->local_storage,
					      bpf_rcu_lock_held());
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	if (likely(selem_linked_to_storage(selem)))
		free_local_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true, reuse_now);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_local_storage)
		bpf_local_storage_free(local_storage, reuse_now);
}
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->local_storage, local_storage);
	hlist_add_head_rcu(&selem->snode, &local_storage->list);
}
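
/* The caller must hold local_storage->lock, except in
 * bpf_local_storage_alloc() where the new storage has not been published to
 * the owner yet and therefore cannot be reached by another context.
 */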
static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *smap;
	struct bpf_local_storage_map_bucket *b;
	unsigned long flags;

	if (unlikely(!selem_linked_to_map_lockless(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	b = select_bucket(smap, selem);
	raw_spin_lock_irqsave(&b->lock, flags);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}
void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
	unsigned long flags;

	raw_spin_lock_irqsave(&b->lock, flags);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}
void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
{
	/* Always unlink from map before unlinking from local_storage
	 * because selem will be freed after successfully unlinked from
	 * the local_storage.
	 */
	bpf_selem_unlink_map(selem);
	bpf_selem_unlink_storage(selem, reuse_now);
}
/* If cacheit_lockit is false, this lookup function is lockless */
struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
				      bpf_rcu_lock_held());
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
				 rcu_read_lock_trace_held())
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		unsigned long flags;

		/* spinlock is needed to avoid racing with the
		 * parallel delete. Otherwise, publishing an already
		 * deleted sdata to the cache will become a use-after-free
		 * problem in the next bpf_local_storage_lookup().
		 */
		raw_spin_lock_irqsave(&local_storage->lock, flags);
		if (selem_linked_to_storage(selem))
			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	}

	return sdata;
}
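
/* An illustrative helper-side sketch (hypothetical code; the real helpers
 * live in bpf_sk_storage.c, bpf_task_storage.c, etc.):
 *
 *	sdata = bpf_local_storage_lookup(local_storage, smap, true);
 *	return sdata ? sdata->data : NULL;
 *
 * Passing cacheit_lockit == true publishes the hit to
 * local_storage->cache[smap->cache_idx], so the next lookup of the same map
 * takes the lockless fast path. Internal callers that are about to modify
 * the lists (e.g. bpf_local_storage_update()) pass false.
 */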
static int check_flags(const struct bpf_local_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}
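
/* Flag semantics, for reference (BPF_F_LOCK is masked out first and may be
 * combined with any of them):
 *	BPF_ANY     - create new or update existing, always passes
 *	BPF_NOEXIST - create only, -EEXIST if an old elem is found
 *	BPF_EXIST   - update only, -ENOENT if no old elem exists
 */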
int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem,
			    gfp_t gfp_flags)
{
	struct bpf_local_storage *prev_storage, *storage;
	struct bpf_local_storage **owner_storage_ptr;
	int err;

	err = mem_charge(smap, owner, sizeof(*storage));
	if (err)
		return err;

	storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
				  gfp_flags | __GFP_NOWARN);
	if (!storage) {
		err = -ENOMEM;
		goto uncharge;
	}

	RCU_INIT_POINTER(storage->smap, smap);
	INIT_HLIST_HEAD(&storage->list);
	raw_spin_lock_init(&storage->lock);
	storage->owner = owner;

	bpf_selem_link_storage_nolock(storage, first_selem);
	bpf_selem_link_map(smap, first_selem);

	owner_storage_ptr =
		(struct bpf_local_storage **)owner_storage(smap, owner);
	/* Publish storage to the owner.
	 * Instead of using any lock of the kernel object (i.e. owner),
	 * cmpxchg will work with any kernel object regardless what
	 * the running context is, bh, irq...etc.
	 *
	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
	 * is protected by the storage->lock. Hence, when freeing
	 * the owner->storage, the storage->lock must be held before
	 * setting owner->storage ptr to NULL.
	 */
	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
	if (unlikely(prev_storage)) {
		bpf_selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, first_selem can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_local_storage_map_free() does a
		 * synchronize_rcu_mult (waiting for both sleepable and
		 * normal programs) before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	bpf_local_storage_free(storage, true);
	mem_uncharge(smap, owner, sizeof(*storage));
	return err;
}
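
/* If two contexts race to create the very first storage for the same owner,
 * the cmpxchg above lets exactly one of them publish; the loser unlinks its
 * first_selem again and returns -EAGAIN, which bpf_local_storage_update()
 * propagates to its caller.
 */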
/* sk cannot be going away because it is linking new elem
 * to sk->sk_bpf_storage. (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, it will become a leak (and other memory issues
 * during map destruction).
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags, gfp_t gfp_flags)
{
	struct bpf_local_storage_data *old_sdata = NULL;
	struct bpf_local_storage_elem *selem = NULL;
	struct bpf_local_storage *local_storage;
	unsigned long flags;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot be both set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
		return ERR_PTR(-EINVAL);

	if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
		return ERR_PTR(-EINVAL);

	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
					      bpf_rcu_lock_held());
	if (!local_storage || hlist_empty(&local_storage->list)) {
		/* Very first elem for the owner */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
		if (err) {
			bpf_selem_free(selem, smap, true);
			mem_uncharge(smap, owner, smap->elem_size);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Hoping to find an old_sdata to do inline update
		 * such that it can avoid taking the local_storage->lock
		 * and changing the lists.
		 */
		old_sdata =
			bpf_local_storage_lookup(local_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
			copy_map_value_locked(&smap->map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	if (gfp_flags == GFP_KERNEL) {
		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);
	}

	raw_spin_lock_irqsave(&local_storage->lock, flags);

	/* Recheck local_storage->list under local_storage->lock */
	if (unlikely(hlist_empty(&local_storage->list))) {
		/* A parallel del is happening and local_storage is going
		 * away. It has just been checked before, so very
		 * unlikely. Return instead of retry to keep things
		 * simple.
		 */
		err = -EAGAIN;
		goto unlock_err;
	}

	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock_err;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(&smap->map, old_sdata->data, value,
				      false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	if (gfp_flags != GFP_KERNEL) {
		/* local_storage->lock is held. Hence, we are sure
		 * we can unlink and uncharge the old_sdata successfully
		 * later. Hence, instead of charging the new selem now
		 * and then uncharging the old selem later (which may cause
		 * a potential but unnecessary charge failure), avoid taking
		 * a charge at all here (the "!old_sdata" check) and the
		 * old_sdata will not be uncharged later during
		 * bpf_selem_unlink_storage_nolock().
		 */
		selem = bpf_selem_alloc(smap, owner, value, !old_sdata, gfp_flags);
		if (!selem) {
			err = -ENOMEM;
			goto unlock_err;
		}
	}

	/* First, link the new selem to the map */
	bpf_selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to local_storage */
	bpf_selem_link_storage_nolock(local_storage, selem);

	/* Third, remove old selem, SELEM(old_sdata) */
	if (old_sdata) {
		bpf_selem_unlink_map(SELEM(old_sdata));
		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
						false, true);
	}

unlock:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	return SDATA(selem);

unlock_err:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	if (selem) {
		mem_uncharge(smap, owner, smap->elem_size);
		bpf_selem_free(selem, smap, true);
	}
	return ERR_PTR(err);
}
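
/* An illustrative get-or-create caller sketch (hypothetical code; the real
 * helpers live in bpf_sk_storage.c, bpf_task_storage.c, etc.):
 *
 *	sdata = bpf_local_storage_update(owner, smap, init_val,
 *					 BPF_NOEXIST, gfp_flags);
 *	return IS_ERR(sdata) ? NULL : sdata->data;
 *
 * Note the restriction enforced at the top of the function: a GFP_KERNEL
 * (sleepable) update must use BPF_NOEXIST, which lets the selem be
 * pre-allocated before local_storage->lock is taken.
 */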
static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
	u64 min_usage = U64_MAX;
	u16 i, res = 0;

	spin_lock(&cache->idx_lock);

	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
		if (cache->idx_usage_counts[i] < min_usage) {
			min_usage = cache->idx_usage_counts[i];
			res = i;

			/* Found a free cache_idx */
			if (!min_usage)
				break;
		}
	}
	cache->idx_usage_counts[res]++;

	spin_unlock(&cache->idx_lock);

	return res;
}
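
/* Each map of a given storage flavor is assigned one of the
 * BPF_LOCAL_STORAGE_CACHE_SIZE cache slots for its lifetime. Picking the
 * least-used slot spreads maps across slots, making it less likely that two
 * hot maps keep evicting each other's sdata from
 * local_storage->cache[cache_idx].
 */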
static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
					     u16 idx)
{
	spin_lock(&cache->idx_lock);
	cache->idx_usage_counts[idx]--;
	spin_unlock(&cache->idx_lock);
}
int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
		return -E2BIG;

	return 0;
}
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}
void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
{
	struct bpf_local_storage_elem *selem;
	bool free_storage = false;
	struct hlist_node *n;
	unsigned long flags;

	/* Neither the bpf_prog nor the bpf_map's syscall
	 * could be modifying the local_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * local_storage->list by the bpf_prog or by the bpf_map's syscall.
	 *
	 * It is racing with bpf_local_storage_map_free() alone
	 * when unlinking elem from the local_storage->list and
	 * the map's bucket->list.
	 */
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * local_storage.
		 */
		bpf_selem_unlink_map(selem);
		/* If local_storage list has only one element, the
		 * bpf_selem_unlink_storage_nolock() will return true.
		 * Otherwise, it will return false. The current loop iteration
		 * intends to remove all local storage. So the last iteration
		 * of the loop will set free_storage to true.
		 */
		free_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, false, true);
	}
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_storage)
		bpf_local_storage_free(local_storage, true);
}
u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
	u64 usage = sizeof(*smap);

	/* The dynamically allocated selems are not counted currently. */
	usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
	return usage;
}
/* When bpf_ma == true, the bpf_mem_alloc is used to allocate and free memory.
 * A deadlock free allocator is useful for storage whose owner PTR_TO_BTF_ID
 * the bpf prog can easily get hold of in any context, e.g. via
 * bpf_get_current_task_btf().
 * The task and cgroup storage fall into this case. The bpf_mem_alloc reuses
 * memory immediately. To be reuse-immediate safe, the owner destruction
 * code path needs to go through a rcu grace period before calling
 * bpf_local_storage_destroy().
 *
 * When bpf_ma == false, the kmalloc and kfree are used.
 */
struct bpf_map *
bpf_local_storage_map_alloc(union bpf_attr *attr,
			    struct bpf_local_storage_cache *cache,
			    bool bpf_ma)
{
	struct bpf_local_storage_map *smap;
	unsigned int i;
	u32 nbuckets;
	int err;

	smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	nbuckets = roundup_pow_of_two(num_possible_cpus());
	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	nbuckets = max_t(u32, 2, nbuckets);
	smap->bucket_log = ilog2(nbuckets);

	smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
					 nbuckets, GFP_USER | __GFP_NOWARN);
	if (!smap->buckets) {
		err = -ENOMEM;
		goto free_smap;
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size = offsetof(struct bpf_local_storage_elem,
				   sdata.data[attr->value_size]);

	smap->bpf_ma = bpf_ma;
	if (bpf_ma) {
		err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
		if (err)
			goto free_smap;
	}

	smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
	return &smap->map;

free_smap:
	kvfree(smap->buckets);
	bpf_map_area_free(smap);
	return ERR_PTR(err);
}
void bpf_local_storage_map_free(struct bpf_map *map,
				struct bpf_local_storage_cache *cache,
				int __percpu *busy_counter)
{
	struct bpf_local_storage_map_bucket *b;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	unsigned int i;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(cache, smap->cache_idx);

	/* Note that this map might be concurrently cloned from
	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
	 * RCU read section to finish before proceeding. New RCU
	 * read sections should be prevented via bpf_map_inc_not_zero.
	 */
	synchronize_rcu();

	/* bpf prog and the userspace can no longer access this map
	 * now. No new selem (of this map) can be added
	 * to the owner->storage or to the map bucket's list.
	 *
	 * The elem of this map can be cleaned up here
	 * or when the storage is freed e.g.
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(
				rcu_dereference_raw(hlist_first_rcu(&b->list)),
				struct bpf_local_storage_elem, map_node))) {
			if (busy_counter) {
				migrate_disable();
				this_cpu_inc(*busy_counter);
			}
			bpf_selem_unlink(selem, true);
			if (busy_counter) {
				this_cpu_dec(*busy_counter);
				migrate_enable();
			}
			cond_resched_rcu();
		}
		rcu_read_unlock();
	}

	/* While freeing the storage we may still need to access the map.
	 *
	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
	 * which then made the above while((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, while freeing the storage one still needs to access the
	 * smap->elem_size to do the uncharging in
	 * bpf_selem_unlink_storage_nolock().
	 *
	 * Hence, wait another rcu grace period for the storage to be freed.
	 */
	synchronize_rcu();

	if (smap->bpf_ma)
		bpf_mem_alloc_destroy(&smap->selem_ma);
	kvfree(smap->buckets);
	bpf_map_area_free(smap);
}