kernel/bpf/bpf_local_storage.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

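/* An selem has no key of its own; a map bucket is chosen by hashing the
 * address of the selem itself, folded into bucket_log bits by hash_ptr().
 */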
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
              struct bpf_local_storage_elem *selem)
{
        return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

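/* Memory accounting is delegated to the concrete map type (e.g. sk storage
 * charges against the socket's memory allowance).  Map types that provide
 * no charge callback are simply not accounted against the owner.
 */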
static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
        struct bpf_map *map = &smap->map;

        if (!map->ops->map_local_storage_charge)
                return 0;

        return map->ops->map_local_storage_charge(smap, owner, size);
}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
                         u32 size)
{
        struct bpf_map *map = &smap->map;

        if (map->ops->map_local_storage_uncharge)
                map->ops->map_local_storage_uncharge(smap, owner, size);
}

static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
        struct bpf_map *map = &smap->map;

        return map->ops->map_owner_storage_ptr(owner);
}

static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
{
        return !hlist_unhashed_lockless(&selem->snode);
}

static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
        return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
{
        return !hlist_unhashed_lockless(&selem->map_node);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
        return !hlist_unhashed(&selem->map_node);
}

struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
                void *value, bool charge_mem, gfp_t gfp_flags)
{
        struct bpf_local_storage_elem *selem;

        if (charge_mem && mem_charge(smap, owner, smap->elem_size))
                return NULL;

        selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
                                gfp_flags | __GFP_NOWARN);
        if (selem) {
                if (value)
                        copy_map_value(&smap->map, SDATA(selem)->data, value);
                return selem;
        }

        if (charge_mem)
                mem_uncharge(smap, owner, smap->elem_size);

        return NULL;
}

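/* When queued with call_rcu_tasks_trace(), the two callbacks below run only
 * after a tasks-trace RCU grace period (covering sleepable bpf programs);
 * each then uses kfree_rcu() to also wait for a regular RCU grace period
 * before the memory is actually freed.
 */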
void bpf_local_storage_free_rcu(struct rcu_head *rcu)
{
        struct bpf_local_storage *local_storage;

        local_storage = container_of(rcu, struct bpf_local_storage, rcu);
        kfree_rcu(local_storage, rcu);
}

static void bpf_selem_free_rcu(struct rcu_head *rcu)
{
        struct bpf_local_storage_elem *selem;

        selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
        kfree_rcu(selem, rcu);
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
                                     struct bpf_local_storage_elem *selem,
                                     bool uncharge_mem, bool use_trace_rcu)
{
        struct bpf_local_storage_map *smap;
        bool free_local_storage;
        void *owner;

        smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
        owner = local_storage->owner;

        /* All uncharging on the owner must be done first.
         * The owner may be freed once the last selem is unlinked
         * from local_storage.
         */
        if (uncharge_mem)
                mem_uncharge(smap, owner, smap->elem_size);

        free_local_storage = hlist_is_singular_node(&selem->snode,
                                                    &local_storage->list);
        if (free_local_storage) {
                mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
                local_storage->owner = NULL;

                /* After this RCU_INIT, owner may be freed and cannot be used */
                RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

                /* local_storage is not freed now.  local_storage->lock is
                 * still held and raw_spin_unlock_irqrestore(&local_storage->lock)
                 * will be done by the caller.
                 *
                 * Although the unlock will be done under
                 * rcu_read_lock(), it is more intuitive to
                 * read if the freeing of the storage is done
                 * after raw_spin_unlock_irqrestore(&local_storage->lock).
                 *
                 * Hence, a "bool free_local_storage" is returned
                 * to the caller, which then frees the storage after
                 * all the RCU grace periods have expired.
                 */
        }
        hlist_del_init_rcu(&selem->snode);
        if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
            SDATA(selem))
                RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

        if (use_trace_rcu)
                call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
        else
                kfree_rcu(selem, rcu);

        return free_local_storage;
}

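/* Unlink selem from its local_storage and, if it was the last element,
 * schedule the local_storage itself for freeing.  The lockless check is
 * only a fast-path hint; the linked state is rechecked under
 * local_storage->lock before anything is unlinked.
 */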
static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
                                       bool use_trace_rcu)
{
        struct bpf_local_storage *local_storage;
        bool free_local_storage = false;
        unsigned long flags;

        if (unlikely(!selem_linked_to_storage_lockless(selem)))
                /* selem has already been unlinked from sk */
                return;

        local_storage = rcu_dereference_check(selem->local_storage,
                                              bpf_rcu_lock_held());
        raw_spin_lock_irqsave(&local_storage->lock, flags);
        if (likely(selem_linked_to_storage(selem)))
                free_local_storage = bpf_selem_unlink_storage_nolock(
                        local_storage, selem, true, use_trace_rcu);
        raw_spin_unlock_irqrestore(&local_storage->lock, flags);

        if (free_local_storage) {
                if (use_trace_rcu)
                        call_rcu_tasks_trace(&local_storage->rcu,
                                     bpf_local_storage_free_rcu);
                else
                        kfree_rcu(local_storage, rcu);
        }
}

void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
                                   struct bpf_local_storage_elem *selem)
{
        RCU_INIT_POINTER(selem->local_storage, local_storage);
        hlist_add_head_rcu(&selem->snode, &local_storage->list);
}

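/* Unlink selem from its map bucket.  Racing unlinkers are fine: the
 * unhashed state is rechecked under the bucket lock.
 */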
void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
        struct bpf_local_storage_map *smap;
        struct bpf_local_storage_map_bucket *b;
        unsigned long flags;

        if (unlikely(!selem_linked_to_map_lockless(selem)))
                /* selem has already been unlinked from smap */
                return;

        smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
        b = select_bucket(smap, selem);
        raw_spin_lock_irqsave(&b->lock, flags);
        if (likely(selem_linked_to_map(selem)))
                hlist_del_init_rcu(&selem->map_node);
        raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_link_map(struct bpf_local_storage_map *smap,
                        struct bpf_local_storage_elem *selem)
{
        struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
        unsigned long flags;

        raw_spin_lock_irqsave(&b->lock, flags);
        RCU_INIT_POINTER(SDATA(selem)->smap, smap);
        hlist_add_head_rcu(&selem->map_node, &b->list);
        raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
{
        /* Always unlink from the map before unlinking from local_storage
         * because selem will be freed once it is successfully unlinked
         * from the local_storage.
         */
        bpf_selem_unlink_map(selem);
        __bpf_selem_unlink_storage(selem, use_trace_rcu);
}

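/* Look up this map's sdata in an owner's local_storage.  A per-map cache
 * slot (smap->cache_idx) keeps the common case O(1); on a cache miss the
 * storage's selem list is walked under RCU.
 */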
struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
                         struct bpf_local_storage_map *smap,
                         bool cacheit_lockit)
{
        struct bpf_local_storage_data *sdata;
        struct bpf_local_storage_elem *selem;

        /* Fast path (cache hit) */
        sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
                                      bpf_rcu_lock_held());
        if (sdata && rcu_access_pointer(sdata->smap) == smap)
                return sdata;

        /* Slow path (cache miss) */
        hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
                                  rcu_read_lock_trace_held())
                if (rcu_access_pointer(SDATA(selem)->smap) == smap)
                        break;

        if (!selem)
                return NULL;

        sdata = SDATA(selem);
        if (cacheit_lockit) {
                unsigned long flags;

                /* spinlock is needed to avoid racing with the
                 * parallel delete.  Otherwise, publishing an already
                 * deleted sdata to the cache will become a use-after-free
                 * problem in the next bpf_local_storage_lookup().
                 */
                raw_spin_lock_irqsave(&local_storage->lock, flags);
                if (selem_linked_to_storage(selem))
                        rcu_assign_pointer(local_storage->cache[smap->cache_idx],
                                           sdata);
                raw_spin_unlock_irqrestore(&local_storage->lock, flags);
        }

        return sdata;
}

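/* Map update flags against current elem presence: BPF_NOEXIST fails if an
 * elem already exists, BPF_EXIST fails if there is none to update.
 */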
static int check_flags(const struct bpf_local_storage_data *old_sdata,
                       u64 map_flags)
{
        if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
                /* elem already exists */
                return -EEXIST;

        if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
                /* elem doesn't exist, cannot update it */
                return -ENOENT;

        return 0;
}

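/* Allocate an owner's very first bpf_local_storage, link first_selem into
 * it, and publish it to the owner.  The cmpxchg() below makes publishing
 * work from any context without taking a lock on the owner object.
 */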
int bpf_local_storage_alloc(void *owner,
                            struct bpf_local_storage_map *smap,
                            struct bpf_local_storage_elem *first_selem,
                            gfp_t gfp_flags)
{
        struct bpf_local_storage *prev_storage, *storage;
        struct bpf_local_storage **owner_storage_ptr;
        int err;

        err = mem_charge(smap, owner, sizeof(*storage));
        if (err)
                return err;

        storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
                                  gfp_flags | __GFP_NOWARN);
        if (!storage) {
                err = -ENOMEM;
                goto uncharge;
        }

        INIT_HLIST_HEAD(&storage->list);
        raw_spin_lock_init(&storage->lock);
        storage->owner = owner;

        bpf_selem_link_storage_nolock(storage, first_selem);
        bpf_selem_link_map(smap, first_selem);

        owner_storage_ptr =
                (struct bpf_local_storage **)owner_storage(smap, owner);
        /* Publish storage to the owner.
         * Instead of using any lock of the kernel object (i.e. owner),
         * cmpxchg will work with any kernel object regardless of what
         * the running context is (bh, irq, etc.).
         *
         * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
         * is protected by the storage->lock.  Hence, when freeing
         * the owner->storage, the storage->lock must be held before
         * setting owner->storage ptr to NULL.
         */
        prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
        if (unlikely(prev_storage)) {
                bpf_selem_unlink_map(first_selem);
                err = -EAGAIN;
                goto uncharge;

                /* Note that even though first_selem was linked to smap's
                 * bucket->list, first_selem can be freed immediately
                 * (instead of kfree_rcu) because
                 * bpf_local_storage_map_free() does a
                 * synchronize_rcu_mult (waiting for both sleepable and
                 * normal programs) before walking the bucket->list.
                 * Hence, no one is accessing selem from the
                 * bucket->list under rcu_read_lock().
                 */
        }

        return 0;

uncharge:
        kfree(storage);
        mem_uncharge(smap, owner, sizeof(*storage));
        return err;
}

/* sk cannot be going away because it is linking new elem
 * to sk->sk_bpf_storage. (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, it would leak (and cause other memory issues
 * during map destruction).
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
                         void *value, u64 map_flags, gfp_t gfp_flags)
{
        struct bpf_local_storage_data *old_sdata = NULL;
        struct bpf_local_storage_elem *selem = NULL;
        struct bpf_local_storage *local_storage;
        unsigned long flags;
        int err;

        /* BPF_EXIST and BPF_NOEXIST cannot be both set */
        if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
            /* BPF_F_LOCK can only be used in a value with spin_lock */
            unlikely((map_flags & BPF_F_LOCK) &&
                     !map_value_has_spin_lock(&smap->map)))
                return ERR_PTR(-EINVAL);

        if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
                return ERR_PTR(-EINVAL);

        local_storage = rcu_dereference_check(*owner_storage(smap, owner),
                                              bpf_rcu_lock_held());
        if (!local_storage || hlist_empty(&local_storage->list)) {
                /* Very first elem for the owner */
                err = check_flags(NULL, map_flags);
                if (err)
                        return ERR_PTR(err);

                selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
                if (!selem)
                        return ERR_PTR(-ENOMEM);

                err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
                if (err) {
                        kfree(selem);
                        mem_uncharge(smap, owner, smap->elem_size);
                        return ERR_PTR(err);
                }

                return SDATA(selem);
        }

        if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
                /* Hoping to find an old_sdata to do an inline update
                 * so that it can avoid taking the local_storage->lock
                 * and changing the lists.
                 */
                old_sdata =
                        bpf_local_storage_lookup(local_storage, smap, false);
                err = check_flags(old_sdata, map_flags);
                if (err)
                        return ERR_PTR(err);
                if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
                        copy_map_value_locked(&smap->map, old_sdata->data,
                                              value, false);
                        return old_sdata;
                }
        }

        if (gfp_flags == GFP_KERNEL) {
                selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
                if (!selem)
                        return ERR_PTR(-ENOMEM);
        }

        raw_spin_lock_irqsave(&local_storage->lock, flags);

        /* Recheck local_storage->list under local_storage->lock */
        if (unlikely(hlist_empty(&local_storage->list))) {
                /* A parallel del is happening and local_storage is going
                 * away.  It has just been checked before, so very
                 * unlikely.  Return instead of retry to keep things
                 * simple.
                 */
                err = -EAGAIN;
                goto unlock_err;
        }

        old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
        err = check_flags(old_sdata, map_flags);
        if (err)
                goto unlock_err;

        if (old_sdata && (map_flags & BPF_F_LOCK)) {
                copy_map_value_locked(&smap->map, old_sdata->data, value,
                                      false);
                selem = SELEM(old_sdata);
                goto unlock;
        }

        if (gfp_flags != GFP_KERNEL) {
                /* local_storage->lock is held.  Hence, we are sure
                 * we can unlink and uncharge the old_sdata successfully
                 * later.  So, instead of charging the new selem now
                 * and then uncharging the old selem later (which may cause
                 * a potential but unnecessary charge failure), avoid taking
                 * a charge at all here (the "!old_sdata" check) and the
                 * old_sdata will not be uncharged later during
                 * bpf_selem_unlink_storage_nolock().
                 */
                selem = bpf_selem_alloc(smap, owner, value, !old_sdata, gfp_flags);
                if (!selem) {
                        err = -ENOMEM;
                        goto unlock_err;
                }
        }

        /* First, link the new selem to the map */
        bpf_selem_link_map(smap, selem);

        /* Second, link (and publish) the new selem to local_storage */
        bpf_selem_link_storage_nolock(local_storage, selem);

        /* Third, remove old selem, SELEM(old_sdata) */
        if (old_sdata) {
                bpf_selem_unlink_map(SELEM(old_sdata));
                bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
                                                false, true);
        }

unlock:
        raw_spin_unlock_irqrestore(&local_storage->lock, flags);
        return SDATA(selem);

unlock_err:
        raw_spin_unlock_irqrestore(&local_storage->lock, flags);
        if (selem) {
                mem_uncharge(smap, owner, smap->elem_size);
                kfree(selem);
        }
        return ERR_PTR(err);
}

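/* Pick the least-used slot in the local_storage cache for a new map, so
 * that different maps tend to occupy different cache slots and
 * bpf_local_storage_lookup() stays on its cache-hit fast path.
 */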
u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
        u64 min_usage = U64_MAX;
        u16 i, res = 0;

        spin_lock(&cache->idx_lock);

        for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
                if (cache->idx_usage_counts[i] < min_usage) {
                        min_usage = cache->idx_usage_counts[i];
                        res = i;

                        /* Found a free cache_idx */
                        if (!min_usage)
                                break;
                }
        }
        cache->idx_usage_counts[res]++;

        spin_unlock(&cache->idx_lock);

        return res;
}

void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
                                      u16 idx)
{
        spin_lock(&cache->idx_lock);
        cache->idx_usage_counts[idx]--;
        spin_unlock(&cache->idx_lock);
}

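/* Free a local storage map.  When busy_counter is provided (task storage),
 * it is bumped around each unlink so that task storage helpers running on
 * the same CPU back off instead of deadlocking on the locks held here.
 */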
void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
                                int __percpu *busy_counter)
{
        struct bpf_local_storage_elem *selem;
        struct bpf_local_storage_map_bucket *b;
        unsigned int i;

        /* Note that this map might be concurrently cloned from
         * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
         * RCU read section to finish before proceeding. New RCU
         * read sections should be prevented via bpf_map_inc_not_zero.
         */
        synchronize_rcu();

        /* bpf prog and the userspace can no longer access this map
         * now.  No new selem (of this map) can be added
         * to the owner->storage or to the map bucket's list.
         *
         * The elem of this map can be cleaned up here
         * or when the storage is freed e.g.
         * by bpf_sk_storage_free() during __sk_destruct().
         */
        for (i = 0; i < (1U << smap->bucket_log); i++) {
                b = &smap->buckets[i];

                rcu_read_lock();
                /* No one is adding to b->list now */
                while ((selem = hlist_entry_safe(
                                rcu_dereference_raw(hlist_first_rcu(&b->list)),
                                struct bpf_local_storage_elem, map_node))) {
                        if (busy_counter) {
                                migrate_disable();
                                this_cpu_inc(*busy_counter);
                        }
                        bpf_selem_unlink(selem, false);
                        if (busy_counter) {
                                this_cpu_dec(*busy_counter);
                                migrate_enable();
                        }
                        cond_resched_rcu();
                }
                rcu_read_unlock();
        }

        /* While freeing the storage we may still need to access the map.
         *
         * e.g. when bpf_sk_storage_free() has unlinked selem from the map,
         * which then made the above while ((selem = ...)) loop
         * exit immediately.
         *
         * However, while freeing the storage one still needs to access the
         * smap->elem_size to do the uncharging in
         * bpf_selem_unlink_storage_nolock().
         *
         * Hence, wait another rcu grace period for the storage to be freed.
         */
        synchronize_rcu();

        kvfree(smap->buckets);
        bpf_map_area_free(smap);
}

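/* A local storage map must be BPF_F_NO_PREALLOC, have no max_entries,
 * use a 4-byte key, and carry BTF ids for both key and value so the
 * storage can be dumped from userspace.
 */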
int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
        if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
            !(attr->map_flags & BPF_F_NO_PREALLOC) ||
            attr->max_entries ||
            attr->key_size != sizeof(int) || !attr->value_size ||
            /* Enforce BTF for userspace sk dumping */
            !attr->btf_key_type_id || !attr->btf_value_type_id)
                return -EINVAL;

        if (!bpf_capable())
                return -EPERM;

        if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
                return -E2BIG;

        return 0;
}

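/* Scale the bucket count with the number of possible CPUs (rounded up to
 * a power of two, minimum 2) to spread bucket-lock contention.
 */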
struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
{
        struct bpf_local_storage_map *smap;
        unsigned int i;
        u32 nbuckets;

        smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
        if (!smap)
                return ERR_PTR(-ENOMEM);
        bpf_map_init_from_attr(&smap->map, attr);

        nbuckets = roundup_pow_of_two(num_possible_cpus());
        /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
        nbuckets = max_t(u32, 2, nbuckets);
        smap->bucket_log = ilog2(nbuckets);
        smap->buckets = kvcalloc(nbuckets, sizeof(*smap->buckets),
                                 GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
        if (!smap->buckets) {
                bpf_map_area_free(smap);
                return ERR_PTR(-ENOMEM);
        }

        for (i = 0; i < nbuckets; i++) {
                INIT_HLIST_HEAD(&smap->buckets[i].list);
                raw_spin_lock_init(&smap->buckets[i].lock);
        }

        smap->elem_size =
                sizeof(struct bpf_local_storage_elem) + attr->value_size;

        return smap;
}

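/* The map key must be described in BTF as a plain 32-bit integer with no
 * bit offset.
 */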
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
                                    const struct btf *btf,
                                    const struct btf_type *key_type,
                                    const struct btf_type *value_type)
{
        u32 int_data;

        if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
                return -EINVAL;

        int_data = *(u32 *)(key_type + 1);
        if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
                return -EINVAL;

        return 0;
}