// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>

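/* Reserves the cache-slot bookkeeping used to give frequently-used sk
 * storage maps a fixed index in sk->sk_bpf_storage->cache, so their
 * lookups can skip the linear search over the storage list.
 */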
DEFINE_BPF_STORAGE_CACHE(sk_cache);

static struct bpf_local_storage_data *
bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_map *smap;

	sk_storage =
		rcu_dereference_check(sk->sk_bpf_storage, bpf_rcu_lock_held());
	if (!sk_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}

static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = bpf_sk_storage_lookup(sk, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata), false);

	return 0;
}

/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_local_storage *sk_storage;

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage) {
		rcu_read_unlock();
		return;
	}

	bpf_local_storage_destroy(sk_storage);
	rcu_read_unlock();
}

static void bpf_sk_storage_map_free(struct bpf_map *map)
{
	bpf_local_storage_map_free(map, &sk_cache, NULL);
}

static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
	return bpf_local_storage_map_alloc(attr, &sk_cache, false);
}

static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}

static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_sk_storage_lookup(sock->sk, map, true);
		sockfd_put(sock);
		return sdata ? sdata->data : NULL;
	}

	return ERR_PTR(err);
}

static long bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
					  void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_local_storage_update(
			sock->sk, (struct bpf_local_storage_map *)map, value,
			map_flags, GFP_ATOMIC);
		sockfd_put(sock);
		return PTR_ERR_OR_ZERO(sdata);
	}

	return err;
}

static long bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		err = bpf_sk_storage_del(sock->sk, map);
		sockfd_put(sock);
		return err;
	}

	return err;
}

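/* The three *_elem() ops above back the bpf(2) map syscalls on this map
 * type: the key userspace passes is a socket fd, not a socket pointer.
 * A minimal userspace sketch using the libbpf wrappers (fd names are
 * illustrative, error handling elided):
 *
 *	int sk_fd = socket(AF_INET, SOCK_STREAM, 0);
 *	__u64 val = 42;
 *
 *	bpf_map_update_elem(map_fd, &sk_fd, &val, BPF_ANY);
 *	bpf_map_lookup_elem(map_fd, &sk_fd, &val);
 */
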
static struct bpf_local_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
			  struct bpf_local_storage_map *smap,
			  struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_elem *copy_selem;

	copy_selem = bpf_selem_alloc(smap, newsk, NULL, true, GFP_ATOMIC);
	if (!copy_selem)
		return NULL;

	if (btf_record_has_field(smap->map.record, BPF_SPIN_LOCK))
		copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
				      SDATA(selem)->data, true);
	else
		copy_map_value(&smap->map, SDATA(copy_selem)->data,
			       SDATA(selem)->data);

	return copy_selem;
}

int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
	struct bpf_local_storage *new_sk_storage = NULL;
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	int ret = 0;

	RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);

	if (!sk_storage || hlist_empty(&sk_storage->list))
		goto out;

	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		struct bpf_local_storage_elem *copy_selem;
		struct bpf_local_storage_map *smap;
		struct bpf_map *map;

		smap = rcu_dereference(SDATA(selem)->smap);
		if (!(smap->map.map_flags & BPF_F_CLONE))
			continue;

		/* Note that for lockless listeners adding new element
		 * here can race with cleanup in bpf_local_storage_map_free.
		 * Try to grab map refcnt to make sure that it's still
		 * alive and prevent concurrent removal.
		 */
		map = bpf_map_inc_not_zero(&smap->map);
		if (IS_ERR(map))
			continue;

		copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
		if (!copy_selem) {
			ret = -ENOMEM;
			bpf_map_put(map);
			goto out;
		}

		if (new_sk_storage) {
			bpf_selem_link_map(smap, copy_selem);
			bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
		} else {
			ret = bpf_local_storage_alloc(newsk, smap, copy_selem, GFP_ATOMIC);
			if (ret) {
				bpf_selem_free(copy_selem, smap, true);
				atomic_sub(smap->elem_size,
					   &newsk->sk_omem_alloc);
				bpf_map_put(map);
				goto out;
			}

			new_sk_storage =
				rcu_dereference(copy_selem->local_storage);
		}
		bpf_map_put(map);
	}

out:
	rcu_read_unlock();

	/* In case of an error, don't free anything explicitly here, the
	 * caller is responsible to call bpf_sk_storage_free.
	 */

	return ret;
}

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	struct bpf_local_storage_data *sdata;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
		return (unsigned long)NULL;

	sdata = bpf_sk_storage_lookup(sk, map, true);
	if (sdata)
		return (unsigned long)sdata->data;

	if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
	    /* Cannot add new elem to a going away sk.
	     * Otherwise, the new elem may become a leak
	     * (and also other memory issues during map
	     *  destruction).
	     */
	    refcount_inc_not_zero(&sk->sk_refcnt)) {
		sdata = bpf_local_storage_update(
			sk, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST, gfp_flags);
		/* sk must be a fullsock (guaranteed by verifier),
		 * so sock_gen_put() is unnecessary.
		 */
		sock_put(sk);
		return IS_ERR(sdata) ?
			(unsigned long)NULL : (unsigned long)sdata->data;
	}

	return (unsigned long)NULL;
}

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!sk || !sk_fullsock(sk))
		return -EINVAL;

	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
		int err;

		err = bpf_sk_storage_del(sk, map);
		sock_put(sk);
		return err;
	}

	return -ENOENT;
}

static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
				 void *owner, u32 size)
{
	int optmem_max = READ_ONCE(sysctl_optmem_max);
	struct sock *sk = (struct sock *)owner;

	/* same check as in sock_kmalloc() */
	if (size <= optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
		atomic_add(size, &sk->sk_omem_alloc);
		return 0;
	}

	return -ENOMEM;
}

static void bpf_sk_storage_uncharge(struct bpf_local_storage_map *smap,
				    void *owner, u32 size)
{
	struct sock *sk = owner;

	atomic_sub(size, &sk->sk_omem_alloc);
}

static struct bpf_local_storage __rcu **
bpf_sk_storage_ptr(void *owner)
{
	struct sock *sk = owner;

	return &sk->sk_bpf_storage;
}

const struct bpf_map_ops sk_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = bpf_sk_storage_map_alloc,
	.map_free = bpf_sk_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
	.map_update_elem = bpf_fd_sk_storage_update_elem,
	.map_delete_elem = bpf_fd_sk_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_id = &bpf_local_storage_map_btf_id[0],
	.map_local_storage_charge = bpf_sk_storage_charge,
	.map_local_storage_uncharge = bpf_sk_storage_uncharge,
	.map_owner_storage_ptr = bpf_sk_storage_ptr,
	.map_mem_usage = bpf_local_storage_map_mem_usage,
};

const struct bpf_func_proto bpf_sk_storage_get_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_CTX, /* context is 'struct sock' */
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
	.func		= bpf_sk_storage_delete,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};

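/* A minimal sketch of BPF-program-side usage of the two helpers above
 * (map name and value type are illustrative, not part of this file):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, __u64);
 *	} sk_stg SEC(".maps");
 *
 *	__u64 *val = bpf_sk_storage_get(&sk_stg, sk, NULL,
 *					BPF_SK_STORAGE_GET_F_CREATE);
 *	if (val)
 *		__sync_fetch_and_add(val, 1);
 *	...
 *	bpf_sk_storage_delete(&sk_stg, sk);
 */
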
static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
{
	const struct btf *btf_vmlinux;
	const struct btf_type *t;
	const char *tname;
	u32 btf_id;

	if (prog->aux->dst_prog)
		return false;

	/* Ensure the tracing program is not tracing
	 * any bpf_sk_storage*() function and also
	 * use the bpf_sk_storage_(get|delete) helper.
	 */
	switch (prog->expected_attach_type) {
	case BPF_TRACE_ITER:
	case BPF_TRACE_RAW_TP:
		/* bpf_sk_storage has no trace point */
		return true;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		btf_vmlinux = bpf_get_btf_vmlinux();
		if (IS_ERR_OR_NULL(btf_vmlinux))
			return false;
		btf_id = prog->aux->attach_btf_id;
		t = btf_type_by_id(btf_vmlinux, btf_id);
		tname = btf_name_by_offset(btf_vmlinux, t->name_off);
		return !!strncmp(tname, "bpf_sk_storage",
				 strlen("bpf_sk_storage"));
	default:
		return false;
	}

	return false;
}

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (in_hardirq() || in_nmi())
		return (unsigned long)NULL;

	return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags,
						     gfp_flags);
}

BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
	   struct sock *, sk)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (in_hardirq() || in_nmi())
		return -EPERM;

	return ____bpf_sk_storage_delete(map, sk);
}

const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
	.func		= bpf_sk_storage_get_tracing,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
	.allowed	= bpf_sk_storage_tracing_allowed,
};

const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
	.func		= bpf_sk_storage_delete_tracing,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.allowed	= bpf_sk_storage_tracing_allowed,
};

struct bpf_sk_storage_diag {
	u32 nr_maps;
	struct bpf_map *maps[];
};

/* The reply will be like:
 * INET_DIAG_BPF_SK_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 * ....
 */

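/* The request side that selects which maps to dump is built by the
 * sock_diag users of this API, e.g. inet_diag (a sketch mirroring the
 * reply layout above):
 * INET_DIAG_REQ_SK_BPF_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE_REQ_MAP_FD (nla_put_u32)
 *	SK_DIAG_BPF_STORAGE_REQ_MAP_FD (nla_put_u32)
 */
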
static int nla_value_size(u32 value_size)
{
	/* SK_DIAG_BPF_STORAGE (nla_nest)
	 *	SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
	 *	SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
	 */
	return nla_total_size(0) + nla_total_size(sizeof(u32)) +
	       nla_total_size_64bit(value_size);
}

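/* Worked example (a sketch; the exact total also depends on NLA_ALIGN
 * and on extra padding without CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS):
 * for value_size == 8, nla_value_size() returns
 *	nla_total_size(0)		=  4	(nest header)
 *	+ nla_total_size(sizeof(u32))	=  8	(map-id attribute)
 *	+ nla_total_size_64bit(8)	= 12	(value attribute)
 * i.e. 24 bytes of reply space per storage entry.
 */
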
void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
{
	u32 i;

	if (!diag)
		return;

	for (i = 0; i < diag->nr_maps; i++)
		bpf_map_put(diag->maps[i]);

	kfree(diag);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);

static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
			   const struct bpf_map *map)
{
	u32 i;

	for (i = 0; i < diag->nr_maps; i++) {
		if (diag->maps[i] == map)
			return true;
	}

	return false;
}

struct bpf_sk_storage_diag *
bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
{
	struct bpf_sk_storage_diag *diag;
	struct nlattr *nla;
	u32 nr_maps = 0;
	int rem, err;

	/* bpf_local_storage_map is currently limited to CAP_SYS_ADMIN as
	 * the map_alloc_check() side also does.
	 */
	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD) {
			if (nla_len(nla) != sizeof(u32))
				return ERR_PTR(-EINVAL);
			nr_maps++;
		}
	}

	diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL);
	if (!diag)
		return ERR_PTR(-ENOMEM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		struct bpf_map *map;
		int map_fd;

		if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			continue;

		map_fd = nla_get_u32(nla);
		map = bpf_map_get(map_fd);
		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto err_free;
		}
		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
			bpf_map_put(map);
			err = -EINVAL;
			goto err_free;
		}
		if (diag_check_dup(diag, map)) {
			bpf_map_put(map);
			err = -EEXIST;
			goto err_free;
		}
		diag->maps[diag->nr_maps++] = map;
	}

	return diag;

err_free:
	bpf_sk_storage_diag_free(diag);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);

static int diag_get(struct bpf_local_storage_data *sdata, struct sk_buff *skb)
{
	struct nlattr *nla_stg, *nla_value;
	struct bpf_local_storage_map *smap;

	/* It cannot exceed max nlattr's payload */
	BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < BPF_LOCAL_STORAGE_MAX_VALUE_SIZE);

	nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
	if (!nla_stg)
		return -EMSGSIZE;

	smap = rcu_dereference(sdata->smap);
	if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
		goto errout;

	nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
				      smap->map.value_size,
				      SK_DIAG_BPF_STORAGE_PAD);
	if (!nla_value)
		goto errout;

	if (btf_record_has_field(smap->map.record, BPF_SPIN_LOCK))
		copy_map_value_locked(&smap->map, nla_data(nla_value),
				      sdata->data, true);
	else
		copy_map_value(&smap->map, nla_data(nla_value), sdata->data);

	nla_nest_end(skb, nla_stg);
	return 0;

errout:
	nla_nest_cancel(skb, nla_stg);
	return -EMSGSIZE;
}

static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
				       int stg_array_type,
				       unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;

	rcu_read_lock();

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		smap = rcu_dereference(SDATA(selem)->smap);
		diag_size += nla_value_size(smap->map.value_size);

		if (nla_stgs && diag_get(SDATA(selem), skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}

	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}

int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
			    struct sock *sk, struct sk_buff *skb,
			    int stg_array_type,
			    unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_data *sdata;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;
	u32 i;

	*res_diag_size = 0;

	/* No map has been specified.  Dump all. */
	if (!diag->nr_maps)
		return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
						   res_diag_size);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	for (i = 0; i < diag->nr_maps; i++) {
		sdata = bpf_local_storage_lookup(sk_storage,
				(struct bpf_local_storage_map *)diag->maps[i],
				false);

		if (!sdata)
			continue;

		diag_size += nla_value_size(diag->maps[i]->value_size);

		if (nla_stgs && diag_get(sdata, skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}
	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);

struct bpf_iter_seq_sk_storage_map_info {
	struct bpf_map *map;
	unsigned int bucket_id;
	unsigned skip_elems;
};

static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
				 struct bpf_local_storage_elem *prev_selem)
	__acquires(RCU) __releases(RCU)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	u32 skip_elems = info->skip_elems;
	struct bpf_local_storage_map *smap;
	u32 bucket_id = info->bucket_id;
	u32 i, count, n_buckets;
	struct bpf_local_storage_map_bucket *b;

	smap = (struct bpf_local_storage_map *)info->map;
	n_buckets = 1U << smap->bucket_log;
	if (bucket_id >= n_buckets)
		return NULL;

	/* try to find next selem in the same bucket */
	selem = prev_selem;
	count = 0;
	while (selem) {
		selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
					 struct bpf_local_storage_elem, map_node);
		if (!selem) {
			/* not found, unlock and go to the next bucket */
			b = &smap->buckets[bucket_id++];
			rcu_read_unlock();
			skip_elems = 0;
			break;
		}
		sk_storage = rcu_dereference(selem->local_storage);
		if (sk_storage) {
			info->skip_elems = skip_elems + count;
			return selem;
		}
		count++;
	}

	for (i = bucket_id; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];
		rcu_read_lock();
		count = 0;
		hlist_for_each_entry_rcu(selem, &b->list, map_node) {
			sk_storage = rcu_dereference(selem->local_storage);
			if (sk_storage && count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return selem;
			}
			count++;
		}
		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}

static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_local_storage_elem *selem;

	selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL);
	if (!selem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return selem;
}

static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v,
					 loff_t *pos)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_sk_storage_map_seq_find_next(seq->private, v);
}

struct bpf_iter__bpf_sk_storage_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(struct sock *, sk);
	__bpf_md_ptr(void *, value);
};

DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,
		     struct bpf_map *map, struct sock *sk,
		     void *value)

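/* A minimal sketch of an iterator program consuming this context (names
 * are illustrative; assumes a map whose value holds a __u64):
 *
 *	SEC("iter/bpf_sk_storage_map")
 *	int dump_sk_storage(struct bpf_iter__bpf_sk_storage_map *ctx)
 *	{
 *		struct sock *sk = ctx->sk;
 *		__u64 *val = ctx->value;
 *
 *		if (!sk || !val)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "0x%llx\n", *val);
 *		return 0;
 *	}
 */
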
static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
					 struct bpf_local_storage_elem *selem)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
	struct bpf_iter__bpf_sk_storage_map ctx = {};
	struct bpf_local_storage *sk_storage;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, selem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (selem) {
			sk_storage = rcu_dereference(selem->local_storage);
			ctx.sk = sk_storage->owner;
			ctx.value = SDATA(selem)->data;
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_sk_storage_map_seq_show(seq, v);
}

static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	if (!v)
		(void)__bpf_sk_storage_map_seq_show(seq, v);
	else
		rcu_read_unlock();
}

static int bpf_iter_init_sk_storage_map(void *priv_data,
					struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	seq_info->map = aux->map;
	return 0;
}

static void bpf_iter_fini_sk_storage_map(void *priv_data)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
}

static int bpf_iter_attach_map(struct bpf_prog *prog,
			       union bpf_iter_link_info *linfo,
			       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
		goto put_map;

	if (prog->aux->max_rdwr_access > map->value_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static const struct seq_operations bpf_sk_storage_map_seq_ops = {
	.start  = bpf_sk_storage_map_seq_start,
	.next   = bpf_sk_storage_map_seq_next,
	.stop   = bpf_sk_storage_map_seq_stop,
	.show   = bpf_sk_storage_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_sk_storage_map_seq_ops,
	.init_seq_private	= bpf_iter_init_sk_storage_map,
	.fini_seq_private	= bpf_iter_fini_sk_storage_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_sk_storage_map_info),
};

static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
	.target			= "bpf_sk_storage_map",
	.attach_target		= bpf_iter_attach_map,
	.detach_target		= bpf_iter_detach_map,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, value),
		  PTR_TO_BUF | PTR_MAYBE_NULL },
	},
	.seq_info		= &iter_seq_info,
};

static int __init bpf_sk_storage_map_iter_init(void)
{
	bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info);
}
late_initcall(bpf_sk_storage_map_iter_init);