// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>

/* cache_idx allocator shared by all sk storage maps */
DEFINE_BPF_STORAGE_CACHE(sk_cache);

static struct bpf_local_storage_data *
bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
        struct bpf_local_storage *sk_storage;
        struct bpf_local_storage_map *smap;

        sk_storage = rcu_dereference(sk->sk_bpf_storage);
        if (!sk_storage)
                return NULL;

        smap = (struct bpf_local_storage_map *)map;
        return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}

static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
{
        struct bpf_local_storage_data *sdata;

        sdata = bpf_sk_storage_lookup(sk, map, false);
        if (!sdata)
                return -ENOENT;

        bpf_selem_unlink(SELEM(sdata));

        return 0;
}

/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
        struct bpf_local_storage_elem *selem;
        struct bpf_local_storage *sk_storage;
        bool free_sk_storage = false;
        struct hlist_node *n;

        rcu_read_lock();
        sk_storage = rcu_dereference(sk->sk_bpf_storage);
        if (!sk_storage) {
                rcu_read_unlock();
                return;
        }

        /* Neither the bpf_prog nor the bpf-map's syscall
         * could be modifying the sk_storage->list now.
         * Thus, no elem can be added to or deleted from the
         * sk_storage->list by the bpf_prog or by the bpf-map's syscall.
         *
         * It is racing with bpf_local_storage_map_free() alone
         * when unlinking elem from the sk_storage->list and
         * the map's bucket->list.
         */
        raw_spin_lock_bh(&sk_storage->lock);
        hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
                /* Always unlink from map before unlinking from
                 * sk_storage.
                 */
                bpf_selem_unlink_map(selem);
                free_sk_storage = bpf_selem_unlink_storage_nolock(sk_storage,
                                                                  selem, true);
        }
        raw_spin_unlock_bh(&sk_storage->lock);
        rcu_read_unlock();

        if (free_sk_storage)
                kfree_rcu(sk_storage, rcu);
}

static void bpf_sk_storage_map_free(struct bpf_map *map)
{
        struct bpf_local_storage_map *smap;

        smap = (struct bpf_local_storage_map *)map;
        bpf_local_storage_cache_idx_free(&sk_cache, smap->cache_idx);
        bpf_local_storage_map_free(smap, NULL);
}

static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
        struct bpf_local_storage_map *smap;

        smap = bpf_local_storage_map_alloc(attr);
        if (IS_ERR(smap))
                return ERR_CAST(smap);

        smap->cache_idx = bpf_local_storage_cache_idx_get(&sk_cache);
        return &smap->map;
}

static int notsupp_get_next_key(struct bpf_map *map, void *key,
                                void *next_key)
{
        return -ENOTSUPP;
}

static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_local_storage_data *sdata;
        struct socket *sock;
        int fd, err;

        fd = *(int *)key;
        sock = sockfd_lookup(fd, &err);
        if (sock) {
                sdata = bpf_sk_storage_lookup(sock->sk, map, true);
                sockfd_put(sock);
                return sdata ? sdata->data : NULL;
        }

        return ERR_PTR(err);
}

static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
                                         void *value, u64 map_flags)
{
        struct bpf_local_storage_data *sdata;
        struct socket *sock;
        int fd, err;

        fd = *(int *)key;
        sock = sockfd_lookup(fd, &err);
        if (sock) {
                sdata = bpf_local_storage_update(
                        sock->sk, (struct bpf_local_storage_map *)map, value,
                        map_flags);
                sockfd_put(sock);
                return PTR_ERR_OR_ZERO(sdata);
        }

        return err;
}

static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
        struct socket *sock;
        int fd, err;

        fd = *(int *)key;
        sock = sockfd_lookup(fd, &err);
        if (sock) {
                err = bpf_sk_storage_del(sock->sk, map);
                sockfd_put(sock);
                return err;
        }

        return err;
}

static struct bpf_local_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
                          struct bpf_local_storage_map *smap,
                          struct bpf_local_storage_elem *selem)
{
        struct bpf_local_storage_elem *copy_selem;

        copy_selem = bpf_selem_alloc(smap, newsk, NULL, true);
        if (!copy_selem)
                return NULL;

        if (map_value_has_spin_lock(&smap->map))
                copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
                                      SDATA(selem)->data, true);
        else
                copy_map_value(&smap->map, SDATA(copy_selem)->data,
                               SDATA(selem)->data);

        return copy_selem;
}

int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
        struct bpf_local_storage *new_sk_storage = NULL;
        struct bpf_local_storage *sk_storage;
        struct bpf_local_storage_elem *selem;
        int ret = 0;

        RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

        rcu_read_lock();
        sk_storage = rcu_dereference(sk->sk_bpf_storage);

        if (!sk_storage || hlist_empty(&sk_storage->list))
                goto out;

        hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
                struct bpf_local_storage_elem *copy_selem;
                struct bpf_local_storage_map *smap;
                struct bpf_map *map;

                smap = rcu_dereference(SDATA(selem)->smap);
                if (!(smap->map.map_flags & BPF_F_CLONE))
                        continue;

                /* Note that for lockless listeners adding new element
                 * here can race with cleanup in bpf_local_storage_map_free.
                 * Try to grab map refcnt to make sure that it's still
                 * alive and prevent concurrent removal.
                 */
                map = bpf_map_inc_not_zero(&smap->map);
                if (IS_ERR(map))
                        continue;

                copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
                if (!copy_selem) {
                        ret = -ENOMEM;
                        bpf_map_put(map);
                        goto out;
                }

                if (new_sk_storage) {
                        bpf_selem_link_map(smap, copy_selem);
                        bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
                } else {
                        ret = bpf_local_storage_alloc(newsk, smap, copy_selem);
                        if (ret) {
                                kfree(copy_selem);
                                atomic_sub(smap->elem_size,
                                           &newsk->sk_omem_alloc);
                                bpf_map_put(map);
                                goto out;
                        }

                        new_sk_storage =
                                rcu_dereference(copy_selem->local_storage);
                }
                bpf_map_put(map);
        }

out:
        rcu_read_unlock();

        /* In case of an error, don't free anything explicitly here, the
         * caller is responsible to call bpf_sk_storage_free.
         */

        return ret;
}

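/* bpf_sk_storage_get()/bpf_sk_storage_delete() helper implementations.
 * For reference, a BPF program would use them roughly like the sketch
 * below (illustrative only; the map and value type names are made up):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, struct my_stg);
 *	} sk_stg_map SEC(".maps");
 *
 *	stg = bpf_sk_storage_get(&sk_stg_map, sk, NULL,
 *				 BPF_SK_STORAGE_GET_F_CREATE);
 */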
BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
           void *, value, u64, flags)
{
        struct bpf_local_storage_data *sdata;

        if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
                return (unsigned long)NULL;

        sdata = bpf_sk_storage_lookup(sk, map, true);
        if (sdata)
                return (unsigned long)sdata->data;

        if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
            /* Cannot add new elem to a going away sk.
             * Otherwise, the new elem may become a leak
             * (and also other memory issues during map
             *  destruction).
             */
            refcount_inc_not_zero(&sk->sk_refcnt)) {
                sdata = bpf_local_storage_update(
                        sk, (struct bpf_local_storage_map *)map, value,
                        BPF_NOEXIST);
                /* sk must be a fullsock (guaranteed by verifier),
                 * so sock_gen_put() is unnecessary.
                 */
                sock_put(sk);
                return IS_ERR(sdata) ?
                        (unsigned long)NULL : (unsigned long)sdata->data;
        }

        return (unsigned long)NULL;
}

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
        if (!sk || !sk_fullsock(sk))
                return -EINVAL;

        if (refcount_inc_not_zero(&sk->sk_refcnt)) {
                int err;

                err = bpf_sk_storage_del(sk, map);
                sock_put(sk);
                return err;
        }

        return -ENOENT;
}

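/* Memory used by an sk storage elem is charged against the owning
 * socket's optmem budget (sk_omem_alloc), using the same limit check
 * as sock_kmalloc().
 */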
static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
                                 void *owner, u32 size)
{
        int optmem_max = READ_ONCE(sysctl_optmem_max);
        struct sock *sk = (struct sock *)owner;

        /* same check as in sock_kmalloc() */
        if (size <= optmem_max &&
            atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
                atomic_add(size, &sk->sk_omem_alloc);
                return 0;
        }

        return -ENOMEM;
}

static void bpf_sk_storage_uncharge(struct bpf_local_storage_map *smap,
                                    void *owner, u32 size)
{
        struct sock *sk = owner;

        atomic_sub(size, &sk->sk_omem_alloc);
}

static struct bpf_local_storage __rcu **
bpf_sk_storage_ptr(void *owner)
{
        struct sock *sk = owner;

        return &sk->sk_bpf_storage;
}

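/* Map ops for BPF_MAP_TYPE_SK_STORAGE.  get_next_key is intentionally
 * unsupported, so the map cannot be walked through the map syscall;
 * dumping is done via sock_diag or the bpf_sk_storage_map iterator below.
 */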
static int sk_storage_map_btf_id;
const struct bpf_map_ops sk_storage_map_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_alloc_check = bpf_local_storage_map_alloc_check,
        .map_alloc = bpf_sk_storage_map_alloc,
        .map_free = bpf_sk_storage_map_free,
        .map_get_next_key = notsupp_get_next_key,
        .map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
        .map_update_elem = bpf_fd_sk_storage_update_elem,
        .map_delete_elem = bpf_fd_sk_storage_delete_elem,
        .map_check_btf = bpf_local_storage_map_check_btf,
        .map_btf_name = "bpf_local_storage_map",
        .map_btf_id = &sk_storage_map_btf_id,
        .map_local_storage_charge = bpf_sk_storage_charge,
        .map_local_storage_uncharge = bpf_sk_storage_uncharge,
        .map_owner_storage_ptr = bpf_sk_storage_ptr,
};

const struct bpf_func_proto bpf_sk_storage_get_proto = {
        .func = bpf_sk_storage_get,
        .gpl_only = false,
        .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
        .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
        .arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto = {
        .func = bpf_sk_storage_get,
        .gpl_only = false,
        .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_CTX, /* context is 'struct sock' */
        .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
        .arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
        .func = bpf_sk_storage_delete,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};

static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
{
        const struct btf *btf_vmlinux;
        const struct btf_type *t;
        const char *tname;
        u32 btf_id;

        if (prog->aux->dst_prog)
                return false;

        /* Ensure the tracing program is not tracing
         * any bpf_sk_storage*() function while also
         * using the bpf_sk_storage_(get|delete) helper.
         */
        switch (prog->expected_attach_type) {
        case BPF_TRACE_ITER:
        case BPF_TRACE_RAW_TP:
                /* bpf_sk_storage has no trace point */
                return true;
        case BPF_TRACE_FENTRY:
        case BPF_TRACE_FEXIT:
                btf_vmlinux = bpf_get_btf_vmlinux();
                btf_id = prog->aux->attach_btf_id;
                t = btf_type_by_id(btf_vmlinux, btf_id);
                tname = btf_name_by_offset(btf_vmlinux, t->name_off);
                return !!strncmp(tname, "bpf_sk_storage",
                                 strlen("bpf_sk_storage"));
        default:
                return false;
        }

        return false;
}

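/* Tracing variants of the get/delete helpers.  They may run in contexts
 * where taking the storage spinlocks is not safe, so hardirq and NMI
 * contexts are rejected up front.
 */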
BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
           void *, value, u64, flags)
{
        if (in_hardirq() || in_nmi())
                return (unsigned long)NULL;

        return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags);
}

BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
           struct sock *, sk)
{
        if (in_hardirq() || in_nmi())
                return -EPERM;

        return ____bpf_sk_storage_delete(map, sk);
}

const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
        .func = bpf_sk_storage_get_tracing,
        .gpl_only = false,
        .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_BTF_ID,
        .arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
        .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
        .arg4_type = ARG_ANYTHING,
        .allowed = bpf_sk_storage_tracing_allowed,
};

const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
        .func = bpf_sk_storage_delete_tracing,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_BTF_ID,
        .arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
        .allowed = bpf_sk_storage_tracing_allowed,
};

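/* sock_diag integration: lets an inet_diag/sock_diag dumper include the
 * sk storage values of the requested maps (or of all maps) in its
 * netlink reply, see bpf_sk_storage_diag_put() below.
 */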
struct bpf_sk_storage_diag {
        u32 nr_maps;
        struct bpf_map *maps[];
};

/* The reply will be like:
 * INET_DIAG_BPF_SK_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	....
 */
static int nla_value_size(u32 value_size)
{
        /* SK_DIAG_BPF_STORAGE (nla_nest)
         *	SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
         *	SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
         */
        return nla_total_size(0) + nla_total_size(sizeof(u32)) +
                nla_total_size_64bit(value_size);
}

void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
{
        u32 i;

        if (!diag)
                return;

        for (i = 0; i < diag->nr_maps; i++)
                bpf_map_put(diag->maps[i]);

        kfree(diag);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);

static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
                           const struct bpf_map *map)
{
        u32 i;

        for (i = 0; i < diag->nr_maps; i++) {
                if (diag->maps[i] == map)
                        return true;
        }

        return false;
}

struct bpf_sk_storage_diag *
bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
{
        struct bpf_sk_storage_diag *diag;
        struct nlattr *nla;
        u32 nr_maps = 0;
        int rem, err;

        /* bpf_local_storage_map is currently limited to CAP_SYS_ADMIN as
         * the map_alloc_check() side also does.
         */
        if (!bpf_capable())
                return ERR_PTR(-EPERM);

        nla_for_each_nested(nla, nla_stgs, rem) {
                if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
                        nr_maps++;
        }

        diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL);
        if (!diag)
                return ERR_PTR(-ENOMEM);

        nla_for_each_nested(nla, nla_stgs, rem) {
                struct bpf_map *map;
                int map_fd;

                if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
                        continue;

                map_fd = nla_get_u32(nla);
                map = bpf_map_get(map_fd);
                if (IS_ERR(map)) {
                        err = PTR_ERR(map);
                        goto err_free;
                }
                if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
                        bpf_map_put(map);
                        err = -EINVAL;
                        goto err_free;
                }
                if (diag_check_dup(diag, map)) {
                        bpf_map_put(map);
                        err = -EEXIST;
                        goto err_free;
                }
                diag->maps[diag->nr_maps++] = map;
        }

        return diag;

err_free:
        bpf_sk_storage_diag_free(diag);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);

static int diag_get(struct bpf_local_storage_data *sdata, struct sk_buff *skb)
{
        struct nlattr *nla_stg, *nla_value;
        struct bpf_local_storage_map *smap;

        /* It cannot exceed max nlattr's payload */
        BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < BPF_LOCAL_STORAGE_MAX_VALUE_SIZE);

        nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
        if (!nla_stg)
                return -EMSGSIZE;

        smap = rcu_dereference(sdata->smap);
        if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
                goto errout;

        nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
                                      smap->map.value_size,
                                      SK_DIAG_BPF_STORAGE_PAD);
        if (!nla_value)
                goto errout;

        if (map_value_has_spin_lock(&smap->map))
                copy_map_value_locked(&smap->map, nla_data(nla_value),
                                      sdata->data, false);
        else
                copy_map_value(&smap->map, nla_data(nla_value), sdata->data);

        nla_nest_end(skb, nla_stg);
        return 0;

errout:
        nla_nest_cancel(skb, nla_stg);
        return -EMSGSIZE;
}

static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
                                       int stg_array_type,
                                       unsigned int *res_diag_size)
{
        /* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
        unsigned int diag_size = nla_total_size(0);
        struct bpf_local_storage *sk_storage;
        struct bpf_local_storage_elem *selem;
        struct bpf_local_storage_map *smap;
        struct nlattr *nla_stgs;
        unsigned int saved_len;
        int err = 0;

        rcu_read_lock();

        sk_storage = rcu_dereference(sk->sk_bpf_storage);
        if (!sk_storage || hlist_empty(&sk_storage->list)) {
                rcu_read_unlock();
                return 0;
        }

        nla_stgs = nla_nest_start(skb, stg_array_type);
        if (!nla_stgs)
                /* Continue to learn diag_size */
                err = -EMSGSIZE;

        saved_len = skb->len;
        hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
                smap = rcu_dereference(SDATA(selem)->smap);
                diag_size += nla_value_size(smap->map.value_size);

                if (nla_stgs && diag_get(SDATA(selem), skb))
                        /* Continue to learn diag_size */
                        err = -EMSGSIZE;
        }

        rcu_read_unlock();

        if (nla_stgs) {
                if (saved_len == skb->len)
                        nla_nest_cancel(skb, nla_stgs);
                else
                        nla_nest_end(skb, nla_stgs);
        }

        if (diag_size == nla_total_size(0)) {
                *res_diag_size = 0;
                return 0;
        }

        *res_diag_size = diag_size;
        return err;
}

int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
                            struct sock *sk, struct sk_buff *skb,
                            int stg_array_type,
                            unsigned int *res_diag_size)
{
        /* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
        unsigned int diag_size = nla_total_size(0);
        struct bpf_local_storage *sk_storage;
        struct bpf_local_storage_data *sdata;
        struct nlattr *nla_stgs;
        unsigned int saved_len;
        int err = 0;
        u32 i;

        *res_diag_size = 0;

        /* No map has been specified. Dump all. */
        if (!diag->nr_maps)
                return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
                                                   res_diag_size);

        rcu_read_lock();
        sk_storage = rcu_dereference(sk->sk_bpf_storage);
        if (!sk_storage || hlist_empty(&sk_storage->list)) {
                rcu_read_unlock();
                return 0;
        }

        nla_stgs = nla_nest_start(skb, stg_array_type);
        if (!nla_stgs)
                /* Continue to learn diag_size */
                err = -EMSGSIZE;

        saved_len = skb->len;
        for (i = 0; i < diag->nr_maps; i++) {
                sdata = bpf_local_storage_lookup(sk_storage,
                                (struct bpf_local_storage_map *)diag->maps[i],
                                false);

                if (!sdata)
                        continue;

                diag_size += nla_value_size(diag->maps[i]->value_size);

                if (nla_stgs && diag_get(sdata, skb))
                        /* Continue to learn diag_size */
                        err = -EMSGSIZE;
        }

        rcu_read_unlock();

        if (nla_stgs) {
                if (saved_len == skb->len)
                        nla_nest_cancel(skb, nla_stgs);
                else
                        nla_nest_end(skb, nla_stgs);
        }

        if (diag_size == nla_total_size(0)) {
                *res_diag_size = 0;
                return 0;
        }

        *res_diag_size = diag_size;
        return err;
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);

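/* bpf_iter support: a bpf_sk_storage_map iterator walks every (sk, value)
 * pair stored in one sk storage map.  A minimal iterator program could
 * look roughly like this (sketch only; names are illustrative):
 *
 *	SEC("iter/bpf_sk_storage_map")
 *	int dump_sk_stg(struct bpf_iter__bpf_sk_storage_map *ctx)
 *	{
 *		if (!ctx->sk || !ctx->value)
 *			return 0;
 *		... read or update ctx->value here ...
 *		return 0;
 *	}
 */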
struct bpf_iter_seq_sk_storage_map_info {
        struct bpf_map *map;
        unsigned int bucket_id;
        unsigned skip_elems;
};

static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
                                 struct bpf_local_storage_elem *prev_selem)
        __acquires(RCU) __releases(RCU)
{
        struct bpf_local_storage *sk_storage;
        struct bpf_local_storage_elem *selem;
        u32 skip_elems = info->skip_elems;
        struct bpf_local_storage_map *smap;
        u32 bucket_id = info->bucket_id;
        u32 i, count, n_buckets;
        struct bpf_local_storage_map_bucket *b;

        smap = (struct bpf_local_storage_map *)info->map;
        n_buckets = 1U << smap->bucket_log;
        if (bucket_id >= n_buckets)
                return NULL;

        /* try to find next selem in the same bucket */
        selem = prev_selem;
        count = 0;
        while (selem) {
                selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
                                         struct bpf_local_storage_elem, map_node);
                if (!selem) {
                        /* not found, unlock and go to the next bucket */
                        b = &smap->buckets[bucket_id++];
                        rcu_read_unlock();
                        skip_elems = 0;
                        break;
                }
                sk_storage = rcu_dereference(selem->local_storage);
                if (sk_storage) {
                        info->skip_elems = skip_elems + count;
                        return selem;
                }
                count++;
        }

        for (i = bucket_id; i < (1U << smap->bucket_log); i++) {
                b = &smap->buckets[i];
                rcu_read_lock();
                count = 0;
                hlist_for_each_entry_rcu(selem, &b->list, map_node) {
                        sk_storage = rcu_dereference(selem->local_storage);
                        if (sk_storage && count >= skip_elems) {
                                info->bucket_id = i;
                                info->skip_elems = count;
                                return selem;
                        }
                        count++;
                }
                rcu_read_unlock();
                skip_elems = 0;
        }

        info->bucket_id = i;
        info->skip_elems = 0;
        return NULL;
}

static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct bpf_local_storage_elem *selem;

        selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL);
        if (selem && *pos == 0)
                ++*pos;

        return selem;
}

static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v,
                                         loff_t *pos)
{
        struct bpf_iter_seq_sk_storage_map_info *info = seq->private;

        ++*pos;
        ++info->skip_elems;
        return bpf_sk_storage_map_seq_find_next(seq->private, v);
}

struct bpf_iter__bpf_sk_storage_map {
        __bpf_md_ptr(struct bpf_iter_meta *, meta);
        __bpf_md_ptr(struct bpf_map *, map);
        __bpf_md_ptr(struct sock *, sk);
        __bpf_md_ptr(void *, value);
};

DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,
                     struct bpf_map *map, struct sock *sk,
                     void *value)

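/* The show() callback is also invoked from stop() with a NULL element so
 * that the iterator program gets an end-of-iteration call: selem == NULL
 * is passed to bpf_iter_get_info() as the in_stop flag.
 */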
static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
                                         struct bpf_local_storage_elem *selem)
{
        struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
        struct bpf_iter__bpf_sk_storage_map ctx = {};
        struct bpf_local_storage *sk_storage;
        struct bpf_iter_meta meta;
        struct bpf_prog *prog;
        int ret = 0;

        meta.seq = seq;
        prog = bpf_iter_get_info(&meta, selem == NULL);
        if (prog) {
                ctx.meta = &meta;
                ctx.map = info->map;
                if (selem) {
                        sk_storage = rcu_dereference(selem->local_storage);
                        ctx.sk = sk_storage->owner;
                        ctx.value = SDATA(selem)->data;
                }
                ret = bpf_iter_run_prog(prog, &ctx);
        }

        return ret;
}

static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
{
        return __bpf_sk_storage_map_seq_show(seq, v);
}

static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        if (!v)
                (void)__bpf_sk_storage_map_seq_show(seq, v);
        else
                rcu_read_unlock();
}

static int bpf_iter_init_sk_storage_map(void *priv_data,
                                        struct bpf_iter_aux_info *aux)
{
        struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

        bpf_map_inc_with_uref(aux->map);
        seq_info->map = aux->map;
        return 0;
}

static void bpf_iter_fini_sk_storage_map(void *priv_data)
{
        struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

        bpf_map_put_with_uref(seq_info->map);
}

static int bpf_iter_attach_map(struct bpf_prog *prog,
                               union bpf_iter_link_info *linfo,
                               struct bpf_iter_aux_info *aux)
{
        struct bpf_map *map;
        int err = -EINVAL;

        if (!linfo->map.map_fd)
                return -EBADF;

        map = bpf_map_get_with_uref(linfo->map.map_fd);
        if (IS_ERR(map))
                return PTR_ERR(map);

        if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
                goto put_map;

        if (prog->aux->max_rdwr_access > map->value_size) {
                err = -EACCES;
                goto put_map;
        }

        aux->map = map;
        return 0;

put_map:
        bpf_map_put_with_uref(map);
        return err;
}

static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
        bpf_map_put_with_uref(aux->map);
}

static const struct seq_operations bpf_sk_storage_map_seq_ops = {
        .start = bpf_sk_storage_map_seq_start,
        .next = bpf_sk_storage_map_seq_next,
        .stop = bpf_sk_storage_map_seq_stop,
        .show = bpf_sk_storage_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
        .seq_ops = &bpf_sk_storage_map_seq_ops,
        .init_seq_private = bpf_iter_init_sk_storage_map,
        .fini_seq_private = bpf_iter_fini_sk_storage_map,
        .seq_priv_size = sizeof(struct bpf_iter_seq_sk_storage_map_info),
};

static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
        .target = "bpf_sk_storage_map",
        .attach_target = bpf_iter_attach_map,
        .detach_target = bpf_iter_detach_map,
        .show_fdinfo = bpf_iter_map_show_fdinfo,
        .fill_link_info = bpf_iter_map_fill_link_info,
        .ctx_arg_info_size = 2,
        .ctx_arg_info = {
                { offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
                  PTR_TO_BTF_ID_OR_NULL },
                { offsetof(struct bpf_iter__bpf_sk_storage_map, value),
                  PTR_TO_BUF | PTR_MAYBE_NULL },
        },
        .seq_info = &iter_seq_info,
};

static int __init bpf_sk_storage_map_iter_init(void)
{
        bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id =
                btf_sock_ids[BTF_SOCK_TYPE_SOCK];
        return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info);
}
late_initcall(bpf_sk_storage_map_iter_init);