net/core/bpf_sk_storage.c (platform/kernel/linux-rpi.git)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>

DEFINE_BPF_STORAGE_CACHE(sk_cache);

static struct bpf_local_storage_data *
bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
        struct bpf_local_storage *sk_storage;
        struct bpf_local_storage_map *smap;

        sk_storage = rcu_dereference(sk->sk_bpf_storage);
        if (!sk_storage)
                return NULL;

        smap = (struct bpf_local_storage_map *)map;
        return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}

static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
{
        struct bpf_local_storage_data *sdata;

        sdata = bpf_sk_storage_lookup(sk, map, false);
        if (!sdata)
                return -ENOENT;

        bpf_selem_unlink(SELEM(sdata));

        return 0;
}

/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
        struct bpf_local_storage_elem *selem;
        struct bpf_local_storage *sk_storage;
        bool free_sk_storage = false;
        struct hlist_node *n;

        rcu_read_lock();
        sk_storage = rcu_dereference(sk->sk_bpf_storage);
        if (!sk_storage) {
                rcu_read_unlock();
                return;
        }

        /* Neither the bpf_prog nor the bpf-map's syscall
         * could be modifying the sk_storage->list now.
         * Thus, no elem can be added-to or deleted-from the
         * sk_storage->list by the bpf_prog or by the bpf-map's syscall.
         *
         * It is racing with bpf_local_storage_map_free() alone
         * when unlinking elem from the sk_storage->list and
         * the map's bucket->list.
         */
        raw_spin_lock_bh(&sk_storage->lock);
        hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
                /* Always unlink from map before unlinking from
                 * sk_storage.
                 */
                bpf_selem_unlink_map(selem);
                free_sk_storage = bpf_selem_unlink_storage_nolock(sk_storage,
                                                                  selem, true);
        }
        raw_spin_unlock_bh(&sk_storage->lock);
        rcu_read_unlock();

        if (free_sk_storage)
                kfree_rcu(sk_storage, rcu);
}

static void bpf_sk_storage_map_free(struct bpf_map *map)
{
        struct bpf_local_storage_map *smap;

        smap = (struct bpf_local_storage_map *)map;
        bpf_local_storage_cache_idx_free(&sk_cache, smap->cache_idx);
        bpf_local_storage_map_free(smap, NULL);
}

static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
        struct bpf_local_storage_map *smap;

        smap = bpf_local_storage_map_alloc(attr);
        if (IS_ERR(smap))
                return ERR_CAST(smap);

        smap->cache_idx = bpf_local_storage_cache_idx_get(&sk_cache);
        return &smap->map;
}

static int notsupp_get_next_key(struct bpf_map *map, void *key,
                                void *next_key)
{
        return -ENOTSUPP;
}

static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_local_storage_data *sdata;
        struct socket *sock;
        int fd, err;

        fd = *(int *)key;
        sock = sockfd_lookup(fd, &err);
        if (sock) {
                sdata = bpf_sk_storage_lookup(sock->sk, map, true);
                sockfd_put(sock);
                return sdata ? sdata->data : NULL;
        }

        return ERR_PTR(err);
}

static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
                                         void *value, u64 map_flags)
{
        struct bpf_local_storage_data *sdata;
        struct socket *sock;
        int fd, err;

        fd = *(int *)key;
        sock = sockfd_lookup(fd, &err);
        if (sock) {
                sdata = bpf_local_storage_update(
                        sock->sk, (struct bpf_local_storage_map *)map, value,
                        map_flags);
                sockfd_put(sock);
                return PTR_ERR_OR_ZERO(sdata);
        }

        return err;
}

static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
        struct socket *sock;
        int fd, err;

        fd = *(int *)key;
        sock = sockfd_lookup(fd, &err);
        if (sock) {
                err = bpf_sk_storage_del(sock->sk, map);
                sockfd_put(sock);
                return err;
        }

        return err;
}

static struct bpf_local_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
                          struct bpf_local_storage_map *smap,
                          struct bpf_local_storage_elem *selem)
{
        struct bpf_local_storage_elem *copy_selem;

        copy_selem = bpf_selem_alloc(smap, newsk, NULL, true);
        if (!copy_selem)
                return NULL;

        if (map_value_has_spin_lock(&smap->map))
                copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
                                      SDATA(selem)->data, true);
        else
                copy_map_value(&smap->map, SDATA(copy_selem)->data,
                               SDATA(selem)->data);

        return copy_selem;
}

int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
        struct bpf_local_storage *new_sk_storage = NULL;
        struct bpf_local_storage *sk_storage;
        struct bpf_local_storage_elem *selem;
        int ret = 0;

        RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

        rcu_read_lock();
        sk_storage = rcu_dereference(sk->sk_bpf_storage);

        if (!sk_storage || hlist_empty(&sk_storage->list))
                goto out;

        hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
                struct bpf_local_storage_elem *copy_selem;
                struct bpf_local_storage_map *smap;
                struct bpf_map *map;

                smap = rcu_dereference(SDATA(selem)->smap);
                if (!(smap->map.map_flags & BPF_F_CLONE))
                        continue;

                /* Note that for lockless listeners adding new element
                 * here can race with cleanup in bpf_local_storage_map_free.
                 * Try to grab map refcnt to make sure that it's still
                 * alive and prevent concurrent removal.
                 */
                map = bpf_map_inc_not_zero(&smap->map);
                if (IS_ERR(map))
                        continue;

                copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
                if (!copy_selem) {
                        ret = -ENOMEM;
                        bpf_map_put(map);
                        goto out;
                }

                if (new_sk_storage) {
                        bpf_selem_link_map(smap, copy_selem);
                        bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
                } else {
                        ret = bpf_local_storage_alloc(newsk, smap, copy_selem);
                        if (ret) {
                                kfree(copy_selem);
                                atomic_sub(smap->elem_size,
                                           &newsk->sk_omem_alloc);
                                bpf_map_put(map);
                                goto out;
                        }

                        new_sk_storage =
                                rcu_dereference(copy_selem->local_storage);
                }
                bpf_map_put(map);
        }

out:
        rcu_read_unlock();

        /* In case of an error, don't free anything explicitly here;
         * the caller is responsible for calling bpf_sk_storage_free.
         */

        return ret;
}
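
/* Illustrative sketch, not part of this file: storage elements are copied
 * from a listener to its child socket above only when the map was created
 * with BPF_F_CLONE.  A hypothetical libbpf-style map definition opting in
 * to cloning; "my_storage" and "sk_stg_map" are assumed names:
 *
 *	struct my_storage {
 *		__u64 created_ns;
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
 *		__type(key, int);
 *		__type(value, struct my_storage);
 *	} sk_stg_map SEC(".maps");
 */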

BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
           void *, value, u64, flags)
{
        struct bpf_local_storage_data *sdata;

        if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
                return (unsigned long)NULL;

        sdata = bpf_sk_storage_lookup(sk, map, true);
        if (sdata)
                return (unsigned long)sdata->data;

        if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
            /* Cannot add a new elem to a going-away sk.
             * Otherwise, the new elem may leak (and cause
             * other memory issues during map destruction).
             */
            refcount_inc_not_zero(&sk->sk_refcnt)) {
                sdata = bpf_local_storage_update(
                        sk, (struct bpf_local_storage_map *)map, value,
                        BPF_NOEXIST);
                /* sk must be a fullsock (guaranteed by verifier),
                 * so sock_gen_put() is unnecessary.
                 */
                sock_put(sk);
                return IS_ERR(sdata) ?
                        (unsigned long)NULL : (unsigned long)sdata->data;
        }

        return (unsigned long)NULL;
}
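
/* Illustrative sketch, not part of this file: a minimal BPF program using
 * this helper to create storage on first use.  "sk_stg_map" and
 * "struct my_storage" are the hypothetical names from the sketch above,
 * and <bpf/bpf_helpers.h> is assumed to be included:
 *
 *	SEC("cgroup/sock_create")
 *	int set_created(struct bpf_sock *sk)
 *	{
 *		struct my_storage *stg;
 *
 *		stg = bpf_sk_storage_get(&sk_stg_map, sk, NULL,
 *					 BPF_SK_STORAGE_GET_F_CREATE);
 *		if (stg)
 *			stg->created_ns = bpf_ktime_get_ns();
 *		return 1;	// 1 == allow the socket
 *	}
 */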

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
        if (!sk || !sk_fullsock(sk))
                return -EINVAL;

        if (refcount_inc_not_zero(&sk->sk_refcnt)) {
                int err;

                err = bpf_sk_storage_del(sk, map);
                sock_put(sk);
                return err;
        }

        return -ENOENT;
}

static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
                                 void *owner, u32 size)
{
        int optmem_max = READ_ONCE(sysctl_optmem_max);
        struct sock *sk = (struct sock *)owner;

        /* same check as in sock_kmalloc() */
        if (size <= optmem_max &&
            atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
                atomic_add(size, &sk->sk_omem_alloc);
                return 0;
        }

        return -ENOMEM;
}

static void bpf_sk_storage_uncharge(struct bpf_local_storage_map *smap,
                                    void *owner, u32 size)
{
        struct sock *sk = owner;

        atomic_sub(size, &sk->sk_omem_alloc);
}

static struct bpf_local_storage __rcu **
bpf_sk_storage_ptr(void *owner)
{
        struct sock *sk = owner;

        return &sk->sk_bpf_storage;
}

static int sk_storage_map_btf_id;
const struct bpf_map_ops sk_storage_map_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_alloc_check = bpf_local_storage_map_alloc_check,
        .map_alloc = bpf_sk_storage_map_alloc,
        .map_free = bpf_sk_storage_map_free,
        .map_get_next_key = notsupp_get_next_key,
        .map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
        .map_update_elem = bpf_fd_sk_storage_update_elem,
        .map_delete_elem = bpf_fd_sk_storage_delete_elem,
        .map_check_btf = bpf_local_storage_map_check_btf,
        .map_btf_name = "bpf_local_storage_map",
        .map_btf_id = &sk_storage_map_btf_id,
        .map_local_storage_charge = bpf_sk_storage_charge,
        .map_local_storage_uncharge = bpf_sk_storage_uncharge,
        .map_owner_storage_ptr = bpf_sk_storage_ptr,
};
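
/* Illustrative sketch, not part of this file: creating this map type from
 * user space via libbpf's bpf_map_create().  bpf_local_storage_map_alloc_check()
 * (defined elsewhere) requires key_size == sizeof(int), max_entries == 0 and
 * BPF_F_NO_PREALLOC; the 16-byte value size here is an arbitrary example:
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts,
 *		    .map_flags = BPF_F_NO_PREALLOC);
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_stg",
 *				    sizeof(int), 16, 0, &opts);
 */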

const struct bpf_func_proto bpf_sk_storage_get_proto = {
        .func           = bpf_sk_storage_get,
        .gpl_only       = false,
        .ret_type       = RET_PTR_TO_MAP_VALUE_OR_NULL,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
        .arg3_type      = ARG_PTR_TO_MAP_VALUE_OR_NULL,
        .arg4_type      = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto = {
        .func           = bpf_sk_storage_get,
        .gpl_only       = false,
        .ret_type       = RET_PTR_TO_MAP_VALUE_OR_NULL,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_CTX, /* context is 'struct sock' */
        .arg3_type      = ARG_PTR_TO_MAP_VALUE_OR_NULL,
        .arg4_type      = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
        .func           = bpf_sk_storage_delete,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};

static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
{
        const struct btf *btf_vmlinux;
        const struct btf_type *t;
        const char *tname;
        u32 btf_id;

        if (prog->aux->dst_prog)
                return false;

        /* Ensure that a tracing program which uses the
         * bpf_sk_storage_(get|delete) helpers is not itself
         * tracing any bpf_sk_storage*() function.
         */
        switch (prog->expected_attach_type) {
        case BPF_TRACE_ITER:
        case BPF_TRACE_RAW_TP:
                /* bpf_sk_storage has no trace point */
                return true;
        case BPF_TRACE_FENTRY:
        case BPF_TRACE_FEXIT:
                btf_vmlinux = bpf_get_btf_vmlinux();
                btf_id = prog->aux->attach_btf_id;
                t = btf_type_by_id(btf_vmlinux, btf_id);
                tname = btf_name_by_offset(btf_vmlinux, t->name_off);
                return !!strncmp(tname, "bpf_sk_storage",
                                 strlen("bpf_sk_storage"));
        default:
                return false;
        }

        return false;
}

BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
           void *, value, u64, flags)
{
        if (in_hardirq() || in_nmi())
                return (unsigned long)NULL;

        return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags);
}

BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
           struct sock *, sk)
{
        if (in_hardirq() || in_nmi())
                return -EPERM;

        return ____bpf_sk_storage_delete(map, sk);
}

const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
        .func           = bpf_sk_storage_get_tracing,
        .gpl_only       = false,
        .ret_type       = RET_PTR_TO_MAP_VALUE_OR_NULL,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_BTF_ID,
        .arg2_btf_id    = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
        .arg3_type      = ARG_PTR_TO_MAP_VALUE_OR_NULL,
        .arg4_type      = ARG_ANYTHING,
        .allowed        = bpf_sk_storage_tracing_allowed,
};

const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
        .func           = bpf_sk_storage_delete_tracing,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_BTF_ID,
        .arg2_btf_id    = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
        .allowed        = bpf_sk_storage_tracing_allowed,
};
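
/* Illustrative sketch, not part of this file: once permitted by
 * bpf_sk_storage_tracing_allowed(), a fentry program can attach storage to
 * the traced socket.  "tcp_close" is just one possible attach point; the
 * map and value struct are the hypothetical names used above, and
 * <bpf/bpf_tracing.h> is assumed for BPF_PROG():
 *
 *	SEC("fentry/tcp_close")
 *	int BPF_PROG(trace_close, struct sock *sk)
 *	{
 *		struct my_storage *stg;
 *
 *		stg = bpf_sk_storage_get(&sk_stg_map, sk, NULL, 0);
 *		if (stg)
 *			bpf_printk("created_ns=%llu", stg->created_ns);
 *		return 0;
 *	}
 */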

struct bpf_sk_storage_diag {
        u32 nr_maps;
        struct bpf_map *maps[];
};

/* The reply will be like:
 * INET_DIAG_BPF_SK_STORAGES (nla_nest)
 *      SK_DIAG_BPF_STORAGE (nla_nest)
 *              SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *              SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *      SK_DIAG_BPF_STORAGE (nla_nest)
 *              SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *              SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *      ....
 */
static int nla_value_size(u32 value_size)
{
        /* SK_DIAG_BPF_STORAGE (nla_nest)
         *      SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
         *      SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
         */
        return nla_total_size(0) + nla_total_size(sizeof(u32)) +
                nla_total_size_64bit(value_size);
}

void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
{
        u32 i;

        if (!diag)
                return;

        for (i = 0; i < diag->nr_maps; i++)
                bpf_map_put(diag->maps[i]);

        kfree(diag);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);

static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
                           const struct bpf_map *map)
{
        u32 i;

        for (i = 0; i < diag->nr_maps; i++) {
                if (diag->maps[i] == map)
                        return true;
        }

        return false;
}

struct bpf_sk_storage_diag *
bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
{
        struct bpf_sk_storage_diag *diag;
        struct nlattr *nla;
        u32 nr_maps = 0;
        int rem, err;

        /* bpf_local_storage_map is currently limited to CAP_SYS_ADMIN;
         * the same check is done on the map_alloc_check() side.
         */
        if (!bpf_capable())
                return ERR_PTR(-EPERM);

        nla_for_each_nested(nla, nla_stgs, rem) {
                if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
                        nr_maps++;
        }

        diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL);
        if (!diag)
                return ERR_PTR(-ENOMEM);

        nla_for_each_nested(nla, nla_stgs, rem) {
                struct bpf_map *map;
                int map_fd;

                if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
                        continue;

                map_fd = nla_get_u32(nla);
                map = bpf_map_get(map_fd);
                if (IS_ERR(map)) {
                        err = PTR_ERR(map);
                        goto err_free;
                }
                if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
                        bpf_map_put(map);
                        err = -EINVAL;
                        goto err_free;
                }
                if (diag_check_dup(diag, map)) {
                        bpf_map_put(map);
                        err = -EEXIST;
                        goto err_free;
                }
                diag->maps[diag->nr_maps++] = map;
        }

        return diag;

err_free:
        bpf_sk_storage_diag_free(diag);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);

static int diag_get(struct bpf_local_storage_data *sdata, struct sk_buff *skb)
{
        struct nlattr *nla_stg, *nla_value;
        struct bpf_local_storage_map *smap;

        /* It cannot exceed max nlattr's payload */
        BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < BPF_LOCAL_STORAGE_MAX_VALUE_SIZE);

        nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
        if (!nla_stg)
                return -EMSGSIZE;

        smap = rcu_dereference(sdata->smap);
        if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
                goto errout;

        nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
                                      smap->map.value_size,
                                      SK_DIAG_BPF_STORAGE_PAD);
        if (!nla_value)
                goto errout;

        if (map_value_has_spin_lock(&smap->map))
                copy_map_value_locked(&smap->map, nla_data(nla_value),
                                      sdata->data, true);
        else
                copy_map_value(&smap->map, nla_data(nla_value), sdata->data);

        nla_nest_end(skb, nla_stg);
        return 0;

errout:
        nla_nest_cancel(skb, nla_stg);
        return -EMSGSIZE;
}

static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
                                       int stg_array_type,
                                       unsigned int *res_diag_size)
{
        /* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
        unsigned int diag_size = nla_total_size(0);
        struct bpf_local_storage *sk_storage;
        struct bpf_local_storage_elem *selem;
        struct bpf_local_storage_map *smap;
        struct nlattr *nla_stgs;
        unsigned int saved_len;
        int err = 0;

        rcu_read_lock();

        sk_storage = rcu_dereference(sk->sk_bpf_storage);
        if (!sk_storage || hlist_empty(&sk_storage->list)) {
                rcu_read_unlock();
                return 0;
        }

        nla_stgs = nla_nest_start(skb, stg_array_type);
        if (!nla_stgs)
                /* Continue to learn diag_size */
                err = -EMSGSIZE;

        saved_len = skb->len;
        hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
                smap = rcu_dereference(SDATA(selem)->smap);
                diag_size += nla_value_size(smap->map.value_size);

                if (nla_stgs && diag_get(SDATA(selem), skb))
                        /* Continue to learn diag_size */
                        err = -EMSGSIZE;
        }

        rcu_read_unlock();

        if (nla_stgs) {
                if (saved_len == skb->len)
                        nla_nest_cancel(skb, nla_stgs);
                else
                        nla_nest_end(skb, nla_stgs);
        }

        if (diag_size == nla_total_size(0)) {
                *res_diag_size = 0;
                return 0;
        }

        *res_diag_size = diag_size;
        return err;
}

int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
                            struct sock *sk, struct sk_buff *skb,
                            int stg_array_type,
                            unsigned int *res_diag_size)
{
        /* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
        unsigned int diag_size = nla_total_size(0);
        struct bpf_local_storage *sk_storage;
        struct bpf_local_storage_data *sdata;
        struct nlattr *nla_stgs;
        unsigned int saved_len;
        int err = 0;
        u32 i;

        *res_diag_size = 0;

        /* No map has been specified.  Dump all. */
        if (!diag->nr_maps)
                return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
                                                   res_diag_size);

        rcu_read_lock();
        sk_storage = rcu_dereference(sk->sk_bpf_storage);
        if (!sk_storage || hlist_empty(&sk_storage->list)) {
                rcu_read_unlock();
                return 0;
        }

        nla_stgs = nla_nest_start(skb, stg_array_type);
        if (!nla_stgs)
                /* Continue to learn diag_size */
                err = -EMSGSIZE;

        saved_len = skb->len;
        for (i = 0; i < diag->nr_maps; i++) {
                sdata = bpf_local_storage_lookup(sk_storage,
                                (struct bpf_local_storage_map *)diag->maps[i],
                                false);

                if (!sdata)
                        continue;

                diag_size += nla_value_size(diag->maps[i]->value_size);

                if (nla_stgs && diag_get(sdata, skb))
                        /* Continue to learn diag_size */
                        err = -EMSGSIZE;
        }
        rcu_read_unlock();

        if (nla_stgs) {
                if (saved_len == skb->len)
                        nla_nest_cancel(skb, nla_stgs);
                else
                        nla_nest_end(skb, nla_stgs);
        }

        if (diag_size == nla_total_size(0)) {
                *res_diag_size = 0;
                return 0;
        }

        *res_diag_size = diag_size;
        return err;
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);

struct bpf_iter_seq_sk_storage_map_info {
        struct bpf_map *map;
        unsigned int bucket_id;
        unsigned int skip_elems;
};

static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
                                 struct bpf_local_storage_elem *prev_selem)
        __acquires(RCU) __releases(RCU)
{
        struct bpf_local_storage *sk_storage;
        struct bpf_local_storage_elem *selem;
        u32 skip_elems = info->skip_elems;
        struct bpf_local_storage_map *smap;
        u32 bucket_id = info->bucket_id;
        u32 i, count, n_buckets;
        struct bpf_local_storage_map_bucket *b;

        smap = (struct bpf_local_storage_map *)info->map;
        n_buckets = 1U << smap->bucket_log;
        if (bucket_id >= n_buckets)
                return NULL;

        /* try to find next selem in the same bucket */
        selem = prev_selem;
        count = 0;
        while (selem) {
                selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
                                         struct bpf_local_storage_elem, map_node);
                if (!selem) {
                        /* not found, unlock and go to the next bucket */
                        b = &smap->buckets[bucket_id++];
                        rcu_read_unlock();
                        skip_elems = 0;
                        break;
                }
                sk_storage = rcu_dereference(selem->local_storage);
                if (sk_storage) {
                        info->skip_elems = skip_elems + count;
                        return selem;
                }
                count++;
        }

        for (i = bucket_id; i < (1U << smap->bucket_log); i++) {
                b = &smap->buckets[i];
                rcu_read_lock();
                count = 0;
                hlist_for_each_entry_rcu(selem, &b->list, map_node) {
                        sk_storage = rcu_dereference(selem->local_storage);
                        if (sk_storage && count >= skip_elems) {
                                info->bucket_id = i;
                                info->skip_elems = count;
                                return selem;
                        }
                        count++;
                }
                rcu_read_unlock();
                skip_elems = 0;
        }

        info->bucket_id = i;
        info->skip_elems = 0;
        return NULL;
}

static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct bpf_local_storage_elem *selem;

        selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL);
        if (!selem)
                return NULL;

        if (*pos == 0)
                ++*pos;
        return selem;
}

static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v,
                                         loff_t *pos)
{
        struct bpf_iter_seq_sk_storage_map_info *info = seq->private;

        ++*pos;
        ++info->skip_elems;
        return bpf_sk_storage_map_seq_find_next(seq->private, v);
}

struct bpf_iter__bpf_sk_storage_map {
        __bpf_md_ptr(struct bpf_iter_meta *, meta);
        __bpf_md_ptr(struct bpf_map *, map);
        __bpf_md_ptr(struct sock *, sk);
        __bpf_md_ptr(void *, value);
};

DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,
                     struct bpf_map *map, struct sock *sk,
                     void *value)

static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
                                         struct bpf_local_storage_elem *selem)
{
        struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
        struct bpf_iter__bpf_sk_storage_map ctx = {};
        struct bpf_local_storage *sk_storage;
        struct bpf_iter_meta meta;
        struct bpf_prog *prog;
        int ret = 0;

        meta.seq = seq;
        prog = bpf_iter_get_info(&meta, selem == NULL);
        if (prog) {
                ctx.meta = &meta;
                ctx.map = info->map;
                if (selem) {
                        sk_storage = rcu_dereference(selem->local_storage);
                        ctx.sk = sk_storage->owner;
                        ctx.value = SDATA(selem)->data;
                }
                ret = bpf_iter_run_prog(prog, &ctx);
        }

        return ret;
}

static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
{
        return __bpf_sk_storage_map_seq_show(seq, v);
}

static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        if (!v)
                (void)__bpf_sk_storage_map_seq_show(seq, v);
        else
                rcu_read_unlock();
}

static int bpf_iter_init_sk_storage_map(void *priv_data,
                                        struct bpf_iter_aux_info *aux)
{
        struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

        bpf_map_inc_with_uref(aux->map);
        seq_info->map = aux->map;
        return 0;
}

static void bpf_iter_fini_sk_storage_map(void *priv_data)
{
        struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

        bpf_map_put_with_uref(seq_info->map);
}

static int bpf_iter_attach_map(struct bpf_prog *prog,
                               union bpf_iter_link_info *linfo,
                               struct bpf_iter_aux_info *aux)
{
        struct bpf_map *map;
        int err = -EINVAL;

        if (!linfo->map.map_fd)
                return -EBADF;

        map = bpf_map_get_with_uref(linfo->map.map_fd);
        if (IS_ERR(map))
                return PTR_ERR(map);

        if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
                goto put_map;

        if (prog->aux->max_rdwr_access > map->value_size) {
                err = -EACCES;
                goto put_map;
        }

        aux->map = map;
        return 0;

put_map:
        bpf_map_put_with_uref(map);
        return err;
}

static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
        bpf_map_put_with_uref(aux->map);
}

static const struct seq_operations bpf_sk_storage_map_seq_ops = {
        .start  = bpf_sk_storage_map_seq_start,
        .next   = bpf_sk_storage_map_seq_next,
        .stop   = bpf_sk_storage_map_seq_stop,
        .show   = bpf_sk_storage_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
        .seq_ops                = &bpf_sk_storage_map_seq_ops,
        .init_seq_private       = bpf_iter_init_sk_storage_map,
        .fini_seq_private       = bpf_iter_fini_sk_storage_map,
        .seq_priv_size          = sizeof(struct bpf_iter_seq_sk_storage_map_info),
};

static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
        .target                 = "bpf_sk_storage_map",
        .attach_target          = bpf_iter_attach_map,
        .detach_target          = bpf_iter_detach_map,
        .show_fdinfo            = bpf_iter_map_show_fdinfo,
        .fill_link_info         = bpf_iter_map_fill_link_info,
        .ctx_arg_info_size      = 2,
        .ctx_arg_info           = {
                { offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
                  PTR_TO_BTF_ID_OR_NULL },
                { offsetof(struct bpf_iter__bpf_sk_storage_map, value),
                  PTR_TO_BUF | PTR_MAYBE_NULL },
        },
        .seq_info               = &iter_seq_info,
};

static int __init bpf_sk_storage_map_iter_init(void)
{
        bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id =
                btf_sock_ids[BTF_SOCK_TYPE_SOCK];
        return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info);
}
late_initcall(bpf_sk_storage_map_iter_init);
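
/* Illustrative sketch, not part of this file: a hypothetical iterator
 * program for the "bpf_sk_storage_map" target registered above.  ctx->sk
 * and ctx->value may be NULL, matching the OR_NULL/MAYBE_NULL ctx_arg_info;
 * BPF_SEQ_PRINTF() comes from <bpf/bpf_tracing.h>, and "my_storage" is the
 * assumed value struct from the earlier sketches:
 *
 *	SEC("iter/bpf_sk_storage_map")
 *	int dump_stg(struct bpf_iter__bpf_sk_storage_map *ctx)
 *	{
 *		struct my_storage *stg = ctx->value;
 *
 *		if (!ctx->sk || !stg)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "created_ns=%llu\n",
 *			       stg->created_ns);
 *		return 0;
 *	}
 */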