1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  */
5 #include <linux/bpf.h>
6 #include <linux/btf.h>
7 #include <linux/jhash.h>
8 #include <linux/filter.h>
9 #include <linux/rculist_nulls.h>
10 #include <linux/random.h>
11 #include <uapi/linux/btf.h>
12 #include <linux/rcupdate_trace.h>
13 #include <linux/btf_ids.h>
14 #include "percpu_freelist.h"
15 #include "bpf_lru_list.h"
16 #include "map_in_map.h"
17 #include <linux/bpf_mem_alloc.h>
18
19 #define HTAB_CREATE_FLAG_MASK                                           \
20         (BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |    \
21          BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)
22
23 #define BATCH_OPS(_name)                        \
24         .map_lookup_batch =                     \
25         _name##_map_lookup_batch,               \
26         .map_lookup_and_delete_batch =          \
27         _name##_map_lookup_and_delete_batch,    \
28         .map_update_batch =                     \
29         generic_map_update_batch,               \
30         .map_delete_batch =                     \
31         generic_map_delete_batch
32
33 /*
34  * The bucket lock has two protection scopes:
35  *
36  * 1) Serializing concurrent operations from BPF programs on different
37  *    CPUs
38  *
39  * 2) Serializing concurrent operations from BPF programs and sys_bpf()
40  *
41  * BPF programs can execute in any context including perf, kprobes and
42  * tracing. As there are almost no limits on where perf, kprobes and tracing
43  * can be invoked from, the lock operations need to be protected against
44  * deadlocks. Deadlocks can be caused by recursion and by an invocation in
45  * the lock held section when functions which acquire this lock are invoked
46  * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
47  * variable bpf_prog_active, which prevents BPF programs attached to perf
48  * events, kprobes and tracing from being invoked before the prior invocation
49  * from one of these contexts has completed. sys_bpf() uses the same mechanism
50  * by pinning the task to the current CPU and incrementing the recursion
51  * protection across the map operation.
52  *
53  * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
54  * operations like memory allocations (even with GFP_ATOMIC) from atomic
55  * contexts. This is required because even with GFP_ATOMIC the memory
56  * allocator calls into code paths which acquire locks with long held lock
57  * sections. To ensure deterministic behaviour, these locks are regular
58  * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
59  * true atomic contexts on an RT kernel are the low level hardware
60  * handling, scheduling, low level interrupt handling, NMIs etc. None of
61  * these contexts should ever do memory allocations.
62  *
63  * As regular device interrupt handlers and soft interrupts are forced into
64  * thread context, the existing code which does
65  *   spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
66  * just works.
67  *
68  * In theory the BPF locks could be converted to regular spinlocks as well,
69  * but the bucket locks and percpu_freelist locks can be taken from
70  * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
71  * atomic contexts even on RT. These mechanisms require preallocated maps,
72  * so there is no need to invoke memory allocations within the lock held
73  * sections.
74  *
75  * BPF maps which need dynamic allocation are only used from (forced)
76  * thread context on RT and can therefore use regular spinlocks, which in
77  * turn allows invoking memory allocations from the lock held section.
78  *
79  * On a non-RT kernel this distinction is neither possible nor required.
80  * spinlock maps to raw_spinlock and the extra code is optimized out by the
81  * compiler.
82  */
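/*
 * Illustrative sketch, not part of this file: the prealloc vs.
 * BPF_F_NO_PREALLOC distinction discussed above is chosen by user space at
 * map creation time.  A minimal bpf(2) call could look like the following
 * (helper name and sizes are made up for illustration, error handling
 * omitted):
 */
#if 0
#include <linux/bpf.h>
#include <stdbool.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int create_hash_map(bool prealloc)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_HASH;
        attr.key_size    = sizeof(int);
        attr.value_size  = sizeof(long);
        attr.max_entries = 1024;
        /* Preallocated maps (flags == 0) keep using raw spinlocks even on
         * PREEMPT_RT; BPF_F_NO_PREALLOC maps allocate at update time and
         * therefore use regular (sleepable on RT) spinlocks, see above.
         */
        attr.map_flags   = prealloc ? 0 : BPF_F_NO_PREALLOC;

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
#endif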
83 struct bucket {
84         struct hlist_nulls_head head;
85         union {
86                 raw_spinlock_t raw_lock;
87                 spinlock_t     lock;
88         };
89 };
90
91 #define HASHTAB_MAP_LOCK_COUNT 8
92 #define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
93
94 struct bpf_htab {
95         struct bpf_map map;
96         struct bpf_mem_alloc ma;
97         struct bucket *buckets;
98         void *elems;
99         union {
100                 struct pcpu_freelist freelist;
101                 struct bpf_lru lru;
102         };
103         struct htab_elem *__percpu *extra_elems;
104         /* the number of elements in a non-preallocated hashtable is kept
105          * in either pcount or count
106          */
107         struct percpu_counter pcount;
108         atomic_t count;
109         bool use_percpu_counter;
110         u32 n_buckets;  /* number of hash buckets */
111         u32 elem_size;  /* size of each element in bytes */
112         u32 hashrnd;
113         struct lock_class_key lockdep_key;
114         int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
115 };
116
117 /* each htab element is struct htab_elem + key + value */
118 struct htab_elem {
119         union {
120                 struct hlist_nulls_node hash_node;
121                 struct {
122                         void *padding;
123                         union {
124                                 struct bpf_htab *htab;
125                                 struct pcpu_freelist_node fnode;
126                                 struct htab_elem *batch_flink;
127                         };
128                 };
129         };
130         union {
131                 struct rcu_head rcu;
132                 struct bpf_lru_node lru_node;
133         };
134         u32 hash;
135         char key[] __aligned(8);
136 };
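/* Worked layout example (non-percpu map, key_size = 12, value_size = 20):
 * the key starts at offsetof(struct htab_elem, key) and takes
 * round_up(12, 8) = 16 bytes; the value follows at key + 16 and takes
 * round_up(20, 8) = 24 bytes, so elem_size = sizeof(struct htab_elem) +
 * 16 + 24.  For per-cpu maps the value area is instead a single pointer to
 * the per-cpu storage (see htab_elem_set_ptr()/htab_elem_get_ptr()).
 */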
137
138 static inline bool htab_is_prealloc(const struct bpf_htab *htab)
139 {
140         return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
141 }
142
143 static inline bool htab_use_raw_lock(const struct bpf_htab *htab)
144 {
145         return (!IS_ENABLED(CONFIG_PREEMPT_RT) || htab_is_prealloc(htab));
146 }
147
148 static void htab_init_buckets(struct bpf_htab *htab)
149 {
150         unsigned int i;
151
152         for (i = 0; i < htab->n_buckets; i++) {
153                 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
154                 if (htab_use_raw_lock(htab)) {
155                         raw_spin_lock_init(&htab->buckets[i].raw_lock);
156                         lockdep_set_class(&htab->buckets[i].raw_lock,
157                                           &htab->lockdep_key);
158                 } else {
159                         spin_lock_init(&htab->buckets[i].lock);
160                         lockdep_set_class(&htab->buckets[i].lock,
161                                           &htab->lockdep_key);
162                 }
163                 cond_resched();
164         }
165 }
166
167 static inline int htab_lock_bucket(const struct bpf_htab *htab,
168                                    struct bucket *b, u32 hash,
169                                    unsigned long *pflags)
170 {
171         unsigned long flags;
172         bool use_raw_lock;
173
174         hash = hash & HASHTAB_MAP_LOCK_MASK;
175
176         use_raw_lock = htab_use_raw_lock(htab);
177         if (use_raw_lock)
178                 preempt_disable();
179         else
180                 migrate_disable();
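        /* map_locked[] is a per-CPU counter for each of the
         * HASHTAB_MAP_LOCK_COUNT lock stripes.  A return value other than 1
         * here means this CPU is re-entering the same stripe (e.g. a tracing
         * BPF program fired while a map operation already holds the bucket
         * lock), so bail out with -EBUSY instead of deadlocking.
         */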
181         if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
182                 __this_cpu_dec(*(htab->map_locked[hash]));
183                 if (use_raw_lock)
184                         preempt_enable();
185                 else
186                         migrate_enable();
187                 return -EBUSY;
188         }
189
190         if (use_raw_lock)
191                 raw_spin_lock_irqsave(&b->raw_lock, flags);
192         else
193                 spin_lock_irqsave(&b->lock, flags);
194         *pflags = flags;
195
196         return 0;
197 }
198
199 static inline void htab_unlock_bucket(const struct bpf_htab *htab,
200                                       struct bucket *b, u32 hash,
201                                       unsigned long flags)
202 {
203         bool use_raw_lock = htab_use_raw_lock(htab);
204
205         hash = hash & HASHTAB_MAP_LOCK_MASK;
206         if (use_raw_lock)
207                 raw_spin_unlock_irqrestore(&b->raw_lock, flags);
208         else
209                 spin_unlock_irqrestore(&b->lock, flags);
210         __this_cpu_dec(*(htab->map_locked[hash]));
211         if (use_raw_lock)
212                 preempt_enable();
213         else
214                 migrate_enable();
215 }
216
217 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
218
219 static bool htab_is_lru(const struct bpf_htab *htab)
220 {
221         return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
222                 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
223 }
224
225 static bool htab_is_percpu(const struct bpf_htab *htab)
226 {
227         return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
228                 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
229 }
230
231 static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
232                                      void __percpu *pptr)
233 {
234         *(void __percpu **)(l->key + key_size) = pptr;
235 }
236
237 static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
238 {
239         return *(void __percpu **)(l->key + key_size);
240 }
241
242 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
243 {
244         return *(void **)(l->key + roundup(map->key_size, 8));
245 }
246
247 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
248 {
249         return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
250 }
251
252 static bool htab_has_extra_elems(struct bpf_htab *htab)
253 {
254         return !htab_is_percpu(htab) && !htab_is_lru(htab);
255 }
256
257 static void htab_free_prealloced_timers(struct bpf_htab *htab)
258 {
259         u32 num_entries = htab->map.max_entries;
260         int i;
261
262         if (!map_value_has_timer(&htab->map))
263                 return;
264         if (htab_has_extra_elems(htab))
265                 num_entries += num_possible_cpus();
266
267         for (i = 0; i < num_entries; i++) {
268                 struct htab_elem *elem;
269
270                 elem = get_htab_elem(htab, i);
271                 bpf_timer_cancel_and_free(elem->key +
272                                           round_up(htab->map.key_size, 8) +
273                                           htab->map.timer_off);
274                 cond_resched();
275         }
276 }
277
278 static void htab_free_prealloced_kptrs(struct bpf_htab *htab)
279 {
280         u32 num_entries = htab->map.max_entries;
281         int i;
282
283         if (!map_value_has_kptrs(&htab->map))
284                 return;
285         if (htab_has_extra_elems(htab))
286                 num_entries += num_possible_cpus();
287
288         for (i = 0; i < num_entries; i++) {
289                 struct htab_elem *elem;
290
291                 elem = get_htab_elem(htab, i);
292                 bpf_map_free_kptrs(&htab->map, elem->key + round_up(htab->map.key_size, 8));
293                 cond_resched();
294         }
295 }
296
297 static void htab_free_elems(struct bpf_htab *htab)
298 {
299         int i;
300
301         if (!htab_is_percpu(htab))
302                 goto free_elems;
303
304         for (i = 0; i < htab->map.max_entries; i++) {
305                 void __percpu *pptr;
306
307                 pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
308                                          htab->map.key_size);
309                 free_percpu(pptr);
310                 cond_resched();
311         }
312 free_elems:
313         bpf_map_area_free(htab->elems);
314 }
315
316 /* The LRU list has a lock (lru_lock). Each htab bucket has a lock
317  * (bucket_lock). If both locks need to be acquired together, the lock
318  * order is always lru_lock -> bucket_lock and this only happens in
319  * bpf_lru_list.c logic. For example, certain code path of
320  * bpf_lru_pop_free(), which is called by function prealloc_lru_pop(),
321  * will acquire lru_lock first followed by acquiring bucket_lock.
322  *
323  * In hashtab.c, to avoid deadlock, lock acquisition of
324  * bucket_lock followed by lru_lock is not allowed. In such cases,
325  * bucket_lock needs to be released first before acquiring lru_lock.
326  */
327 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
328                                           u32 hash)
329 {
330         struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
331         struct htab_elem *l;
332
333         if (node) {
334                 l = container_of(node, struct htab_elem, lru_node);
335                 memcpy(l->key, key, htab->map.key_size);
336                 return l;
337         }
338
339         return NULL;
340 }
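/* A concrete instance of the ordering rule above: htab_lru_map_delete_elem()
 * and htab_lru_map_update_elem() below always drop the bucket lock before
 * calling htab_lru_push_free(), which takes LRU-side locks.
 */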
341
342 static int prealloc_init(struct bpf_htab *htab)
343 {
344         u32 num_entries = htab->map.max_entries;
345         int err = -ENOMEM, i;
346
347         if (htab_has_extra_elems(htab))
348                 num_entries += num_possible_cpus();
349
350         htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
351                                          htab->map.numa_node);
352         if (!htab->elems)
353                 return -ENOMEM;
354
355         if (!htab_is_percpu(htab))
356                 goto skip_percpu_elems;
357
358         for (i = 0; i < num_entries; i++) {
359                 u32 size = round_up(htab->map.value_size, 8);
360                 void __percpu *pptr;
361
362                 pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
363                                             GFP_USER | __GFP_NOWARN);
364                 if (!pptr)
365                         goto free_elems;
366                 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
367                                   pptr);
368                 cond_resched();
369         }
370
371 skip_percpu_elems:
372         if (htab_is_lru(htab))
373                 err = bpf_lru_init(&htab->lru,
374                                    htab->map.map_flags & BPF_F_NO_COMMON_LRU,
375                                    offsetof(struct htab_elem, hash) -
376                                    offsetof(struct htab_elem, lru_node),
377                                    htab_lru_map_delete_node,
378                                    htab);
379         else
380                 err = pcpu_freelist_init(&htab->freelist);
381
382         if (err)
383                 goto free_elems;
384
385         if (htab_is_lru(htab))
386                 bpf_lru_populate(&htab->lru, htab->elems,
387                                  offsetof(struct htab_elem, lru_node),
388                                  htab->elem_size, num_entries);
389         else
390                 pcpu_freelist_populate(&htab->freelist,
391                                        htab->elems + offsetof(struct htab_elem, fnode),
392                                        htab->elem_size, num_entries);
393
394         return 0;
395
396 free_elems:
397         htab_free_elems(htab);
398         return err;
399 }
400
401 static void prealloc_destroy(struct bpf_htab *htab)
402 {
403         htab_free_elems(htab);
404
405         if (htab_is_lru(htab))
406                 bpf_lru_destroy(&htab->lru);
407         else
408                 pcpu_freelist_destroy(&htab->freelist);
409 }
410
411 static int alloc_extra_elems(struct bpf_htab *htab)
412 {
413         struct htab_elem *__percpu *pptr, *l_new;
414         struct pcpu_freelist_node *l;
415         int cpu;
416
417         pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
418                                     GFP_USER | __GFP_NOWARN);
419         if (!pptr)
420                 return -ENOMEM;
421
422         for_each_possible_cpu(cpu) {
423                 l = pcpu_freelist_pop(&htab->freelist);
424                 /* pop will succeed, since prealloc_init()
425                  * preallocated extra num_possible_cpus elements
426                  */
427                 l_new = container_of(l, struct htab_elem, fnode);
428                 *per_cpu_ptr(pptr, cpu) = l_new;
429         }
430         htab->extra_elems = pptr;
431         return 0;
432 }
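/* Each CPU holds one spare preallocated element in extra_elems.  When an
 * update replaces an existing key, alloc_htab_elem() hands out this spare
 * and parks the replaced element in its place, avoiding a freelist pop/push
 * for the common overwrite case.
 */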
433
434 /* Called from syscall */
435 static int htab_map_alloc_check(union bpf_attr *attr)
436 {
437         bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
438                        attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
439         bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
440                     attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
441         /* percpu_lru means each cpu has its own LRU list.
442          * it is different from BPF_MAP_TYPE_PERCPU_HASH where
443          * the map's value itself is percpu.  percpu_lru has
444          * nothing to do with the map's value.
445          */
446         bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
447         bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
448         bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
449         int numa_node = bpf_map_attr_numa_node(attr);
450
451         BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
452                      offsetof(struct htab_elem, hash_node.pprev));
453         BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
454                      offsetof(struct htab_elem, hash_node.pprev));
455
456         if (lru && !bpf_capable())
457                 /* The LRU implementation is much more complicated than other
458                  * maps.  Hence, limit to CAP_BPF.
459                  */
460                 return -EPERM;
461
462         if (zero_seed && !capable(CAP_SYS_ADMIN))
463                 /* Guard against local DoS, and discourage production use. */
464                 return -EPERM;
465
466         if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
467             !bpf_map_flags_access_ok(attr->map_flags))
468                 return -EINVAL;
469
470         if (!lru && percpu_lru)
471                 return -EINVAL;
472
473         if (lru && !prealloc)
474                 return -ENOTSUPP;
475
476         if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
477                 return -EINVAL;
478
479         /* check sanity of attributes.
480          * value_size == 0 may be allowed in the future to use map as a set
481          */
482         if (attr->max_entries == 0 || attr->key_size == 0 ||
483             attr->value_size == 0)
484                 return -EINVAL;
485
486         if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE -
487            sizeof(struct htab_elem))
488                 /* if key_size + value_size is bigger, the user space won't be
489                  * able to access the elements via bpf syscall. This check
490                  * also makes sure that the elem_size doesn't overflow and it's
491                  * kmalloc-able later in htab_map_update_elem()
492                  */
493                 return -E2BIG;
494
495         return 0;
496 }
497
498 static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
499 {
500         bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
501                        attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
502         bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
503                     attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
504         /* percpu_lru means each cpu has its own LRU list.
505          * it is different from BPF_MAP_TYPE_PERCPU_HASH where
506          * the map's value itself is percpu.  percpu_lru has
507          * nothing to do with the map's value.
508          */
509         bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
510         bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
511         struct bpf_htab *htab;
512         int err, i;
513
514         htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
515         if (!htab)
516                 return ERR_PTR(-ENOMEM);
517
518         lockdep_register_key(&htab->lockdep_key);
519
520         bpf_map_init_from_attr(&htab->map, attr);
521
522         if (percpu_lru) {
523                 /* ensure each CPU's lru list has >=1 elements.
524                  * While we are at it, make each lru list have the same
525                  * number of elements.
526                  */
527                 htab->map.max_entries = roundup(attr->max_entries,
528                                                 num_possible_cpus());
529                 if (htab->map.max_entries < attr->max_entries)
530                         htab->map.max_entries = rounddown(attr->max_entries,
531                                                           num_possible_cpus());
532         }
533
534         /* hash table size must be power of 2 */
535         htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
536
537         htab->elem_size = sizeof(struct htab_elem) +
538                           round_up(htab->map.key_size, 8);
539         if (percpu)
540                 htab->elem_size += sizeof(void *);
541         else
542                 htab->elem_size += round_up(htab->map.value_size, 8);
543
544         err = -E2BIG;
545         /* prevent zero size kmalloc and check for u32 overflow */
546         if (htab->n_buckets == 0 ||
547             htab->n_buckets > U32_MAX / sizeof(struct bucket))
548                 goto free_htab;
549
550         err = -ENOMEM;
551         htab->buckets = bpf_map_area_alloc(htab->n_buckets *
552                                            sizeof(struct bucket),
553                                            htab->map.numa_node);
554         if (!htab->buckets)
555                 goto free_htab;
556
557         for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
558                 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
559                                                            sizeof(int),
560                                                            sizeof(int),
561                                                            GFP_USER);
562                 if (!htab->map_locked[i])
563                         goto free_map_locked;
564         }
565
566         if (htab->map.map_flags & BPF_F_ZERO_SEED)
567                 htab->hashrnd = 0;
568         else
569                 htab->hashrnd = get_random_int();
570
571         htab_init_buckets(htab);
572
573 /* compute_batch_value() computes batch value as num_online_cpus() * 2
574  * and __percpu_counter_compare() needs
575  * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus()
576  * for percpu_counter to be faster than atomic_t. In practice the average bpf
577  * hash map size is 10k, which means that a system with 64 cpus will fill
578  * the hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore
579  * define our own batch count as 32, so that a 10k hash map can be filled up to 80%:
580  * 10k - 8k > 32 _batch_ * 64 _cpus_
581  * and __percpu_counter_compare() will still be fast. At that point hash map
582  * collisions will dominate its performance anyway. Assume that a hash map filled
583  * to 50+% isn't going to be O(1), and use the following formula to choose
584  * between percpu_counter and atomic_t.
585  */
586 #define PERCPU_COUNTER_BATCH 32
587         if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH)
588                 htab->use_percpu_counter = true;
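        /* Worked example: with 64 online CPUs the percpu_counter is only
         * used when max_entries > 2 * 64 * 32 = 4096; smaller maps keep
         * using the plain atomic_t count.
         */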
589
590         if (htab->use_percpu_counter) {
591                 err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
592                 if (err)
593                         goto free_map_locked;
594         }
595
596         if (prealloc) {
597                 err = prealloc_init(htab);
598                 if (err)
599                         goto free_map_locked;
600
601                 if (!percpu && !lru) {
602                         /* lru itself can remove the least used element, so
603                          * there is no need for an extra elem during map_update.
604                          */
605                         err = alloc_extra_elems(htab);
606                         if (err)
607                                 goto free_prealloc;
608                 }
609         } else {
610                 err = bpf_mem_alloc_init(&htab->ma, htab->elem_size);
611                 if (err)
612                         goto free_map_locked;
613         }
614
615         return &htab->map;
616
617 free_prealloc:
618         prealloc_destroy(htab);
619 free_map_locked:
620         for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
621                 free_percpu(htab->map_locked[i]);
622         bpf_map_area_free(htab->buckets);
623         bpf_mem_alloc_destroy(&htab->ma);
624 free_htab:
625         lockdep_unregister_key(&htab->lockdep_key);
626         bpf_map_area_free(htab);
627         return ERR_PTR(err);
628 }
629
630 static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
631 {
632         return jhash(key, key_len, hashrnd);
633 }
634
635 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
636 {
637         return &htab->buckets[hash & (htab->n_buckets - 1)];
638 }
639
640 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
641 {
642         return &__select_bucket(htab, hash)->head;
643 }
644
645 /* this lookup function can only be called with bucket lock taken */
646 static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
647                                          void *key, u32 key_size)
648 {
649         struct hlist_nulls_node *n;
650         struct htab_elem *l;
651
652         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
653                 if (l->hash == hash && !memcmp(&l->key, key, key_size))
654                         return l;
655
656         return NULL;
657 }
658
659 /* Can be called without the bucket lock. It repeats the walk in the
660  * unlikely event that elements moved to another bucket while the list
661  * was being walked (detected via the nulls value, i.e. the bucket index).
662  */
663 static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
664                                                u32 hash, void *key,
665                                                u32 key_size, u32 n_buckets)
666 {
667         struct hlist_nulls_node *n;
668         struct htab_elem *l;
669
670 again:
671         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
672                 if (l->hash == hash && !memcmp(&l->key, key, key_size))
673                         return l;
674
675         if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
676                 goto again;
677
678         return NULL;
679 }
680
681 /* Called from syscall or from eBPF program directly, so
682  * arguments have to match bpf_map_lookup_elem() exactly.
683  * The return value is adjusted by BPF instructions
684  * in htab_map_gen_lookup().
685  */
686 static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
687 {
688         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
689         struct hlist_nulls_head *head;
690         struct htab_elem *l;
691         u32 hash, key_size;
692
693         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
694                      !rcu_read_lock_bh_held());
695
696         key_size = map->key_size;
697
698         hash = htab_map_hash(key, key_size, htab->hashrnd);
699
700         head = select_bucket(htab, hash);
701
702         l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
703
704         return l;
705 }
706
707 static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
708 {
709         struct htab_elem *l = __htab_map_lookup_elem(map, key);
710
711         if (l)
712                 return l->key + round_up(map->key_size, 8);
713
714         return NULL;
715 }
716
717 /* inline bpf_map_lookup_elem() call.
718  * Instead of:
719  * bpf_prog
720  *   bpf_map_lookup_elem
721  *     map->ops->map_lookup_elem
722  *       htab_map_lookup_elem
723  *         __htab_map_lookup_elem
724  * do:
725  * bpf_prog
726  *   __htab_map_lookup_elem
727  */
728 static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
729 {
730         struct bpf_insn *insn = insn_buf;
731         const int ret = BPF_REG_0;
732
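        /* The emitted sequence below is: call __htab_map_lookup_elem(map, key);
         * if the element pointer returned in R0 is NULL, skip the next
         * instruction; otherwise add the offset of key[] plus the rounded-up
         * key size so R0 points at the value, as htab_map_lookup_elem() does.
         */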
733         BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
734                      (void *(*)(struct bpf_map *map, void *key))NULL));
735         *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
736         *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
737         *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
738                                 offsetof(struct htab_elem, key) +
739                                 round_up(map->key_size, 8));
740         return insn - insn_buf;
741 }
742
743 static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
744                                                         void *key, const bool mark)
745 {
746         struct htab_elem *l = __htab_map_lookup_elem(map, key);
747
748         if (l) {
749                 if (mark)
750                         bpf_lru_node_set_ref(&l->lru_node);
751                 return l->key + round_up(map->key_size, 8);
752         }
753
754         return NULL;
755 }
756
757 static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
758 {
759         return __htab_lru_map_lookup_elem(map, key, true);
760 }
761
762 static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
763 {
764         return __htab_lru_map_lookup_elem(map, key, false);
765 }
766
767 static int htab_lru_map_gen_lookup(struct bpf_map *map,
768                                    struct bpf_insn *insn_buf)
769 {
770         struct bpf_insn *insn = insn_buf;
771         const int ret = BPF_REG_0;
772         const int ref_reg = BPF_REG_1;
773
774         BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
775                      (void *(*)(struct bpf_map *map, void *key))NULL));
776         *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
777         *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
778         *insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
779                               offsetof(struct htab_elem, lru_node) +
780                               offsetof(struct bpf_lru_node, ref));
781         *insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
782         *insn++ = BPF_ST_MEM(BPF_B, ret,
783                              offsetof(struct htab_elem, lru_node) +
784                              offsetof(struct bpf_lru_node, ref),
785                              1);
786         *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
787                                 offsetof(struct htab_elem, key) +
788                                 round_up(map->key_size, 8));
789         return insn - insn_buf;
790 }
791
792 static void check_and_free_fields(struct bpf_htab *htab,
793                                   struct htab_elem *elem)
794 {
795         void *map_value = elem->key + round_up(htab->map.key_size, 8);
796
797         if (map_value_has_timer(&htab->map))
798                 bpf_timer_cancel_and_free(map_value + htab->map.timer_off);
799         if (map_value_has_kptrs(&htab->map))
800                 bpf_map_free_kptrs(&htab->map, map_value);
801 }
802
803 /* It is called from the bpf_lru_list when the LRU needs to delete
804  * older elements from the htab.
805  */
806 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
807 {
808         struct bpf_htab *htab = arg;
809         struct htab_elem *l = NULL, *tgt_l;
810         struct hlist_nulls_head *head;
811         struct hlist_nulls_node *n;
812         unsigned long flags;
813         struct bucket *b;
814         int ret;
815
816         tgt_l = container_of(node, struct htab_elem, lru_node);
817         b = __select_bucket(htab, tgt_l->hash);
818         head = &b->head;
819
820         ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
821         if (ret)
822                 return false;
823
824         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
825                 if (l == tgt_l) {
826                         hlist_nulls_del_rcu(&l->hash_node);
827                         check_and_free_fields(htab, l);
828                         break;
829                 }
830
831         htab_unlock_bucket(htab, b, tgt_l->hash, flags);
832
833         return l == tgt_l;
834 }
835
836 /* Called from syscall */
837 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
838 {
839         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
840         struct hlist_nulls_head *head;
841         struct htab_elem *l, *next_l;
842         u32 hash, key_size;
843         int i = 0;
844
845         WARN_ON_ONCE(!rcu_read_lock_held());
846
847         key_size = map->key_size;
848
849         if (!key)
850                 goto find_first_elem;
851
852         hash = htab_map_hash(key, key_size, htab->hashrnd);
853
854         head = select_bucket(htab, hash);
855
856         /* lookup the key */
857         l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
858
859         if (!l)
860                 goto find_first_elem;
861
862         /* key was found, get next key in the same bucket */
863         next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
864                                   struct htab_elem, hash_node);
865
866         if (next_l) {
867                 /* if next elem in this hash list is non-zero, just return it */
868                 memcpy(next_key, next_l->key, key_size);
869                 return 0;
870         }
871
872         /* no more elements in this hash list, go to the next bucket */
873         i = hash & (htab->n_buckets - 1);
874         i++;
875
876 find_first_elem:
877         /* iterate over buckets */
878         for (; i < htab->n_buckets; i++) {
879                 head = select_bucket(htab, i);
880
881                 /* pick first element in the bucket */
882                 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
883                                           struct htab_elem, hash_node);
884                 if (next_l) {
885                         /* if it's not empty, just return it */
886                         memcpy(next_key, next_l->key, key_size);
887                         return 0;
888                 }
889         }
890
891         /* iterated over all buckets and all elements */
892         return -ENOENT;
893 }
894
895 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
896 {
897         if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
898                 free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
899         check_and_free_fields(htab, l);
900         bpf_mem_cache_free(&htab->ma, l);
901 }
902
903 static void htab_elem_free_rcu(struct rcu_head *head)
904 {
905         struct htab_elem *l = container_of(head, struct htab_elem, rcu);
906         struct bpf_htab *htab = l->htab;
907
908         htab_elem_free(htab, l);
909 }
910
911 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
912 {
913         struct bpf_map *map = &htab->map;
914         void *ptr;
915
916         if (map->ops->map_fd_put_ptr) {
917                 ptr = fd_htab_map_get_ptr(map, l);
918                 map->ops->map_fd_put_ptr(ptr);
919         }
920 }
921
922 static bool is_map_full(struct bpf_htab *htab)
923 {
924         if (htab->use_percpu_counter)
925                 return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
926                                                 PERCPU_COUNTER_BATCH) >= 0;
927         return atomic_read(&htab->count) >= htab->map.max_entries;
928 }
929
930 static void inc_elem_count(struct bpf_htab *htab)
931 {
932         if (htab->use_percpu_counter)
933                 percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
934         else
935                 atomic_inc(&htab->count);
936 }
937
938 static void dec_elem_count(struct bpf_htab *htab)
939 {
940         if (htab->use_percpu_counter)
941                 percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
942         else
943                 atomic_dec(&htab->count);
944 }
945
946
947 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
948 {
949         htab_put_fd_value(htab, l);
950
951         if (htab_is_prealloc(htab)) {
952                 check_and_free_fields(htab, l);
953                 __pcpu_freelist_push(&htab->freelist, &l->fnode);
954         } else {
955                 dec_elem_count(htab);
956                 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) {
957                         l->htab = htab;
958                         call_rcu(&l->rcu, htab_elem_free_rcu);
959                 } else {
960                         htab_elem_free(htab, l);
961                 }
962         }
963 }
964
965 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
966                             void *value, bool onallcpus)
967 {
968         if (!onallcpus) {
969                 /* copy true value_size bytes */
970                 memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
971         } else {
972                 u32 size = round_up(htab->map.value_size, 8);
973                 int off = 0, cpu;
974
975                 for_each_possible_cpu(cpu) {
976                         bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
977                                         value + off, size);
978                         off += size;
979                 }
980         }
981 }
982
983 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
984                             void *value, bool onallcpus)
985 {
986         /* When using prealloc and not setting the initial value on all cpus,
987          * zero-fill element values for other cpus (just as happens when
988          * not using prealloc). Otherwise, the bpf program has no way to ensure
989          * known initial values for cpus other than the current one
990          * (onallcpus is always false when coming from a bpf prog).
991          */
992         if (htab_is_prealloc(htab) && !onallcpus) {
993                 u32 size = round_up(htab->map.value_size, 8);
994                 int current_cpu = raw_smp_processor_id();
995                 int cpu;
996
997                 for_each_possible_cpu(cpu) {
998                         if (cpu == current_cpu)
999                                 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value,
1000                                                 size);
1001                         else
1002                                 memset(per_cpu_ptr(pptr, cpu), 0, size);
1003                 }
1004         } else {
1005                 pcpu_copy_value(htab, pptr, value, onallcpus);
1006         }
1007 }
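/*
 * Illustrative sketch, not part of this file, assuming libbpf: on the
 * syscall-side update path (onallcpus == true) the user-supplied buffer
 * carries one value per possible CPU, each slot padded to 8 bytes, which
 * matches the stride used by pcpu_copy_value() above.  The helper name is
 * made up for illustration.
 */
#if 0
#include <stdlib.h>
#include <string.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int update_percpu_value(int map_fd, int key, long val)
{
        int ncpus = libbpf_num_possible_cpus();
        size_t slot = 8;        /* round_up(sizeof(long), 8) */
        char *buf;
        int i, err;

        if (ncpus < 0)
                return ncpus;
        buf = calloc(ncpus, slot);
        if (!buf)
                return -1;
        /* Same value for every possible CPU; each slot is 8-byte aligned. */
        for (i = 0; i < ncpus; i++)
                memcpy(buf + i * slot, &val, sizeof(val));
        err = bpf_map_update_elem(map_fd, &key, buf, BPF_ANY);
        free(buf);
        return err;
}
#endif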
1008
1009 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
1010 {
1011         return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
1012                BITS_PER_LONG == 64;
1013 }
1014
1015 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
1016                                          void *value, u32 key_size, u32 hash,
1017                                          bool percpu, bool onallcpus,
1018                                          struct htab_elem *old_elem)
1019 {
1020         u32 size = htab->map.value_size;
1021         bool prealloc = htab_is_prealloc(htab);
1022         struct htab_elem *l_new, **pl_new;
1023         void __percpu *pptr;
1024
1025         if (prealloc) {
1026                 if (old_elem) {
1027                         /* if we're updating the existing element,
1028                          * use per-cpu extra elems to avoid freelist_pop/push
1029                          */
1030                         pl_new = this_cpu_ptr(htab->extra_elems);
1031                         l_new = *pl_new;
1032                         htab_put_fd_value(htab, old_elem);
1033                         *pl_new = old_elem;
1034                 } else {
1035                         struct pcpu_freelist_node *l;
1036
1037                         l = __pcpu_freelist_pop(&htab->freelist);
1038                         if (!l)
1039                                 return ERR_PTR(-E2BIG);
1040                         l_new = container_of(l, struct htab_elem, fnode);
1041                 }
1042         } else {
1043                 if (is_map_full(htab))
1044                         if (!old_elem)
1045                                 /* when the map is full and update() is replacing an
1046                                  * old element, it's ok to allocate, since the
1047                                  * old element will be freed immediately.
1048                                  * Otherwise return an error
1049                                  */
1050                                 return ERR_PTR(-E2BIG);
1051                 inc_elem_count(htab);
1052                 l_new = bpf_mem_cache_alloc(&htab->ma);
1053                 if (!l_new) {
1054                         l_new = ERR_PTR(-ENOMEM);
1055                         goto dec_count;
1056                 }
1057                 check_and_init_map_value(&htab->map,
1058                                          l_new->key + round_up(key_size, 8));
1059         }
1060
1061         memcpy(l_new->key, key, key_size);
1062         if (percpu) {
1063                 size = round_up(size, 8);
1064                 if (prealloc) {
1065                         pptr = htab_elem_get_ptr(l_new, key_size);
1066                 } else {
1067                         /* alloc_percpu zero-fills */
1068                         pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
1069                                                     GFP_NOWAIT | __GFP_NOWARN);
1070                         if (!pptr) {
1071                                 bpf_mem_cache_free(&htab->ma, l_new);
1072                                 l_new = ERR_PTR(-ENOMEM);
1073                                 goto dec_count;
1074                         }
1075                 }
1076
1077                 pcpu_init_value(htab, pptr, value, onallcpus);
1078
1079                 if (!prealloc)
1080                         htab_elem_set_ptr(l_new, key_size, pptr);
1081         } else if (fd_htab_map_needs_adjust(htab)) {
1082                 size = round_up(size, 8);
1083                 memcpy(l_new->key + round_up(key_size, 8), value, size);
1084         } else {
1085                 copy_map_value(&htab->map,
1086                                l_new->key + round_up(key_size, 8),
1087                                value);
1088         }
1089
1090         l_new->hash = hash;
1091         return l_new;
1092 dec_count:
1093         dec_elem_count(htab);
1094         return l_new;
1095 }
1096
1097 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
1098                        u64 map_flags)
1099 {
1100         if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
1101                 /* elem already exists */
1102                 return -EEXIST;
1103
1104         if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
1105                 /* elem doesn't exist, cannot update it */
1106                 return -ENOENT;
1107
1108         return 0;
1109 }
1110
1111 /* Called from syscall or from eBPF program */
1112 static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
1113                                 u64 map_flags)
1114 {
1115         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1116         struct htab_elem *l_new = NULL, *l_old;
1117         struct hlist_nulls_head *head;
1118         unsigned long flags;
1119         struct bucket *b;
1120         u32 key_size, hash;
1121         int ret;
1122
1123         if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
1124                 /* unknown flags */
1125                 return -EINVAL;
1126
1127         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1128                      !rcu_read_lock_bh_held());
1129
1130         key_size = map->key_size;
1131
1132         hash = htab_map_hash(key, key_size, htab->hashrnd);
1133
1134         b = __select_bucket(htab, hash);
1135         head = &b->head;
1136
1137         if (unlikely(map_flags & BPF_F_LOCK)) {
1138                 if (unlikely(!map_value_has_spin_lock(map)))
1139                         return -EINVAL;
1140                 /* find an element without taking the bucket lock */
1141                 l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
1142                                               htab->n_buckets);
1143                 ret = check_flags(htab, l_old, map_flags);
1144                 if (ret)
1145                         return ret;
1146                 if (l_old) {
1147                         /* grab the element lock and update value in place */
1148                         copy_map_value_locked(map,
1149                                               l_old->key + round_up(key_size, 8),
1150                                               value, false);
1151                         return 0;
1152                 }
1153                 /* fall through, grab the bucket lock and lookup again.
1154                  * 99.9% chance that the element won't be found,
1155                  * but second lookup under lock has to be done.
1156                  */
1157         }
1158
1159         ret = htab_lock_bucket(htab, b, hash, &flags);
1160         if (ret)
1161                 return ret;
1162
1163         l_old = lookup_elem_raw(head, hash, key, key_size);
1164
1165         ret = check_flags(htab, l_old, map_flags);
1166         if (ret)
1167                 goto err;
1168
1169         if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
1170                 /* first lookup without the bucket lock didn't find the element,
1171                  * but second lookup with the bucket lock found it.
1172                  * This case is highly unlikely, but has to be dealt with:
1173                  * grab the element lock in addition to the bucket lock
1174                  * and update element in place
1175                  */
1176                 copy_map_value_locked(map,
1177                                       l_old->key + round_up(key_size, 8),
1178                                       value, false);
1179                 ret = 0;
1180                 goto err;
1181         }
1182
1183         l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
1184                                 l_old);
1185         if (IS_ERR(l_new)) {
1186                 /* all pre-allocated elements are in use or memory exhausted */
1187                 ret = PTR_ERR(l_new);
1188                 goto err;
1189         }
1190
1191         /* add new element to the head of the list, so that
1192          * concurrent search will find it before old elem
1193          */
1194         hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1195         if (l_old) {
1196                 hlist_nulls_del_rcu(&l_old->hash_node);
1197                 if (!htab_is_prealloc(htab))
1198                         free_htab_elem(htab, l_old);
1199                 else
1200                         check_and_free_fields(htab, l_old);
1201         }
1202         ret = 0;
1203 err:
1204         htab_unlock_bucket(htab, b, hash, flags);
1205         return ret;
1206 }
1207
1208 static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
1209 {
1210         check_and_free_fields(htab, elem);
1211         bpf_lru_push_free(&htab->lru, &elem->lru_node);
1212 }
1213
1214 static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
1215                                     u64 map_flags)
1216 {
1217         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1218         struct htab_elem *l_new, *l_old = NULL;
1219         struct hlist_nulls_head *head;
1220         unsigned long flags;
1221         struct bucket *b;
1222         u32 key_size, hash;
1223         int ret;
1224
1225         if (unlikely(map_flags > BPF_EXIST))
1226                 /* unknown flags */
1227                 return -EINVAL;
1228
1229         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1230                      !rcu_read_lock_bh_held());
1231
1232         key_size = map->key_size;
1233
1234         hash = htab_map_hash(key, key_size, htab->hashrnd);
1235
1236         b = __select_bucket(htab, hash);
1237         head = &b->head;
1238
1239         /* For LRU, we need to alloc before taking bucket's
1240          * spinlock because getting free nodes from LRU may need
1241          * to remove older elements from htab and this removal
1242          * operation will need a bucket lock.
1243          */
1244         l_new = prealloc_lru_pop(htab, key, hash);
1245         if (!l_new)
1246                 return -ENOMEM;
1247         copy_map_value(&htab->map,
1248                        l_new->key + round_up(map->key_size, 8), value);
1249
1250         ret = htab_lock_bucket(htab, b, hash, &flags);
1251         if (ret)
1252                 return ret;
1253
1254         l_old = lookup_elem_raw(head, hash, key, key_size);
1255
1256         ret = check_flags(htab, l_old, map_flags);
1257         if (ret)
1258                 goto err;
1259
1260         /* add new element to the head of the list, so that
1261          * concurrent search will find it before old elem
1262          */
1263         hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1264         if (l_old) {
1265                 bpf_lru_node_set_ref(&l_new->lru_node);
1266                 hlist_nulls_del_rcu(&l_old->hash_node);
1267         }
1268         ret = 0;
1269
1270 err:
1271         htab_unlock_bucket(htab, b, hash, flags);
1272
1273         if (ret)
1274                 htab_lru_push_free(htab, l_new);
1275         else if (l_old)
1276                 htab_lru_push_free(htab, l_old);
1277
1278         return ret;
1279 }
1280
1281 static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1282                                          void *value, u64 map_flags,
1283                                          bool onallcpus)
1284 {
1285         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1286         struct htab_elem *l_new = NULL, *l_old;
1287         struct hlist_nulls_head *head;
1288         unsigned long flags;
1289         struct bucket *b;
1290         u32 key_size, hash;
1291         int ret;
1292
1293         if (unlikely(map_flags > BPF_EXIST))
1294                 /* unknown flags */
1295                 return -EINVAL;
1296
1297         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1298                      !rcu_read_lock_bh_held());
1299
1300         key_size = map->key_size;
1301
1302         hash = htab_map_hash(key, key_size, htab->hashrnd);
1303
1304         b = __select_bucket(htab, hash);
1305         head = &b->head;
1306
1307         ret = htab_lock_bucket(htab, b, hash, &flags);
1308         if (ret)
1309                 return ret;
1310
1311         l_old = lookup_elem_raw(head, hash, key, key_size);
1312
1313         ret = check_flags(htab, l_old, map_flags);
1314         if (ret)
1315                 goto err;
1316
1317         if (l_old) {
1318                 /* per-cpu hash map can update value in-place */
1319                 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1320                                 value, onallcpus);
1321         } else {
1322                 l_new = alloc_htab_elem(htab, key, value, key_size,
1323                                         hash, true, onallcpus, NULL);
1324                 if (IS_ERR(l_new)) {
1325                         ret = PTR_ERR(l_new);
1326                         goto err;
1327                 }
1328                 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1329         }
1330         ret = 0;
1331 err:
1332         htab_unlock_bucket(htab, b, hash, flags);
1333         return ret;
1334 }
1335
1336 static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1337                                              void *value, u64 map_flags,
1338                                              bool onallcpus)
1339 {
1340         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1341         struct htab_elem *l_new = NULL, *l_old;
1342         struct hlist_nulls_head *head;
1343         unsigned long flags;
1344         struct bucket *b;
1345         u32 key_size, hash;
1346         int ret;
1347
1348         if (unlikely(map_flags > BPF_EXIST))
1349                 /* unknown flags */
1350                 return -EINVAL;
1351
1352         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1353                      !rcu_read_lock_bh_held());
1354
1355         key_size = map->key_size;
1356
1357         hash = htab_map_hash(key, key_size, htab->hashrnd);
1358
1359         b = __select_bucket(htab, hash);
1360         head = &b->head;
1361
1362         /* For LRU, we need to alloc before taking bucket's
1363          * spinlock because LRU's elem alloc may need
1364          * to remove older elem from htab and this removal
1365          * operation will need a bucket lock.
1366          */
1367         if (map_flags != BPF_EXIST) {
1368                 l_new = prealloc_lru_pop(htab, key, hash);
1369                 if (!l_new)
1370                         return -ENOMEM;
1371         }
1372
1373         ret = htab_lock_bucket(htab, b, hash, &flags);
1374         if (ret)
1375                 return ret;
1376
1377         l_old = lookup_elem_raw(head, hash, key, key_size);
1378
1379         ret = check_flags(htab, l_old, map_flags);
1380         if (ret)
1381                 goto err;
1382
1383         if (l_old) {
1384                 bpf_lru_node_set_ref(&l_old->lru_node);
1385
1386                 /* per-cpu hash map can update value in-place */
1387                 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1388                                 value, onallcpus);
1389         } else {
1390                 pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
1391                                 value, onallcpus);
1392                 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1393                 l_new = NULL;
1394         }
1395         ret = 0;
1396 err:
1397         htab_unlock_bucket(htab, b, hash, flags);
1398         if (l_new)
1399                 bpf_lru_push_free(&htab->lru, &l_new->lru_node);
1400         return ret;
1401 }
1402
1403 static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1404                                        void *value, u64 map_flags)
1405 {
1406         return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
1407 }
1408
1409 static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1410                                            void *value, u64 map_flags)
1411 {
1412         return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
1413                                                  false);
1414 }
1415
1416 /* Called from syscall or from eBPF program */
1417 static int htab_map_delete_elem(struct bpf_map *map, void *key)
1418 {
1419         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1420         struct hlist_nulls_head *head;
1421         struct bucket *b;
1422         struct htab_elem *l;
1423         unsigned long flags;
1424         u32 hash, key_size;
1425         int ret;
1426
1427         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1428                      !rcu_read_lock_bh_held());
1429
1430         key_size = map->key_size;
1431
1432         hash = htab_map_hash(key, key_size, htab->hashrnd);
1433         b = __select_bucket(htab, hash);
1434         head = &b->head;
1435
1436         ret = htab_lock_bucket(htab, b, hash, &flags);
1437         if (ret)
1438                 return ret;
1439
1440         l = lookup_elem_raw(head, hash, key, key_size);
1441
1442         if (l) {
1443                 hlist_nulls_del_rcu(&l->hash_node);
1444                 free_htab_elem(htab, l);
1445         } else {
1446                 ret = -ENOENT;
1447         }
1448
1449         htab_unlock_bucket(htab, b, hash, flags);
1450         return ret;
1451 }
1452
1453 static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
1454 {
1455         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1456         struct hlist_nulls_head *head;
1457         struct bucket *b;
1458         struct htab_elem *l;
1459         unsigned long flags;
1460         u32 hash, key_size;
1461         int ret;
1462
1463         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1464                      !rcu_read_lock_bh_held());
1465
1466         key_size = map->key_size;
1467
1468         hash = htab_map_hash(key, key_size, htab->hashrnd);
1469         b = __select_bucket(htab, hash);
1470         head = &b->head;
1471
1472         ret = htab_lock_bucket(htab, b, hash, &flags);
1473         if (ret)
1474                 return ret;
1475
1476         l = lookup_elem_raw(head, hash, key, key_size);
1477
1478         if (l)
1479                 hlist_nulls_del_rcu(&l->hash_node);
1480         else
1481                 ret = -ENOENT;
1482
1483         htab_unlock_bucket(htab, b, hash, flags);
1484         if (l)
1485                 htab_lru_push_free(htab, l);
1486         return ret;
1487 }
1488
1489 static void delete_all_elements(struct bpf_htab *htab)
1490 {
1491         int i;
1492
1493         /* It's called from a worker thread, so disable migration here,
1494          * since bpf_mem_cache_free() relies on that.
1495          */
1496         migrate_disable();
1497         for (i = 0; i < htab->n_buckets; i++) {
1498                 struct hlist_nulls_head *head = select_bucket(htab, i);
1499                 struct hlist_nulls_node *n;
1500                 struct htab_elem *l;
1501
1502                 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1503                         hlist_nulls_del_rcu(&l->hash_node);
1504                         htab_elem_free(htab, l);
1505                 }
1506         }
1507         migrate_enable();
1508 }
1509
1510 static void htab_free_malloced_timers(struct bpf_htab *htab)
1511 {
1512         int i;
1513
1514         rcu_read_lock();
1515         for (i = 0; i < htab->n_buckets; i++) {
1516                 struct hlist_nulls_head *head = select_bucket(htab, i);
1517                 struct hlist_nulls_node *n;
1518                 struct htab_elem *l;
1519
1520                 hlist_nulls_for_each_entry(l, n, head, hash_node) {
1521                         /* We don't reset or free kptrs when the uref drops
1522                          * to zero, hence just free the timer.
1523                          */
1524                         bpf_timer_cancel_and_free(l->key +
1525                                                   round_up(htab->map.key_size, 8) +
1526                                                   htab->map.timer_off);
1527                 }
1528                 cond_resched_rcu();
1529         }
1530         rcu_read_unlock();
1531 }
1532
1533 static void htab_map_free_timers(struct bpf_map *map)
1534 {
1535         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1536
1537         /* We don't reset or free kptrs when the uref drops to zero. */
1538         if (!map_value_has_timer(&htab->map))
1539                 return;
1540         if (!htab_is_prealloc(htab))
1541                 htab_free_malloced_timers(htab);
1542         else
1543                 htab_free_prealloced_timers(htab);
1544 }
1545
1546 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
1547 static void htab_map_free(struct bpf_map *map)
1548 {
1549         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1550         int i;
1551
1552         /* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
1553          * bpf_free_used_maps() is called after bpf prog is no longer executing.
1554          * There is no need to synchronize_rcu() here to protect map elements.
1555          */
1556
1557         /* Some free_htab_elem() callbacks for elements of this map may
1558          * not have executed yet. Wait for them.
1559          */
1560         rcu_barrier();
1561         if (!htab_is_prealloc(htab)) {
1562                 delete_all_elements(htab);
1563         } else {
1564                 htab_free_prealloced_kptrs(htab);
1565                 prealloc_destroy(htab);
1566         }
1567
1568         bpf_map_free_kptr_off_tab(map);
1569         free_percpu(htab->extra_elems);
1570         bpf_map_area_free(htab->buckets);
1571         bpf_mem_alloc_destroy(&htab->ma);
1572         if (htab->use_percpu_counter)
1573                 percpu_counter_destroy(&htab->pcount);
1574         for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
1575                 free_percpu(htab->map_locked[i]);
1576         lockdep_unregister_key(&htab->lockdep_key);
1577         bpf_map_area_free(htab);
1578 }
1579
1580 static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
1581                                    struct seq_file *m)
1582 {
1583         void *value;
1584
1585         rcu_read_lock();
1586
1587         value = htab_map_lookup_elem(map, key);
1588         if (!value) {
1589                 rcu_read_unlock();
1590                 return;
1591         }
1592
1593         btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
1594         seq_puts(m, ": ");
1595         btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
1596         seq_puts(m, "\n");
1597
1598         rcu_read_unlock();
1599 }
1600
1601 static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1602                                              void *value, bool is_lru_map,
1603                                              bool is_percpu, u64 flags)
1604 {
1605         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1606         struct hlist_nulls_head *head;
1607         unsigned long bflags;
1608         struct htab_elem *l;
1609         u32 hash, key_size;
1610         struct bucket *b;
1611         int ret;
1612
1613         key_size = map->key_size;
1614
1615         hash = htab_map_hash(key, key_size, htab->hashrnd);
1616         b = __select_bucket(htab, hash);
1617         head = &b->head;
1618
1619         ret = htab_lock_bucket(htab, b, hash, &bflags);
1620         if (ret)
1621                 return ret;
1622
1623         l = lookup_elem_raw(head, hash, key, key_size);
1624         if (!l) {
1625                 ret = -ENOENT;
1626         } else {
1627                 if (is_percpu) {
1628                         u32 roundup_value_size = round_up(map->value_size, 8);
1629                         void __percpu *pptr;
1630                         int off = 0, cpu;
1631
1632                         pptr = htab_elem_get_ptr(l, key_size);
1633                         for_each_possible_cpu(cpu) {
1634                                 bpf_long_memcpy(value + off,
1635                                                 per_cpu_ptr(pptr, cpu),
1636                                                 roundup_value_size);
1637                                 off += roundup_value_size;
1638                         }
1639                 } else {
1640                         u32 roundup_key_size = round_up(map->key_size, 8);
1641
1642                         if (flags & BPF_F_LOCK)
1643                                 copy_map_value_locked(map, value, l->key +
1644                                                       roundup_key_size,
1645                                                       true);
1646                         else
1647                                 copy_map_value(map, value, l->key +
1648                                                roundup_key_size);
1649                         check_and_init_map_value(map, value);
1650                 }
1651
1652                 hlist_nulls_del_rcu(&l->hash_node);
1653                 if (!is_lru_map)
1654                         free_htab_elem(htab, l);
1655         }
1656
1657         htab_unlock_bucket(htab, b, hash, bflags);
1658
1659         if (is_lru_map && l)
1660                 htab_lru_push_free(htab, l);
1661
1662         return ret;
1663 }
1664
1665 static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1666                                            void *value, u64 flags)
1667 {
1668         return __htab_map_lookup_and_delete_elem(map, key, value, false, false,
1669                                                  flags);
1670 }
1671
1672 static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1673                                                   void *key, void *value,
1674                                                   u64 flags)
1675 {
1676         return __htab_map_lookup_and_delete_elem(map, key, value, false, true,
1677                                                  flags);
1678 }
1679
1680 static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1681                                                void *value, u64 flags)
1682 {
1683         return __htab_map_lookup_and_delete_elem(map, key, value, true, false,
1684                                                  flags);
1685 }
1686
1687 static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1688                                                       void *key, void *value,
1689                                                       u64 flags)
1690 {
1691         return __htab_map_lookup_and_delete_elem(map, key, value, true, true,
1692                                                  flags);
1693 }
1694
1695 static int
1696 __htab_map_lookup_and_delete_batch(struct bpf_map *map,
1697                                    const union bpf_attr *attr,
1698                                    union bpf_attr __user *uattr,
1699                                    bool do_delete, bool is_lru_map,
1700                                    bool is_percpu)
1701 {
1702         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1703         u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
1704         void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
1705         void __user *uvalues = u64_to_user_ptr(attr->batch.values);
1706         void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
1707         void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1708         u32 batch, max_count, size, bucket_size, map_id;
1709         struct htab_elem *node_to_free = NULL;
1710         u64 elem_map_flags, map_flags;
1711         struct hlist_nulls_head *head;
1712         struct hlist_nulls_node *n;
1713         unsigned long flags = 0;
1714         bool locked = false;
1715         struct htab_elem *l;
1716         struct bucket *b;
1717         int ret = 0;
1718
1719         elem_map_flags = attr->batch.elem_flags;
1720         if ((elem_map_flags & ~BPF_F_LOCK) ||
1721             ((elem_map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)))
1722                 return -EINVAL;
1723
1724         map_flags = attr->batch.flags;
1725         if (map_flags)
1726                 return -EINVAL;
1727
1728         max_count = attr->batch.count;
1729         if (!max_count)
1730                 return 0;
1731
1732         if (put_user(0, &uattr->batch.count))
1733                 return -EFAULT;
1734
1735         batch = 0;
1736         if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
1737                 return -EFAULT;
1738
1739         if (batch >= htab->n_buckets)
1740                 return -ENOENT;
1741
1742         key_size = htab->map.key_size;
1743         roundup_key_size = round_up(htab->map.key_size, 8);
1744         value_size = htab->map.value_size;
1745         size = round_up(value_size, 8);
1746         if (is_percpu)
1747                 value_size = size * num_possible_cpus();
1748         total = 0;
1749         /* while experimenting with hash tables with sizes ranging from 10 to
1750          * 1000, it was observed that a bucket can have up to 5 entries.
1751          */
1752         bucket_size = 5;
1753
1754 alloc:
1755         /* We cannot do copy_from_user or copy_to_user while holding
1756          * rcu_read_lock(). Allocate enough space here.
1757          */
1758         keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
1759         values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
1760         if (!keys || !values) {
1761                 ret = -ENOMEM;
1762                 goto after_loop;
1763         }
1764
1765 again:
1766         bpf_disable_instrumentation();
1767         rcu_read_lock();
1768 again_nocopy:
1769         dst_key = keys;
1770         dst_val = values;
1771         b = &htab->buckets[batch];
1772         head = &b->head;
1773         /* do not grab the lock unless we need it (bucket_cnt > 0). */
1774         if (locked) {
1775                 ret = htab_lock_bucket(htab, b, batch, &flags);
1776                 if (ret) {
1777                         rcu_read_unlock();
1778                         bpf_enable_instrumentation();
1779                         goto after_loop;
1780                 }
1781         }
1782
1783         bucket_cnt = 0;
1784         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
1785                 bucket_cnt++;
1786
1787         if (bucket_cnt && !locked) {
1788                 locked = true;
1789                 goto again_nocopy;
1790         }
1791
1792         if (bucket_cnt > (max_count - total)) {
1793                 if (total == 0)
1794                         ret = -ENOSPC;
1795                 /* Note that since bucket_cnt > 0 here, it is implicit
1796                  * that the lock was grabbed, so release it.
1797                  */
1798                 htab_unlock_bucket(htab, b, batch, flags);
1799                 rcu_read_unlock();
1800                 bpf_enable_instrumentation();
1801                 goto after_loop;
1802         }
1803
1804         if (bucket_cnt > bucket_size) {
1805                 bucket_size = bucket_cnt;
1806                 /* Note that since bucket_cnt > 0 here, it is implicit
1807                  * that the lock was grabbed, so release it.
1808                  */
1809                 htab_unlock_bucket(htab, b, batch, flags);
1810                 rcu_read_unlock();
1811                 bpf_enable_instrumentation();
1812                 kvfree(keys);
1813                 kvfree(values);
1814                 goto alloc;
1815         }
1816
1817         /* The next block is only safe to run if the bucket lock is held */
1818         if (!locked)
1819                 goto next_batch;
1820
1821         hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1822                 memcpy(dst_key, l->key, key_size);
1823
1824                 if (is_percpu) {
1825                         int off = 0, cpu;
1826                         void __percpu *pptr;
1827
1828                         pptr = htab_elem_get_ptr(l, map->key_size);
1829                         for_each_possible_cpu(cpu) {
1830                                 bpf_long_memcpy(dst_val + off,
1831                                                 per_cpu_ptr(pptr, cpu), size);
1832                                 off += size;
1833                         }
1834                 } else {
1835                         value = l->key + roundup_key_size;
1836                         if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
1837                                 struct bpf_map **inner_map = value;
1838
1839                                  /* Actual value is the id of the inner map */
1840                                 map_id = map->ops->map_fd_sys_lookup_elem(*inner_map);
1841                                 value = &map_id;
1842                         }
1843
1844                         if (elem_map_flags & BPF_F_LOCK)
1845                                 copy_map_value_locked(map, dst_val, value,
1846                                                       true);
1847                         else
1848                                 copy_map_value(map, dst_val, value);
1849                         check_and_init_map_value(map, dst_val);
1850                 }
1851                 if (do_delete) {
1852                         hlist_nulls_del_rcu(&l->hash_node);
1853
1854                         /* bpf_lru_push_free() will acquire lru_lock, which
1855                          * may cause deadlock. See comments in function
1856                          * prealloc_lru_pop(). Let us do bpf_lru_push_free()
1857                          * after releasing the bucket lock.
1858                          */
1859                         if (is_lru_map) {
1860                                 l->batch_flink = node_to_free;
1861                                 node_to_free = l;
1862                         } else {
1863                                 free_htab_elem(htab, l);
1864                         }
1865                 }
1866                 dst_key += key_size;
1867                 dst_val += value_size;
1868         }
1869
1870         htab_unlock_bucket(htab, b, batch, flags);
1871         locked = false;
1872
1873         while (node_to_free) {
1874                 l = node_to_free;
1875                 node_to_free = node_to_free->batch_flink;
1876                 htab_lru_push_free(htab, l);
1877         }
1878
1879 next_batch:
1880         /* If we are not copying data, we can go to the next bucket
1881          * without dropping the RCU read lock.
1882          */
1883         if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
1884                 batch++;
1885                 goto again_nocopy;
1886         }
1887
1888         rcu_read_unlock();
1889         bpf_enable_instrumentation();
1890         if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
1891             key_size * bucket_cnt) ||
1892             copy_to_user(uvalues + total * value_size, values,
1893             value_size * bucket_cnt))) {
1894                 ret = -EFAULT;
1895                 goto after_loop;
1896         }
1897
1898         total += bucket_cnt;
1899         batch++;
1900         if (batch >= htab->n_buckets) {
1901                 ret = -ENOENT;
1902                 goto after_loop;
1903         }
1904         goto again;
1905
1906 after_loop:
1907         if (ret == -EFAULT)
1908                 goto out;
1909
1910         /* copy # of entries and next batch */
1911         ubatch = u64_to_user_ptr(attr->batch.out_batch);
1912         if (copy_to_user(ubatch, &batch, sizeof(batch)) ||
1913             put_user(total, &uattr->batch.count))
1914                 ret = -EFAULT;
1915
1916 out:
1917         kvfree(keys);
1918         kvfree(values);
1919         return ret;
1920 }
1921
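/*
 * A minimal userspace sketch of driving the batch interface implemented above
 * (illustrative only, not part of hashtab.c; it assumes libbpf's
 * bpf_map_lookup_batch(), a hypothetical plain HASH map with __u32 keys and
 * values, and omits growing the buffers on -ENOSPC). in_batch/out_batch carry
 * the bucket cursor, and -ENOENT signals that the last bucket has been visited.
 */
#include <errno.h>
#include <stdio.h>
#include <bpf/bpf.h>

#define BATCH_SZ 64

static int dump_hash_map(int map_fd)
{
	__u32 in_batch = 0, out_batch = 0, count, i;
	__u32 keys[BATCH_SZ], vals[BATCH_SZ];
	void *in = NULL;	/* NULL on the first call: start from bucket 0 */
	int err;

	do {
		count = BATCH_SZ;
		err = bpf_map_lookup_batch(map_fd, in, &out_batch,
					   keys, vals, &count, NULL);
		if (err && err != -ENOENT && errno != ENOENT)
			return err;

		/* count entries were copied even on the final -ENOENT call */
		for (i = 0; i < count; i++)
			printf("%u -> %u\n", keys[i], vals[i]);

		in_batch = out_batch;
		in = &in_batch;
	} while (!err);

	return 0;
}
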
1922 static int
1923 htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1924                              union bpf_attr __user *uattr)
1925 {
1926         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1927                                                   false, true);
1928 }
1929
1930 static int
1931 htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1932                                         const union bpf_attr *attr,
1933                                         union bpf_attr __user *uattr)
1934 {
1935         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1936                                                   false, true);
1937 }
1938
1939 static int
1940 htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1941                       union bpf_attr __user *uattr)
1942 {
1943         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1944                                                   false, false);
1945 }
1946
1947 static int
1948 htab_map_lookup_and_delete_batch(struct bpf_map *map,
1949                                  const union bpf_attr *attr,
1950                                  union bpf_attr __user *uattr)
1951 {
1952         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1953                                                   false, false);
1954 }
1955
1956 static int
1957 htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
1958                                  const union bpf_attr *attr,
1959                                  union bpf_attr __user *uattr)
1960 {
1961         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1962                                                   true, true);
1963 }
1964
1965 static int
1966 htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1967                                             const union bpf_attr *attr,
1968                                             union bpf_attr __user *uattr)
1969 {
1970         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1971                                                   true, true);
1972 }
1973
1974 static int
1975 htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1976                           union bpf_attr __user *uattr)
1977 {
1978         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1979                                                   true, false);
1980 }
1981
1982 static int
1983 htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
1984                                      const union bpf_attr *attr,
1985                                      union bpf_attr __user *uattr)
1986 {
1987         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1988                                                   true, false);
1989 }
1990
1991 struct bpf_iter_seq_hash_map_info {
1992         struct bpf_map *map;
1993         struct bpf_htab *htab;
1994         void *percpu_value_buf; /* non-NULL means percpu hash */
1995         u32 bucket_id;
1996         u32 skip_elems;
1997 };
1998
1999 static struct htab_elem *
2000 bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
2001                            struct htab_elem *prev_elem)
2002 {
2003         const struct bpf_htab *htab = info->htab;
2004         u32 skip_elems = info->skip_elems;
2005         u32 bucket_id = info->bucket_id;
2006         struct hlist_nulls_head *head;
2007         struct hlist_nulls_node *n;
2008         struct htab_elem *elem;
2009         struct bucket *b;
2010         u32 i, count;
2011
2012         if (bucket_id >= htab->n_buckets)
2013                 return NULL;
2014
2015         /* try to find next elem in the same bucket */
2016         if (prev_elem) {
2017                 /* no update/deletion on this bucket, prev_elem should still be valid
2018                  * and we won't skip elements.
2019                  */
2020                 n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
2021                 elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
2022                 if (elem)
2023                         return elem;
2024
2025                 /* not found, unlock and go to the next bucket */
2026                 b = &htab->buckets[bucket_id++];
2027                 rcu_read_unlock();
2028                 skip_elems = 0;
2029         }
2030
2031         for (i = bucket_id; i < htab->n_buckets; i++) {
2032                 b = &htab->buckets[i];
2033                 rcu_read_lock();
2034
2035                 count = 0;
2036                 head = &b->head;
2037                 hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
2038                         if (count >= skip_elems) {
2039                                 info->bucket_id = i;
2040                                 info->skip_elems = count;
2041                                 return elem;
2042                         }
2043                         count++;
2044                 }
2045
2046                 rcu_read_unlock();
2047                 skip_elems = 0;
2048         }
2049
2050         info->bucket_id = i;
2051         info->skip_elems = 0;
2052         return NULL;
2053 }
2054
2055 static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
2056 {
2057         struct bpf_iter_seq_hash_map_info *info = seq->private;
2058         struct htab_elem *elem;
2059
2060         elem = bpf_hash_map_seq_find_next(info, NULL);
2061         if (!elem)
2062                 return NULL;
2063
2064         if (*pos == 0)
2065                 ++*pos;
2066         return elem;
2067 }
2068
2069 static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2070 {
2071         struct bpf_iter_seq_hash_map_info *info = seq->private;
2072
2073         ++*pos;
2074         ++info->skip_elems;
2075         return bpf_hash_map_seq_find_next(info, v);
2076 }
2077
2078 static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
2079 {
2080         struct bpf_iter_seq_hash_map_info *info = seq->private;
2081         u32 roundup_key_size, roundup_value_size;
2082         struct bpf_iter__bpf_map_elem ctx = {};
2083         struct bpf_map *map = info->map;
2084         struct bpf_iter_meta meta;
2085         int ret = 0, off = 0, cpu;
2086         struct bpf_prog *prog;
2087         void __percpu *pptr;
2088
2089         meta.seq = seq;
2090         prog = bpf_iter_get_info(&meta, elem == NULL);
2091         if (prog) {
2092                 ctx.meta = &meta;
2093                 ctx.map = info->map;
2094                 if (elem) {
2095                         roundup_key_size = round_up(map->key_size, 8);
2096                         ctx.key = elem->key;
2097                         if (!info->percpu_value_buf) {
2098                                 ctx.value = elem->key + roundup_key_size;
2099                         } else {
2100                                 roundup_value_size = round_up(map->value_size, 8);
2101                                 pptr = htab_elem_get_ptr(elem, map->key_size);
2102                                 for_each_possible_cpu(cpu) {
2103                                         bpf_long_memcpy(info->percpu_value_buf + off,
2104                                                         per_cpu_ptr(pptr, cpu),
2105                                                         roundup_value_size);
2106                                         off += roundup_value_size;
2107                                 }
2108                                 ctx.value = info->percpu_value_buf;
2109                         }
2110                 }
2111                 ret = bpf_iter_run_prog(prog, &ctx);
2112         }
2113
2114         return ret;
2115 }
2116
2117 static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
2118 {
2119         return __bpf_hash_map_seq_show(seq, v);
2120 }
2121
2122 static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
2123 {
2124         if (!v)
2125                 (void)__bpf_hash_map_seq_show(seq, NULL);
2126         else
2127                 rcu_read_unlock();
2128 }
2129
2130 static int bpf_iter_init_hash_map(void *priv_data,
2131                                   struct bpf_iter_aux_info *aux)
2132 {
2133         struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2134         struct bpf_map *map = aux->map;
2135         void *value_buf;
2136         u32 buf_size;
2137
2138         if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
2139             map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
2140                 buf_size = round_up(map->value_size, 8) * num_possible_cpus();
2141                 value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
2142                 if (!value_buf)
2143                         return -ENOMEM;
2144
2145                 seq_info->percpu_value_buf = value_buf;
2146         }
2147
2148         bpf_map_inc_with_uref(map);
2149         seq_info->map = map;
2150         seq_info->htab = container_of(map, struct bpf_htab, map);
2151         return 0;
2152 }
2153
2154 static void bpf_iter_fini_hash_map(void *priv_data)
2155 {
2156         struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2157
2158         bpf_map_put_with_uref(seq_info->map);
2159         kfree(seq_info->percpu_value_buf);
2160 }
2161
2162 static const struct seq_operations bpf_hash_map_seq_ops = {
2163         .start  = bpf_hash_map_seq_start,
2164         .next   = bpf_hash_map_seq_next,
2165         .stop   = bpf_hash_map_seq_stop,
2166         .show   = bpf_hash_map_seq_show,
2167 };
2168
2169 static const struct bpf_iter_seq_info iter_seq_info = {
2170         .seq_ops                = &bpf_hash_map_seq_ops,
2171         .init_seq_private       = bpf_iter_init_hash_map,
2172         .fini_seq_private       = bpf_iter_fini_hash_map,
2173         .seq_priv_size          = sizeof(struct bpf_iter_seq_hash_map_info),
2174 };
2175
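/*
 * A minimal BPF iterator sketch served by the seq_info above (illustrative
 * only, not part of hashtab.c; it assumes a libbpf-built program where
 * "vmlinux.h" provides struct bpf_iter__bpf_map_elem, attached to a
 * hypothetical map holding __u32 keys and __u64 values). ctx->key and
 * ctx->value correspond to the fields filled in by __bpf_hash_map_seq_show().
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/bpf_map_elem")
int dump_hash_elem(struct bpf_iter__bpf_map_elem *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	__u32 *key = ctx->key;
	__u64 *val = ctx->value;

	/* key/value are NULL once iteration is complete */
	if (!key || !val)
		return 0;

	BPF_SEQ_PRINTF(seq, "%u: %llu\n", *key, *val);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
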
2176 static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
2177                                   void *callback_ctx, u64 flags)
2178 {
2179         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2180         struct hlist_nulls_head *head;
2181         struct hlist_nulls_node *n;
2182         struct htab_elem *elem;
2183         u32 roundup_key_size;
2184         int i, num_elems = 0;
2185         void __percpu *pptr;
2186         struct bucket *b;
2187         void *key, *val;
2188         bool is_percpu;
2189         u64 ret = 0;
2190
2191         if (flags != 0)
2192                 return -EINVAL;
2193
2194         is_percpu = htab_is_percpu(htab);
2195
2196         roundup_key_size = round_up(map->key_size, 8);
2197         /* disable migration so the percpu value prepared here is the
2198          * same one seen by the bpf program via bpf_map_lookup_elem().
2199          */
2200         if (is_percpu)
2201                 migrate_disable();
2202         for (i = 0; i < htab->n_buckets; i++) {
2203                 b = &htab->buckets[i];
2204                 rcu_read_lock();
2205                 head = &b->head;
2206                 hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
2207                         key = elem->key;
2208                         if (is_percpu) {
2209                                 /* current cpu value for percpu map */
2210                                 pptr = htab_elem_get_ptr(elem, map->key_size);
2211                                 val = this_cpu_ptr(pptr);
2212                         } else {
2213                                 val = elem->key + roundup_key_size;
2214                         }
2215                         num_elems++;
2216                         ret = callback_fn((u64)(long)map, (u64)(long)key,
2217                                           (u64)(long)val, (u64)(long)callback_ctx, 0);
2218                         /* return value: 0 - continue, 1 - stop and return */
2219                         if (ret) {
2220                                 rcu_read_unlock();
2221                                 goto out;
2222                         }
2223                 }
2224                 rcu_read_unlock();
2225         }
2226 out:
2227         if (is_percpu)
2228                 migrate_enable();
2229         return num_elems;
2230 }
2231
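/*
 * A minimal BPF-side sketch of the for_each path above (illustrative only, not
 * part of hashtab.c; it assumes a libbpf-built program using bpf_helpers.h, the
 * bpf_for_each_map_elem() helper and a hypothetical "counters" map). The
 * callback receives (map, key, value, ctx) and returns 0 to continue or 1 to
 * stop, matching the callback_fn() invocation in bpf_for_each_hash_elem().
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct bpf_map;

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");	/* example map, defined only for this sketch */

struct sum_ctx {
	__u64 sum;
};

static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
{
	struct sum_ctx *s = ctx;

	s->sum += *val;
	return 0;	/* 0 - continue, 1 - stop early */
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_counters(void *ctx)
{
	struct sum_ctx s = { .sum = 0 };

	/* walks every element, invoking sum_cb() under RCU */
	bpf_for_each_map_elem(&counters, sum_cb, &s, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
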
2232 BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
2233 const struct bpf_map_ops htab_map_ops = {
2234         .map_meta_equal = bpf_map_meta_equal,
2235         .map_alloc_check = htab_map_alloc_check,
2236         .map_alloc = htab_map_alloc,
2237         .map_free = htab_map_free,
2238         .map_get_next_key = htab_map_get_next_key,
2239         .map_release_uref = htab_map_free_timers,
2240         .map_lookup_elem = htab_map_lookup_elem,
2241         .map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
2242         .map_update_elem = htab_map_update_elem,
2243         .map_delete_elem = htab_map_delete_elem,
2244         .map_gen_lookup = htab_map_gen_lookup,
2245         .map_seq_show_elem = htab_map_seq_show_elem,
2246         .map_set_for_each_callback_args = map_set_for_each_callback_args,
2247         .map_for_each_callback = bpf_for_each_hash_elem,
2248         BATCH_OPS(htab),
2249         .map_btf_id = &htab_map_btf_ids[0],
2250         .iter_seq_info = &iter_seq_info,
2251 };
2252
2253 const struct bpf_map_ops htab_lru_map_ops = {
2254         .map_meta_equal = bpf_map_meta_equal,
2255         .map_alloc_check = htab_map_alloc_check,
2256         .map_alloc = htab_map_alloc,
2257         .map_free = htab_map_free,
2258         .map_get_next_key = htab_map_get_next_key,
2259         .map_release_uref = htab_map_free_timers,
2260         .map_lookup_elem = htab_lru_map_lookup_elem,
2261         .map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
2262         .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
2263         .map_update_elem = htab_lru_map_update_elem,
2264         .map_delete_elem = htab_lru_map_delete_elem,
2265         .map_gen_lookup = htab_lru_map_gen_lookup,
2266         .map_seq_show_elem = htab_map_seq_show_elem,
2267         .map_set_for_each_callback_args = map_set_for_each_callback_args,
2268         .map_for_each_callback = bpf_for_each_hash_elem,
2269         BATCH_OPS(htab_lru),
2270         .map_btf_id = &htab_map_btf_ids[0],
2271         .iter_seq_info = &iter_seq_info,
2272 };
2273
2274 /* Called from eBPF program */
2275 static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2276 {
2277         struct htab_elem *l = __htab_map_lookup_elem(map, key);
2278
2279         if (l)
2280                 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2281         else
2282                 return NULL;
2283 }
2284
2285 static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
2286 {
2287         struct htab_elem *l;
2288
2289         if (cpu >= nr_cpu_ids)
2290                 return NULL;
2291
2292         l = __htab_map_lookup_elem(map, key);
2293         if (l)
2294                 return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
2295         else
2296                 return NULL;
2297 }
2298
2299 static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2300 {
2301         struct htab_elem *l = __htab_map_lookup_elem(map, key);
2302
2303         if (l) {
2304                 bpf_lru_node_set_ref(&l->lru_node);
2305                 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2306         }
2307
2308         return NULL;
2309 }
2310
2311 static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
2312 {
2313         struct htab_elem *l;
2314
2315         if (cpu >= nr_cpu_ids)
2316                 return NULL;
2317
2318         l = __htab_map_lookup_elem(map, key);
2319         if (l) {
2320                 bpf_lru_node_set_ref(&l->lru_node);
2321                 return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
2322         }
2323
2324         return NULL;
2325 }
2326
2327 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
2328 {
2329         struct htab_elem *l;
2330         void __percpu *pptr;
2331         int ret = -ENOENT;
2332         int cpu, off = 0;
2333         u32 size;
2334
2335         /* per_cpu areas are zero-filled and bpf programs can only
2336          * access 'value_size' of them, so copying rounded areas
2337          * will not leak any kernel data
2338          */
2339         size = round_up(map->value_size, 8);
2340         rcu_read_lock();
2341         l = __htab_map_lookup_elem(map, key);
2342         if (!l)
2343                 goto out;
2344         /* We do not mark the LRU map element here, so as not to mess up
2345          * the eviction heuristics when user space walks the map.
2346          */
2347         pptr = htab_elem_get_ptr(l, map->key_size);
2348         for_each_possible_cpu(cpu) {
2349                 bpf_long_memcpy(value + off,
2350                                 per_cpu_ptr(pptr, cpu), size);
2351                 off += size;
2352         }
2353         ret = 0;
2354 out:
2355         rcu_read_unlock();
2356         return ret;
2357 }
2358
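/*
 * The userspace counterpart, as a minimal sketch (illustrative only, not part
 * of hashtab.c; it assumes libbpf and a hypothetical PERCPU_HASH map whose
 * value is a single __u64): a lookup returns round_up(value_size, 8) bytes per
 * possible CPU, in possible-CPU order, so summing a per-CPU counter looks
 * like this.
 */
#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int percpu_hash_sum(int map_fd, const void *key, __u64 *sum)
{
	int ncpus = libbpf_num_possible_cpus();
	__u64 *vals;
	int cpu, err;

	if (ncpus < 0)
		return ncpus;

	vals = calloc(ncpus, sizeof(*vals));	/* value_size == 8 assumed */
	if (!vals)
		return -ENOMEM;

	err = bpf_map_lookup_elem(map_fd, key, vals);
	if (!err) {
		*sum = 0;
		for (cpu = 0; cpu < ncpus; cpu++)
			*sum += vals[cpu];
	}

	free(vals);
	return err;
}
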
2359 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
2360                            u64 map_flags)
2361 {
2362         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2363         int ret;
2364
2365         rcu_read_lock();
2366         if (htab_is_lru(htab))
2367                 ret = __htab_lru_percpu_map_update_elem(map, key, value,
2368                                                         map_flags, true);
2369         else
2370                 ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
2371                                                     true);
2372         rcu_read_unlock();
2373
2374         return ret;
2375 }
2376
2377 static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
2378                                           struct seq_file *m)
2379 {
2380         struct htab_elem *l;
2381         void __percpu *pptr;
2382         int cpu;
2383
2384         rcu_read_lock();
2385
2386         l = __htab_map_lookup_elem(map, key);
2387         if (!l) {
2388                 rcu_read_unlock();
2389                 return;
2390         }
2391
2392         btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
2393         seq_puts(m, ": {\n");
2394         pptr = htab_elem_get_ptr(l, map->key_size);
2395         for_each_possible_cpu(cpu) {
2396                 seq_printf(m, "\tcpu%d: ", cpu);
2397                 btf_type_seq_show(map->btf, map->btf_value_type_id,
2398                                   per_cpu_ptr(pptr, cpu), m);
2399                 seq_puts(m, "\n");
2400         }
2401         seq_puts(m, "}\n");
2402
2403         rcu_read_unlock();
2404 }
2405
2406 const struct bpf_map_ops htab_percpu_map_ops = {
2407         .map_meta_equal = bpf_map_meta_equal,
2408         .map_alloc_check = htab_map_alloc_check,
2409         .map_alloc = htab_map_alloc,
2410         .map_free = htab_map_free,
2411         .map_get_next_key = htab_map_get_next_key,
2412         .map_lookup_elem = htab_percpu_map_lookup_elem,
2413         .map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
2414         .map_update_elem = htab_percpu_map_update_elem,
2415         .map_delete_elem = htab_map_delete_elem,
2416         .map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem,
2417         .map_seq_show_elem = htab_percpu_map_seq_show_elem,
2418         .map_set_for_each_callback_args = map_set_for_each_callback_args,
2419         .map_for_each_callback = bpf_for_each_hash_elem,
2420         BATCH_OPS(htab_percpu),
2421         .map_btf_id = &htab_map_btf_ids[0],
2422         .iter_seq_info = &iter_seq_info,
2423 };
2424
2425 const struct bpf_map_ops htab_lru_percpu_map_ops = {
2426         .map_meta_equal = bpf_map_meta_equal,
2427         .map_alloc_check = htab_map_alloc_check,
2428         .map_alloc = htab_map_alloc,
2429         .map_free = htab_map_free,
2430         .map_get_next_key = htab_map_get_next_key,
2431         .map_lookup_elem = htab_lru_percpu_map_lookup_elem,
2432         .map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
2433         .map_update_elem = htab_lru_percpu_map_update_elem,
2434         .map_delete_elem = htab_lru_map_delete_elem,
2435         .map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem,
2436         .map_seq_show_elem = htab_percpu_map_seq_show_elem,
2437         .map_set_for_each_callback_args = map_set_for_each_callback_args,
2438         .map_for_each_callback = bpf_for_each_hash_elem,
2439         BATCH_OPS(htab_lru_percpu),
2440         .map_btf_id = &htab_map_btf_ids[0],
2441         .iter_seq_info = &iter_seq_info,
2442 };
2443
2444 static int fd_htab_map_alloc_check(union bpf_attr *attr)
2445 {
2446         if (attr->value_size != sizeof(u32))
2447                 return -EINVAL;
2448         return htab_map_alloc_check(attr);
2449 }
2450
2451 static void fd_htab_map_free(struct bpf_map *map)
2452 {
2453         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2454         struct hlist_nulls_node *n;
2455         struct hlist_nulls_head *head;
2456         struct htab_elem *l;
2457         int i;
2458
2459         for (i = 0; i < htab->n_buckets; i++) {
2460                 head = select_bucket(htab, i);
2461
2462                 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
2463                         void *ptr = fd_htab_map_get_ptr(map, l);
2464
2465                         map->ops->map_fd_put_ptr(ptr);
2466                 }
2467         }
2468
2469         htab_map_free(map);
2470 }
2471
2472 /* only called from syscall */
2473 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
2474 {
2475         void **ptr;
2476         int ret = 0;
2477
2478         if (!map->ops->map_fd_sys_lookup_elem)
2479                 return -ENOTSUPP;
2480
2481         rcu_read_lock();
2482         ptr = htab_map_lookup_elem(map, key);
2483         if (ptr)
2484                 *value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
2485         else
2486                 ret = -ENOENT;
2487         rcu_read_unlock();
2488
2489         return ret;
2490 }
2491
2492 /* only called from syscall */
2493 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
2494                                 void *key, void *value, u64 map_flags)
2495 {
2496         void *ptr;
2497         int ret;
2498         u32 ufd = *(u32 *)value;
2499
2500         ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
2501         if (IS_ERR(ptr))
2502                 return PTR_ERR(ptr);
2503
2504         ret = htab_map_update_elem(map, key, &ptr, map_flags);
2505         if (ret)
2506                 map->ops->map_fd_put_ptr(ptr);
2507
2508         return ret;
2509 }
2510
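/*
 * A minimal userspace sketch of populating a hash-of-maps through the path
 * above (illustrative only, not part of hashtab.c; it assumes libbpf's
 * bpf_map_create() with hypothetical "inner"/"outer" maps, and skips closing
 * the fds): the "value" written from the syscall is an fd of the inner map,
 * which the kernel converts into a struct bpf_map pointer before storing it.
 */
#include <bpf/bpf.h>

static int create_and_populate_hash_of_maps(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int inner_fd, outer_fd;
	__u32 key = 1;

	/* inner map used as the template for the outer map's values */
	inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner",
				  sizeof(__u32), sizeof(__u64), 16, NULL);
	if (inner_fd < 0)
		return inner_fd;

	opts.inner_map_fd = inner_fd;
	outer_fd = bpf_map_create(BPF_MAP_TYPE_HASH_OF_MAPS, "outer",
				  sizeof(key), sizeof(int), 128, &opts);
	if (outer_fd < 0)
		return outer_fd;

	/* value is the inner map's fd; the kernel stores the map pointer */
	return bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
}
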
2511 static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
2512 {
2513         struct bpf_map *map, *inner_map_meta;
2514
2515         inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
2516         if (IS_ERR(inner_map_meta))
2517                 return inner_map_meta;
2518
2519         map = htab_map_alloc(attr);
2520         if (IS_ERR(map)) {
2521                 bpf_map_meta_free(inner_map_meta);
2522                 return map;
2523         }
2524
2525         map->inner_map_meta = inner_map_meta;
2526
2527         return map;
2528 }
2529
2530 static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
2531 {
2532         struct bpf_map **inner_map = htab_map_lookup_elem(map, key);
2533
2534         if (!inner_map)
2535                 return NULL;
2536
2537         return READ_ONCE(*inner_map);
2538 }
2539
2540 static int htab_of_map_gen_lookup(struct bpf_map *map,
2541                                   struct bpf_insn *insn_buf)
2542 {
2543         struct bpf_insn *insn = insn_buf;
2544         const int ret = BPF_REG_0;
2545
2546         BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
2547                      (void *(*)(struct bpf_map *map, void *key))NULL));
2548         *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
2549         *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
2550         *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
2551                                 offsetof(struct htab_elem, key) +
2552                                 round_up(map->key_size, 8));
2553         *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
2554
2555         return insn - insn_buf;
2556 }
2557
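/*
 * A minimal BPF-side sketch of what the inlined lookup above implements
 * (illustrative only, not part of hashtab.c; it assumes a libbpf-built program
 * with hypothetical inner/outer map definitions): the first lookup on the
 * outer hash yields the inner map, which is then used like any other map.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct inner_map {	/* example inner map template for this sketch */
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
	__uint(max_entries, 16);
	__uint(key_size, sizeof(__u32));
	__array(values, struct inner_map);
} outer_map SEC(".maps");	/* example outer map for this sketch */

SEC("tracepoint/syscalls/sys_enter_getpid")
int use_map_in_map(void *ctx)
{
	__u32 outer_key = 0, inner_key = 0;
	__u64 *val;
	void *inner;

	/* first lookup returns the inner map pointer (or NULL) ... */
	inner = bpf_map_lookup_elem(&outer_map, &outer_key);
	if (!inner)
		return 0;

	/* ... which is then passed to the helper as a map */
	val = bpf_map_lookup_elem(inner, &inner_key);
	if (val)
		__sync_fetch_and_add(val, 1);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
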
2558 static void htab_of_map_free(struct bpf_map *map)
2559 {
2560         bpf_map_meta_free(map->inner_map_meta);
2561         fd_htab_map_free(map);
2562 }
2563
2564 const struct bpf_map_ops htab_of_maps_map_ops = {
2565         .map_alloc_check = fd_htab_map_alloc_check,
2566         .map_alloc = htab_of_map_alloc,
2567         .map_free = htab_of_map_free,
2568         .map_get_next_key = htab_map_get_next_key,
2569         .map_lookup_elem = htab_of_map_lookup_elem,
2570         .map_delete_elem = htab_map_delete_elem,
2571         .map_fd_get_ptr = bpf_map_fd_get_ptr,
2572         .map_fd_put_ptr = bpf_map_fd_put_ptr,
2573         .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
2574         .map_gen_lookup = htab_of_map_gen_lookup,
2575         .map_check_btf = map_check_no_btf,
2576         BATCH_OPS(htab),
2577         .map_btf_id = &htab_map_btf_ids[0],
2578 };