kernel/bpf/hashtab.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  */
5 #include <linux/bpf.h>
6 #include <linux/btf.h>
7 #include <linux/jhash.h>
8 #include <linux/filter.h>
9 #include <linux/rculist_nulls.h>
10 #include <linux/random.h>
11 #include <uapi/linux/btf.h>
12 #include <linux/rcupdate_trace.h>
13 #include <linux/btf_ids.h>
14 #include "percpu_freelist.h"
15 #include "bpf_lru_list.h"
16 #include "map_in_map.h"
17 #include <linux/bpf_mem_alloc.h>
18
19 #define HTAB_CREATE_FLAG_MASK                                           \
20         (BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |    \
21          BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)
22
23 #define BATCH_OPS(_name)                        \
24         .map_lookup_batch =                     \
25         _name##_map_lookup_batch,               \
26         .map_lookup_and_delete_batch =          \
27         _name##_map_lookup_and_delete_batch,    \
28         .map_update_batch =                     \
29         generic_map_update_batch,               \
30         .map_delete_batch =                     \
31         generic_map_delete_batch
32
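/* BATCH_OPS() is spliced into a map's bpf_map_ops initializer to wire up the
 * four batched syscall operations. A minimal sketch of how it is used (the
 * real htab_map_ops later in this file carries many more callbacks):
 *
 *   const struct bpf_map_ops htab_map_ops = {
 *           ...
 *           BATCH_OPS(htab),
 *   };
 *
 * which resolves .map_lookup_batch to htab_map_lookup_batch and so on.
 */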
33 /*
34  * The bucket lock has two protection scopes:
35  *
36  * 1) Serializing concurrent operations from BPF programs on different
37  *    CPUs
38  *
39  * 2) Serializing concurrent operations from BPF programs and sys_bpf()
40  *
41  * BPF programs can execute in any context including perf, kprobes and
42  * tracing. As there are almost no limits where perf, kprobes and tracing
43  * can be invoked from, the lock operations need to be protected against
44  * deadlocks. Deadlocks can be caused by recursion and by an invocation in
45  * the lock held section when functions which acquire this lock are invoked
46  * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
47  * variable bpf_prog_active, which prevents BPF programs attached to perf
48  * events, kprobes and tracing from being invoked before the prior invocation
49  * from one of these contexts has completed. sys_bpf() uses the same mechanism
50  * by pinning the task to the current CPU and incrementing the recursion
51  * protection across the map operation.
52  *
53  * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
54  * operations like memory allocations (even with GFP_ATOMIC) from atomic
55  * contexts. This is required because even with GFP_ATOMIC the memory
56  * allocator calls into code paths which acquire locks with long held lock
57  * sections. To ensure deterministic behaviour, these locks are regular
58  * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
59  * true atomic contexts on an RT kernel are the low level hardware
60  * handling, scheduling, low level interrupt handling, NMIs etc. None of
61  * these contexts should ever do memory allocations.
62  *
63  * As regular device interrupt handlers and soft interrupts are forced into
64  * thread context, the existing code which does
65  *   spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
66  * just works.
67  *
68  * In theory the BPF locks could be converted to regular spinlocks as well,
69  * but the bucket locks and percpu_freelist locks can be taken from
70  * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
71  * atomic contexts even on RT. Before the introduction of bpf_mem_alloc,
72  * it was only safe to use a raw spinlock for a preallocated hash map on an
73  * RT kernel, because there was no memory allocation within the lock held
74  * sections. Now that the hash map has been fully converted to use
75  * bpf_mem_alloc, memory allocation for a non-preallocated hash map is no
76  * longer synchronous, so it is safe to always use a raw spinlock for the bucket lock.
77  */
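/* The resulting usage pattern in the update/delete paths below is roughly
 * (sketch):
 *
 *   ret = htab_lock_bucket(htab, b, hash, &flags);
 *   if (ret)
 *           return ret;    (re-entered on this CPU: back off with -EBUSY)
 *   ... modify the bucket list, possibly allocate via bpf_mem_alloc ...
 *   htab_unlock_bucket(htab, b, hash, flags);
 */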
78 struct bucket {
79         struct hlist_nulls_head head;
80         raw_spinlock_t raw_lock;
81 };
82
83 #define HASHTAB_MAP_LOCK_COUNT 8
84 #define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
85
86 struct bpf_htab {
87         struct bpf_map map;
88         struct bpf_mem_alloc ma;
89         struct bpf_mem_alloc pcpu_ma;
90         struct bucket *buckets;
91         void *elems;
92         union {
93                 struct pcpu_freelist freelist;
94                 struct bpf_lru lru;
95         };
96         struct htab_elem *__percpu *extra_elems;
97         /* the number of elements in a non-preallocated hashtable is kept
98          * in either pcount or count
99          */
100         struct percpu_counter pcount;
101         atomic_t count;
102         bool use_percpu_counter;
103         u32 n_buckets;  /* number of hash buckets */
104         u32 elem_size;  /* size of each element in bytes */
105         u32 hashrnd;
106         struct lock_class_key lockdep_key;
107         int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
108 };
109
110 /* each htab element is struct htab_elem + key + value */
111 struct htab_elem {
112         union {
113                 struct hlist_nulls_node hash_node;
114                 struct {
115                         void *padding;
116                         union {
117                                 struct pcpu_freelist_node fnode;
118                                 struct htab_elem *batch_flink;
119                         };
120                 };
121         };
122         union {
123                 /* pointer to per-cpu pointer */
124                 void *ptr_to_pptr;
125                 struct bpf_lru_node lru_node;
126         };
127         u32 hash;
128         char key[] __aligned(8);
129 };
130
131 static inline bool htab_is_prealloc(const struct bpf_htab *htab)
132 {
133         return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
134 }
135
136 static void htab_init_buckets(struct bpf_htab *htab)
137 {
138         unsigned int i;
139
140         for (i = 0; i < htab->n_buckets; i++) {
141                 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
142                 raw_spin_lock_init(&htab->buckets[i].raw_lock);
143                 lockdep_set_class(&htab->buckets[i].raw_lock,
144                                           &htab->lockdep_key);
145                 cond_resched();
146         }
147 }
148
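/* Trylock-style bucket locking: the per-CPU map_locked counter for this
 * bucket's lock class is bumped first; if it was already non-zero, a BPF
 * program on this CPU (e.g. from NMI or tracing context) is already inside
 * the same lock class, so back off with -EBUSY instead of deadlocking on
 * raw_lock.
 */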
149 static inline int htab_lock_bucket(const struct bpf_htab *htab,
150                                    struct bucket *b, u32 hash,
151                                    unsigned long *pflags)
152 {
153         unsigned long flags;
154
155         hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
156
157         preempt_disable();
158         if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
159                 __this_cpu_dec(*(htab->map_locked[hash]));
160                 preempt_enable();
161                 return -EBUSY;
162         }
163
164         raw_spin_lock_irqsave(&b->raw_lock, flags);
165         *pflags = flags;
166
167         return 0;
168 }
169
170 static inline void htab_unlock_bucket(const struct bpf_htab *htab,
171                                       struct bucket *b, u32 hash,
172                                       unsigned long flags)
173 {
174         hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
175         raw_spin_unlock_irqrestore(&b->raw_lock, flags);
176         __this_cpu_dec(*(htab->map_locked[hash]));
177         preempt_enable();
178 }
179
180 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
181
182 static bool htab_is_lru(const struct bpf_htab *htab)
183 {
184         return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
185                 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
186 }
187
188 static bool htab_is_percpu(const struct bpf_htab *htab)
189 {
190         return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
191                 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
192 }
193
194 static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
195                                      void __percpu *pptr)
196 {
197         *(void __percpu **)(l->key + key_size) = pptr;
198 }
199
200 static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
201 {
202         return *(void __percpu **)(l->key + key_size);
203 }
204
205 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
206 {
207         return *(void **)(l->key + roundup(map->key_size, 8));
208 }
209
210 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
211 {
212         return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
213 }
214
215 static bool htab_has_extra_elems(struct bpf_htab *htab)
216 {
217         return !htab_is_percpu(htab) && !htab_is_lru(htab);
218 }
219
220 static void htab_free_prealloced_timers(struct bpf_htab *htab)
221 {
222         u32 num_entries = htab->map.max_entries;
223         int i;
224
225         if (!btf_record_has_field(htab->map.record, BPF_TIMER))
226                 return;
227         if (htab_has_extra_elems(htab))
228                 num_entries += num_possible_cpus();
229
230         for (i = 0; i < num_entries; i++) {
231                 struct htab_elem *elem;
232
233                 elem = get_htab_elem(htab, i);
234                 bpf_obj_free_timer(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
235                 cond_resched();
236         }
237 }
238
239 static void htab_free_prealloced_fields(struct bpf_htab *htab)
240 {
241         u32 num_entries = htab->map.max_entries;
242         int i;
243
244         if (IS_ERR_OR_NULL(htab->map.record))
245                 return;
246         if (htab_has_extra_elems(htab))
247                 num_entries += num_possible_cpus();
248         for (i = 0; i < num_entries; i++) {
249                 struct htab_elem *elem;
250
251                 elem = get_htab_elem(htab, i);
252                 if (htab_is_percpu(htab)) {
253                         void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
254                         int cpu;
255
256                         for_each_possible_cpu(cpu) {
257                                 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
258                                 cond_resched();
259                         }
260                 } else {
261                         bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
262                         cond_resched();
263                 }
264                 cond_resched();
265         }
266 }
267
268 static void htab_free_elems(struct bpf_htab *htab)
269 {
270         int i;
271
272         if (!htab_is_percpu(htab))
273                 goto free_elems;
274
275         for (i = 0; i < htab->map.max_entries; i++) {
276                 void __percpu *pptr;
277
278                 pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
279                                          htab->map.key_size);
280                 free_percpu(pptr);
281                 cond_resched();
282         }
283 free_elems:
284         bpf_map_area_free(htab->elems);
285 }
286
287 /* The LRU list has a lock (lru_lock). Each htab bucket has a lock
288  * (bucket_lock). If both locks need to be acquired together, the lock
289  * order is always lru_lock -> bucket_lock and this only happens in
290  * bpf_lru_list.c logic. For example, certain code paths of
291  * bpf_lru_pop_free(), which is called by prealloc_lru_pop(), acquire
292  * lru_lock first, followed by bucket_lock.
293  *
294  * In hashtab.c, to avoid deadlock, lock acquisition of
295  * bucket_lock followed by lru_lock is not allowed. In such cases,
296  * bucket_lock needs to be released first before acquiring lru_lock.
297  */
298 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
299                                           u32 hash)
300 {
301         struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
302         struct htab_elem *l;
303
304         if (node) {
305                 bpf_map_inc_elem_count(&htab->map);
306                 l = container_of(node, struct htab_elem, lru_node);
307                 memcpy(l->key, key, htab->map.key_size);
308                 return l;
309         }
310
311         return NULL;
312 }
313
314 static int prealloc_init(struct bpf_htab *htab)
315 {
316         u32 num_entries = htab->map.max_entries;
317         int err = -ENOMEM, i;
318
319         if (htab_has_extra_elems(htab))
320                 num_entries += num_possible_cpus();
321
322         htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
323                                          htab->map.numa_node);
324         if (!htab->elems)
325                 return -ENOMEM;
326
327         if (!htab_is_percpu(htab))
328                 goto skip_percpu_elems;
329
330         for (i = 0; i < num_entries; i++) {
331                 u32 size = round_up(htab->map.value_size, 8);
332                 void __percpu *pptr;
333
334                 pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
335                                             GFP_USER | __GFP_NOWARN);
336                 if (!pptr)
337                         goto free_elems;
338                 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
339                                   pptr);
340                 cond_resched();
341         }
342
343 skip_percpu_elems:
344         if (htab_is_lru(htab))
345                 err = bpf_lru_init(&htab->lru,
346                                    htab->map.map_flags & BPF_F_NO_COMMON_LRU,
347                                    offsetof(struct htab_elem, hash) -
348                                    offsetof(struct htab_elem, lru_node),
349                                    htab_lru_map_delete_node,
350                                    htab);
351         else
352                 err = pcpu_freelist_init(&htab->freelist);
353
354         if (err)
355                 goto free_elems;
356
357         if (htab_is_lru(htab))
358                 bpf_lru_populate(&htab->lru, htab->elems,
359                                  offsetof(struct htab_elem, lru_node),
360                                  htab->elem_size, num_entries);
361         else
362                 pcpu_freelist_populate(&htab->freelist,
363                                        htab->elems + offsetof(struct htab_elem, fnode),
364                                        htab->elem_size, num_entries);
365
366         return 0;
367
368 free_elems:
369         htab_free_elems(htab);
370         return err;
371 }
372
373 static void prealloc_destroy(struct bpf_htab *htab)
374 {
375         htab_free_elems(htab);
376
377         if (htab_is_lru(htab))
378                 bpf_lru_destroy(&htab->lru);
379         else
380                 pcpu_freelist_destroy(&htab->freelist);
381 }
382
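/* Stash one preallocated element per CPU in extra_elems. alloc_htab_elem()
 * uses it when an update replaces an existing key, so the replacement does
 * not need a freelist pop/push under the bucket lock.
 */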
383 static int alloc_extra_elems(struct bpf_htab *htab)
384 {
385         struct htab_elem *__percpu *pptr, *l_new;
386         struct pcpu_freelist_node *l;
387         int cpu;
388
389         pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
390                                     GFP_USER | __GFP_NOWARN);
391         if (!pptr)
392                 return -ENOMEM;
393
394         for_each_possible_cpu(cpu) {
395                 l = pcpu_freelist_pop(&htab->freelist);
396                 /* pop will succeed, since prealloc_init()
397                  * preallocated extra num_possible_cpus elements
398                  */
399                 l_new = container_of(l, struct htab_elem, fnode);
400                 *per_cpu_ptr(pptr, cpu) = l_new;
401         }
402         htab->extra_elems = pptr;
403         return 0;
404 }
405
406 /* Called from syscall */
407 static int htab_map_alloc_check(union bpf_attr *attr)
408 {
409         bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
410                        attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
411         bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
412                     attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
413         /* percpu_lru means each cpu has its own LRU list.
414          * it is different from BPF_MAP_TYPE_PERCPU_HASH where
415          * the map's value itself is percpu.  percpu_lru has
416          * nothing to do with the map's value.
417          */
418         bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
419         bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
420         bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
421         int numa_node = bpf_map_attr_numa_node(attr);
422
423         BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
424                      offsetof(struct htab_elem, hash_node.pprev));
425
426         if (zero_seed && !capable(CAP_SYS_ADMIN))
427                 /* Guard against local DoS, and discourage production use. */
428                 return -EPERM;
429
430         if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
431             !bpf_map_flags_access_ok(attr->map_flags))
432                 return -EINVAL;
433
434         if (!lru && percpu_lru)
435                 return -EINVAL;
436
437         if (lru && !prealloc)
438                 return -ENOTSUPP;
439
440         if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
441                 return -EINVAL;
442
443         /* check sanity of attributes.
444          * value_size == 0 may be allowed in the future to use map as a set
445          */
446         if (attr->max_entries == 0 || attr->key_size == 0 ||
447             attr->value_size == 0)
448                 return -EINVAL;
449
450         if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE -
451            sizeof(struct htab_elem))
452                 /* if key_size + value_size is bigger, the user space won't be
453                  * able to access the elements via bpf syscall. This check
454                  * also makes sure that the elem_size doesn't overflow and it's
455                  * kmalloc-able later in htab_map_update_elem()
456                  */
457                 return -E2BIG;
458
459         return 0;
460 }
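/* From user space the attributes checked above arrive via the BPF_MAP_CREATE
 * command. A minimal sketch using libbpf (names and sizes are illustrative):
 *
 *   LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
 *   int fd = bpf_map_create(BPF_MAP_TYPE_HASH, "example_htab",
 *                           sizeof(__u32), sizeof(__u64), 1024, &opts);
 */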
461
462 static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
463 {
464         bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
465                        attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
466         bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
467                     attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
468         /* percpu_lru means each cpu has its own LRU list.
469          * it is different from BPF_MAP_TYPE_PERCPU_HASH where
470          * the map's value itself is percpu.  percpu_lru has
471          * nothing to do with the map's value.
472          */
473         bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
474         bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
475         struct bpf_htab *htab;
476         int err, i;
477
478         htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
479         if (!htab)
480                 return ERR_PTR(-ENOMEM);
481
482         lockdep_register_key(&htab->lockdep_key);
483
484         bpf_map_init_from_attr(&htab->map, attr);
485
486         if (percpu_lru) {
487                 /* ensure each CPU's lru list has >= 1 element.
488                  * While at it, make each lru list have the same
489                  * number of elements.
490                  */
491                 htab->map.max_entries = roundup(attr->max_entries,
492                                                 num_possible_cpus());
493                 if (htab->map.max_entries < attr->max_entries)
494                         htab->map.max_entries = rounddown(attr->max_entries,
495                                                           num_possible_cpus());
496         }
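        /* e.g. with 64 possible CPUs a request for 1000 entries is rounded up
         * to 1024, so each per-cpu LRU list gets 16 elements.
         */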
497
498         /* hash table size must be a power of 2 */
499         htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
500
501         htab->elem_size = sizeof(struct htab_elem) +
502                           round_up(htab->map.key_size, 8);
503         if (percpu)
504                 htab->elem_size += sizeof(void *);
505         else
506                 htab->elem_size += round_up(htab->map.value_size, 8);
507
508         err = -E2BIG;
509         /* prevent zero size kmalloc and check for u32 overflow */
510         if (htab->n_buckets == 0 ||
511             htab->n_buckets > U32_MAX / sizeof(struct bucket))
512                 goto free_htab;
513
514         err = bpf_map_init_elem_count(&htab->map);
515         if (err)
516                 goto free_htab;
517
518         err = -ENOMEM;
519         htab->buckets = bpf_map_area_alloc(htab->n_buckets *
520                                            sizeof(struct bucket),
521                                            htab->map.numa_node);
522         if (!htab->buckets)
523                 goto free_elem_count;
524
525         for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
526                 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
527                                                            sizeof(int),
528                                                            sizeof(int),
529                                                            GFP_USER);
530                 if (!htab->map_locked[i])
531                         goto free_map_locked;
532         }
533
534         if (htab->map.map_flags & BPF_F_ZERO_SEED)
535                 htab->hashrnd = 0;
536         else
537                 htab->hashrnd = get_random_u32();
538
539         htab_init_buckets(htab);
540
541 /* compute_batch_value() computes batch value as num_online_cpus() * 2
542  * and __percpu_counter_compare() needs
543  * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus()
544  * for percpu_counter to be faster than atomic_t. In practice the average bpf
545  * hash map size is 10k, which means that a system with 64 cpus will fill
546  * hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore
547  * the hash map to only 20% of 10k before percpu_counter becomes ineffective.
548  * Therefore define our own batch count as 32, so a 10k hash map can be filled up to 80%:
549  * and __percpu_counter_compare() will still be fast. At that point hash map
550  * collisions will dominate its performance anyway. Assume that hash map filled
551  * to 50+% isn't going to be O(1) and use the following formula to choose
552  * between percpu_counter and atomic_t.
553  */
554 #define PERCPU_COUNTER_BATCH 32
555         if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH)
556                 htab->use_percpu_counter = true;
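        /* e.g. max_entries = 10000 with 64 online CPUs: 10000 / 2 = 5000 >
         * 64 * 32 = 2048, so the percpu counter is used; a 1024-entry map on
         * the same machine (512 <= 2048) keeps using atomic_t.
         */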
557
558         if (htab->use_percpu_counter) {
559                 err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
560                 if (err)
561                         goto free_map_locked;
562         }
563
564         if (prealloc) {
565                 err = prealloc_init(htab);
566                 if (err)
567                         goto free_map_locked;
568
569                 if (!percpu && !lru) {
570                         /* lru itself can remove the least used element, so
571                          * there is no need for an extra elem during map_update.
572                          */
573                         err = alloc_extra_elems(htab);
574                         if (err)
575                                 goto free_prealloc;
576                 }
577         } else {
578                 err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
579                 if (err)
580                         goto free_map_locked;
581                 if (percpu) {
582                         err = bpf_mem_alloc_init(&htab->pcpu_ma,
583                                                  round_up(htab->map.value_size, 8), true);
584                         if (err)
585                                 goto free_map_locked;
586                 }
587         }
588
589         return &htab->map;
590
591 free_prealloc:
592         prealloc_destroy(htab);
593 free_map_locked:
594         if (htab->use_percpu_counter)
595                 percpu_counter_destroy(&htab->pcount);
596         for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
597                 free_percpu(htab->map_locked[i]);
598         bpf_map_area_free(htab->buckets);
599         bpf_mem_alloc_destroy(&htab->pcpu_ma);
600         bpf_mem_alloc_destroy(&htab->ma);
601 free_elem_count:
602         bpf_map_free_elem_count(&htab->map);
603 free_htab:
604         lockdep_unregister_key(&htab->lockdep_key);
605         bpf_map_area_free(htab);
606         return ERR_PTR(err);
607 }
608
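/* Pick the word-wise jhash2() when the key length is a multiple of 4 bytes,
 * e.g. a 16-byte key hashes as jhash2(key, 4, hashrnd), while a 6-byte key
 * falls back to the byte-wise jhash(key, 6, hashrnd).
 */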
609 static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
610 {
611         if (likely(key_len % 4 == 0))
612                 return jhash2(key, key_len / 4, hashrnd);
613         return jhash(key, key_len, hashrnd);
614 }
615
616 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
617 {
618         return &htab->buckets[hash & (htab->n_buckets - 1)];
619 }
620
621 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
622 {
623         return &__select_bucket(htab, hash)->head;
624 }
625
626 /* this lookup function can only be called with bucket lock taken */
627 static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
628                                          void *key, u32 key_size)
629 {
630         struct hlist_nulls_node *n;
631         struct htab_elem *l;
632
633         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
634                 if (l->hash == hash && !memcmp(&l->key, key, key_size))
635                         return l;
636
637         return NULL;
638 }
639
640 /* can be called without the bucket lock. It will repeat the loop in
641  * the unlikely event that elements moved from one bucket into another
642  * while the linked list is being walked
643  */
644 static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
645                                                u32 hash, void *key,
646                                                u32 key_size, u32 n_buckets)
647 {
648         struct hlist_nulls_node *n;
649         struct htab_elem *l;
650
651 again:
652         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
653                 if (l->hash == hash && !memcmp(&l->key, key, key_size))
654                         return l;
655
656         if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
657                 goto again;
658
659         return NULL;
660 }
661
662 /* Called from syscall or from eBPF program directly, so
663  * arguments have to match bpf_map_lookup_elem() exactly.
664  * The return value is adjusted by BPF instructions
665  * in htab_map_gen_lookup().
666  */
667 static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
668 {
669         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
670         struct hlist_nulls_head *head;
671         struct htab_elem *l;
672         u32 hash, key_size;
673
674         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
675                      !rcu_read_lock_bh_held());
676
677         key_size = map->key_size;
678
679         hash = htab_map_hash(key, key_size, htab->hashrnd);
680
681         head = select_bucket(htab, hash);
682
683         l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
684
685         return l;
686 }
687
688 static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
689 {
690         struct htab_elem *l = __htab_map_lookup_elem(map, key);
691
692         if (l)
693                 return l->key + round_up(map->key_size, 8);
694
695         return NULL;
696 }
697
698 /* inline bpf_map_lookup_elem() call.
699  * Instead of:
700  * bpf_prog
701  *   bpf_map_lookup_elem
702  *     map->ops->map_lookup_elem
703  *       htab_map_lookup_elem
704  *         __htab_map_lookup_elem
705  * do:
706  * bpf_prog
707  *   __htab_map_lookup_elem
708  */
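/* The emitted instructions are equivalent to (sketch):
 *
 *   l = __htab_map_lookup_elem(map, key);
 *   if (l)
 *           l = (void *)l + offsetof(struct htab_elem, key) +
 *               round_up(map->key_size, 8);
 *   return l;    (i.e. a pointer to the value, or NULL)
 */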
709 static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
710 {
711         struct bpf_insn *insn = insn_buf;
712         const int ret = BPF_REG_0;
713
714         BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
715                      (void *(*)(struct bpf_map *map, void *key))NULL));
716         *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
717         *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
718         *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
719                                 offsetof(struct htab_elem, key) +
720                                 round_up(map->key_size, 8));
721         return insn - insn_buf;
722 }
723
724 static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
725                                                         void *key, const bool mark)
726 {
727         struct htab_elem *l = __htab_map_lookup_elem(map, key);
728
729         if (l) {
730                 if (mark)
731                         bpf_lru_node_set_ref(&l->lru_node);
732                 return l->key + round_up(map->key_size, 8);
733         }
734
735         return NULL;
736 }
737
738 static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
739 {
740         return __htab_lru_map_lookup_elem(map, key, true);
741 }
742
743 static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
744 {
745         return __htab_lru_map_lookup_elem(map, key, false);
746 }
747
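/* Inlined LRU lookup: same as htab_map_gen_lookup() but the emitted
 * instructions also set lru_node.ref on a hit, roughly (sketch):
 *
 *   l = __htab_map_lookup_elem(map, key);
 *   if (l) {
 *           if (!l->lru_node.ref)
 *                   l->lru_node.ref = 1;
 *           return l->key + round_up(map->key_size, 8);
 *   }
 *   return NULL;
 */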
748 static int htab_lru_map_gen_lookup(struct bpf_map *map,
749                                    struct bpf_insn *insn_buf)
750 {
751         struct bpf_insn *insn = insn_buf;
752         const int ret = BPF_REG_0;
753         const int ref_reg = BPF_REG_1;
754
755         BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
756                      (void *(*)(struct bpf_map *map, void *key))NULL));
757         *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
758         *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
759         *insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
760                               offsetof(struct htab_elem, lru_node) +
761                               offsetof(struct bpf_lru_node, ref));
762         *insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
763         *insn++ = BPF_ST_MEM(BPF_B, ret,
764                              offsetof(struct htab_elem, lru_node) +
765                              offsetof(struct bpf_lru_node, ref),
766                              1);
767         *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
768                                 offsetof(struct htab_elem, key) +
769                                 round_up(map->key_size, 8));
770         return insn - insn_buf;
771 }
772
773 static void check_and_free_fields(struct bpf_htab *htab,
774                                   struct htab_elem *elem)
775 {
776         if (htab_is_percpu(htab)) {
777                 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
778                 int cpu;
779
780                 for_each_possible_cpu(cpu)
781                         bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
782         } else {
783                 void *map_value = elem->key + round_up(htab->map.key_size, 8);
784
785                 bpf_obj_free_fields(htab->map.record, map_value);
786         }
787 }
788
789 /* It is called from the bpf_lru_list when the LRU needs to delete
790  * older elements from the htab.
791  */
792 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
793 {
794         struct bpf_htab *htab = arg;
795         struct htab_elem *l = NULL, *tgt_l;
796         struct hlist_nulls_head *head;
797         struct hlist_nulls_node *n;
798         unsigned long flags;
799         struct bucket *b;
800         int ret;
801
802         tgt_l = container_of(node, struct htab_elem, lru_node);
803         b = __select_bucket(htab, tgt_l->hash);
804         head = &b->head;
805
806         ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
807         if (ret)
808                 return false;
809
810         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
811                 if (l == tgt_l) {
812                         hlist_nulls_del_rcu(&l->hash_node);
813                         check_and_free_fields(htab, l);
814                         bpf_map_dec_elem_count(&htab->map);
815                         break;
816                 }
817
818         htab_unlock_bucket(htab, b, tgt_l->hash, flags);
819
820         return l == tgt_l;
821 }
822
823 /* Called from syscall */
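/* A typical user-space walk over the map looks roughly like this
 * (illustrative sketch, assuming a 4-byte key):
 *
 *   __u32 cur, next;
 *   int err = bpf_map_get_next_key(map_fd, NULL, &cur);
 *   while (!err) {
 *           ... process cur ...
 *           err = bpf_map_get_next_key(map_fd, &cur, &next);
 *           cur = next;
 *   }
 *   err is -ENOENT once all buckets have been visited.
 */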
824 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
825 {
826         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
827         struct hlist_nulls_head *head;
828         struct htab_elem *l, *next_l;
829         u32 hash, key_size;
830         int i = 0;
831
832         WARN_ON_ONCE(!rcu_read_lock_held());
833
834         key_size = map->key_size;
835
836         if (!key)
837                 goto find_first_elem;
838
839         hash = htab_map_hash(key, key_size, htab->hashrnd);
840
841         head = select_bucket(htab, hash);
842
843         /* lookup the key */
844         l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
845
846         if (!l)
847                 goto find_first_elem;
848
849         /* key was found, get next key in the same bucket */
850         next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
851                                   struct htab_elem, hash_node);
852
853         if (next_l) {
854                 /* if next elem in this hash list is not NULL, just return it */
855                 memcpy(next_key, next_l->key, key_size);
856                 return 0;
857         }
858
859         /* no more elements in this hash list, go to the next bucket */
860         i = hash & (htab->n_buckets - 1);
861         i++;
862
863 find_first_elem:
864         /* iterate over buckets */
865         for (; i < htab->n_buckets; i++) {
866                 head = select_bucket(htab, i);
867
868                 /* pick first element in the bucket */
869                 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
870                                           struct htab_elem, hash_node);
871                 if (next_l) {
872                         /* if it's not empty, just return it */
873                         memcpy(next_key, next_l->key, key_size);
874                         return 0;
875                 }
876         }
877
878         /* iterated over all buckets and all elements */
879         return -ENOENT;
880 }
881
882 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
883 {
884         check_and_free_fields(htab, l);
885         if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
886                 bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
887         bpf_mem_cache_free(&htab->ma, l);
888 }
889
890 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
891 {
892         struct bpf_map *map = &htab->map;
893         void *ptr;
894
895         if (map->ops->map_fd_put_ptr) {
896                 ptr = fd_htab_map_get_ptr(map, l);
897                 map->ops->map_fd_put_ptr(ptr);
898         }
899 }
900
901 static bool is_map_full(struct bpf_htab *htab)
902 {
903         if (htab->use_percpu_counter)
904                 return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
905                                                 PERCPU_COUNTER_BATCH) >= 0;
906         return atomic_read(&htab->count) >= htab->map.max_entries;
907 }
908
909 static void inc_elem_count(struct bpf_htab *htab)
910 {
911         bpf_map_inc_elem_count(&htab->map);
912
913         if (htab->use_percpu_counter)
914                 percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
915         else
916                 atomic_inc(&htab->count);
917 }
918
919 static void dec_elem_count(struct bpf_htab *htab)
920 {
921         bpf_map_dec_elem_count(&htab->map);
922
923         if (htab->use_percpu_counter)
924                 percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
925         else
926                 atomic_dec(&htab->count);
927 }
928
929
930 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
931 {
932         htab_put_fd_value(htab, l);
933
934         if (htab_is_prealloc(htab)) {
935                 bpf_map_dec_elem_count(&htab->map);
936                 check_and_free_fields(htab, l);
937                 __pcpu_freelist_push(&htab->freelist, &l->fnode);
938         } else {
939                 dec_elem_count(htab);
940                 htab_elem_free(htab, l);
941         }
942 }
943
944 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
945                             void *value, bool onallcpus)
946 {
947         if (!onallcpus) {
948                 /* copy true value_size bytes */
949                 copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
950         } else {
951                 u32 size = round_up(htab->map.value_size, 8);
952                 int off = 0, cpu;
953
954                 for_each_possible_cpu(cpu) {
955                         copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
956                         off += size;
957                 }
958         }
959 }
960
961 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
962                             void *value, bool onallcpus)
963 {
964         /* When not setting the initial value on all cpus, zero-fill element
965          * values for other cpus. Otherwise, bpf program has no way to ensure
966          * known initial values for cpus other than current one
967          * (onallcpus=false always when coming from bpf prog).
968          */
969         if (!onallcpus) {
970                 int current_cpu = raw_smp_processor_id();
971                 int cpu;
972
973                 for_each_possible_cpu(cpu) {
974                         if (cpu == current_cpu)
975                                 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value);
976                         else /* Since elem is preallocated, we cannot touch special fields */
977                                 zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu));
978                 }
979         } else {
980                 pcpu_copy_value(htab, pptr, value, onallcpus);
981         }
982 }
983
984 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
985 {
986         return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
987                BITS_PER_LONG == 64;
988 }
989
990 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
991                                          void *value, u32 key_size, u32 hash,
992                                          bool percpu, bool onallcpus,
993                                          struct htab_elem *old_elem)
994 {
995         u32 size = htab->map.value_size;
996         bool prealloc = htab_is_prealloc(htab);
997         struct htab_elem *l_new, **pl_new;
998         void __percpu *pptr;
999
1000         if (prealloc) {
1001                 if (old_elem) {
1002                         /* if we're updating the existing element,
1003                          * use per-cpu extra elems to avoid freelist_pop/push
1004                          */
1005                         pl_new = this_cpu_ptr(htab->extra_elems);
1006                         l_new = *pl_new;
1007                         htab_put_fd_value(htab, old_elem);
1008                         *pl_new = old_elem;
1009                 } else {
1010                         struct pcpu_freelist_node *l;
1011
1012                         l = __pcpu_freelist_pop(&htab->freelist);
1013                         if (!l)
1014                                 return ERR_PTR(-E2BIG);
1015                         l_new = container_of(l, struct htab_elem, fnode);
1016                         bpf_map_inc_elem_count(&htab->map);
1017                 }
1018         } else {
1019                 if (is_map_full(htab))
1020                         if (!old_elem)
1021                                 /* when map is full and update() is replacing
1022                                  * old element, it's ok to allocate, since
1023                                  * old element will be freed immediately.
1024                                  * Otherwise return an error
1025                                  */
1026                                 return ERR_PTR(-E2BIG);
1027                 inc_elem_count(htab);
1028                 l_new = bpf_mem_cache_alloc(&htab->ma);
1029                 if (!l_new) {
1030                         l_new = ERR_PTR(-ENOMEM);
1031                         goto dec_count;
1032                 }
1033         }
1034
1035         memcpy(l_new->key, key, key_size);
1036         if (percpu) {
1037                 if (prealloc) {
1038                         pptr = htab_elem_get_ptr(l_new, key_size);
1039                 } else {
1040                         /* alloc_percpu zero-fills */
1041                         pptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
1042                         if (!pptr) {
1043                                 bpf_mem_cache_free(&htab->ma, l_new);
1044                                 l_new = ERR_PTR(-ENOMEM);
1045                                 goto dec_count;
1046                         }
1047                         l_new->ptr_to_pptr = pptr;
1048                         pptr = *(void **)pptr;
1049                 }
1050
1051                 pcpu_init_value(htab, pptr, value, onallcpus);
1052
1053                 if (!prealloc)
1054                         htab_elem_set_ptr(l_new, key_size, pptr);
1055         } else if (fd_htab_map_needs_adjust(htab)) {
1056                 size = round_up(size, 8);
1057                 memcpy(l_new->key + round_up(key_size, 8), value, size);
1058         } else {
1059                 copy_map_value(&htab->map,
1060                                l_new->key + round_up(key_size, 8),
1061                                value);
1062         }
1063
1064         l_new->hash = hash;
1065         return l_new;
1066 dec_count:
1067         dec_elem_count(htab);
1068         return l_new;
1069 }
1070
1071 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
1072                        u64 map_flags)
1073 {
1074         if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
1075                 /* elem already exists */
1076                 return -EEXIST;
1077
1078         if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
1079                 /* elem doesn't exist, cannot update it */
1080                 return -ENOENT;
1081
1082         return 0;
1083 }
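/* From user space these flags are passed to bpf_map_update_elem(), e.g.
 * (illustrative):
 *
 *   bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);   fails with -EEXIST if key present
 *   bpf_map_update_elem(map_fd, &key, &val, BPF_EXIST);     fails with -ENOENT if key absent
 *   bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);       creates or updates
 */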
1084
1085 /* Called from syscall or from eBPF program */
1086 static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
1087                                  u64 map_flags)
1088 {
1089         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1090         struct htab_elem *l_new = NULL, *l_old;
1091         struct hlist_nulls_head *head;
1092         unsigned long flags;
1093         struct bucket *b;
1094         u32 key_size, hash;
1095         int ret;
1096
1097         if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
1098                 /* unknown flags */
1099                 return -EINVAL;
1100
1101         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1102                      !rcu_read_lock_bh_held());
1103
1104         key_size = map->key_size;
1105
1106         hash = htab_map_hash(key, key_size, htab->hashrnd);
1107
1108         b = __select_bucket(htab, hash);
1109         head = &b->head;
1110
1111         if (unlikely(map_flags & BPF_F_LOCK)) {
1112                 if (unlikely(!btf_record_has_field(map->record, BPF_SPIN_LOCK)))
1113                         return -EINVAL;
1114                 /* find an element without taking the bucket lock */
1115                 l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
1116                                               htab->n_buckets);
1117                 ret = check_flags(htab, l_old, map_flags);
1118                 if (ret)
1119                         return ret;
1120                 if (l_old) {
1121                         /* grab the element lock and update value in place */
1122                         copy_map_value_locked(map,
1123                                               l_old->key + round_up(key_size, 8),
1124                                               value, false);
1125                         return 0;
1126                 }
1127                 /* fall through, grab the bucket lock and lookup again.
1128                  * 99.9% chance that the element won't be found,
1129                  * but second lookup under lock has to be done.
1130                  */
1131         }
1132
1133         ret = htab_lock_bucket(htab, b, hash, &flags);
1134         if (ret)
1135                 return ret;
1136
1137         l_old = lookup_elem_raw(head, hash, key, key_size);
1138
1139         ret = check_flags(htab, l_old, map_flags);
1140         if (ret)
1141                 goto err;
1142
1143         if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
1144                 /* first lookup without the bucket lock didn't find the element,
1145                  * but second lookup with the bucket lock found it.
1146                  * This case is highly unlikely, but has to be dealt with:
1147                  * grab the element lock in addition to the bucket lock
1148                  * and update element in place
1149                  */
1150                 copy_map_value_locked(map,
1151                                       l_old->key + round_up(key_size, 8),
1152                                       value, false);
1153                 ret = 0;
1154                 goto err;
1155         }
1156
1157         l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
1158                                 l_old);
1159         if (IS_ERR(l_new)) {
1160                 /* all pre-allocated elements are in use or memory exhausted */
1161                 ret = PTR_ERR(l_new);
1162                 goto err;
1163         }
1164
1165         /* add new element to the head of the list, so that
1166          * concurrent search will find it before old elem
1167          */
1168         hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1169         if (l_old) {
1170                 hlist_nulls_del_rcu(&l_old->hash_node);
1171                 if (!htab_is_prealloc(htab))
1172                         free_htab_elem(htab, l_old);
1173                 else
1174                         check_and_free_fields(htab, l_old);
1175         }
1176         ret = 0;
1177 err:
1178         htab_unlock_bucket(htab, b, hash, flags);
1179         return ret;
1180 }
1181
1182 static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
1183 {
1184         check_and_free_fields(htab, elem);
1185         bpf_map_dec_elem_count(&htab->map);
1186         bpf_lru_push_free(&htab->lru, &elem->lru_node);
1187 }
1188
1189 static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
1190                                      u64 map_flags)
1191 {
1192         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1193         struct htab_elem *l_new, *l_old = NULL;
1194         struct hlist_nulls_head *head;
1195         unsigned long flags;
1196         struct bucket *b;
1197         u32 key_size, hash;
1198         int ret;
1199
1200         if (unlikely(map_flags > BPF_EXIST))
1201                 /* unknown flags */
1202                 return -EINVAL;
1203
1204         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1205                      !rcu_read_lock_bh_held());
1206
1207         key_size = map->key_size;
1208
1209         hash = htab_map_hash(key, key_size, htab->hashrnd);
1210
1211         b = __select_bucket(htab, hash);
1212         head = &b->head;
1213
1214         /* For LRU, we need to alloc before taking bucket's
1215          * spinlock because getting free nodes from LRU may need
1216          * to remove older elements from htab and this removal
1217          * operation will need a bucket lock.
1218          */
1219         l_new = prealloc_lru_pop(htab, key, hash);
1220         if (!l_new)
1221                 return -ENOMEM;
1222         copy_map_value(&htab->map,
1223                        l_new->key + round_up(map->key_size, 8), value);
1224
1225         ret = htab_lock_bucket(htab, b, hash, &flags);
1226         if (ret)
1227                 goto err_lock_bucket;
1228
1229         l_old = lookup_elem_raw(head, hash, key, key_size);
1230
1231         ret = check_flags(htab, l_old, map_flags);
1232         if (ret)
1233                 goto err;
1234
1235         /* add new element to the head of the list, so that
1236          * concurrent search will find it before old elem
1237          */
1238         hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1239         if (l_old) {
1240                 bpf_lru_node_set_ref(&l_new->lru_node);
1241                 hlist_nulls_del_rcu(&l_old->hash_node);
1242         }
1243         ret = 0;
1244
1245 err:
1246         htab_unlock_bucket(htab, b, hash, flags);
1247
1248 err_lock_bucket:
1249         if (ret)
1250                 htab_lru_push_free(htab, l_new);
1251         else if (l_old)
1252                 htab_lru_push_free(htab, l_old);
1253
1254         return ret;
1255 }
1256
1257 static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1258                                           void *value, u64 map_flags,
1259                                           bool onallcpus)
1260 {
1261         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1262         struct htab_elem *l_new = NULL, *l_old;
1263         struct hlist_nulls_head *head;
1264         unsigned long flags;
1265         struct bucket *b;
1266         u32 key_size, hash;
1267         int ret;
1268
1269         if (unlikely(map_flags > BPF_EXIST))
1270                 /* unknown flags */
1271                 return -EINVAL;
1272
1273         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1274                      !rcu_read_lock_bh_held());
1275
1276         key_size = map->key_size;
1277
1278         hash = htab_map_hash(key, key_size, htab->hashrnd);
1279
1280         b = __select_bucket(htab, hash);
1281         head = &b->head;
1282
1283         ret = htab_lock_bucket(htab, b, hash, &flags);
1284         if (ret)
1285                 return ret;
1286
1287         l_old = lookup_elem_raw(head, hash, key, key_size);
1288
1289         ret = check_flags(htab, l_old, map_flags);
1290         if (ret)
1291                 goto err;
1292
1293         if (l_old) {
1294                 /* per-cpu hash map can update value in-place */
1295                 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1296                                 value, onallcpus);
1297         } else {
1298                 l_new = alloc_htab_elem(htab, key, value, key_size,
1299                                         hash, true, onallcpus, NULL);
1300                 if (IS_ERR(l_new)) {
1301                         ret = PTR_ERR(l_new);
1302                         goto err;
1303                 }
1304                 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1305         }
1306         ret = 0;
1307 err:
1308         htab_unlock_bucket(htab, b, hash, flags);
1309         return ret;
1310 }
1311
1312 static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1313                                               void *value, u64 map_flags,
1314                                               bool onallcpus)
1315 {
1316         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1317         struct htab_elem *l_new = NULL, *l_old;
1318         struct hlist_nulls_head *head;
1319         unsigned long flags;
1320         struct bucket *b;
1321         u32 key_size, hash;
1322         int ret;
1323
1324         if (unlikely(map_flags > BPF_EXIST))
1325                 /* unknown flags */
1326                 return -EINVAL;
1327
1328         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1329                      !rcu_read_lock_bh_held());
1330
1331         key_size = map->key_size;
1332
1333         hash = htab_map_hash(key, key_size, htab->hashrnd);
1334
1335         b = __select_bucket(htab, hash);
1336         head = &b->head;
1337
1338         /* For LRU, we need to alloc before taking bucket's
1339          * spinlock because LRU's elem alloc may need
1340          * to remove older elem from htab and this removal
1341          * operation will need a bucket lock.
1342          */
1343         if (map_flags != BPF_EXIST) {
1344                 l_new = prealloc_lru_pop(htab, key, hash);
1345                 if (!l_new)
1346                         return -ENOMEM;
1347         }
1348
1349         ret = htab_lock_bucket(htab, b, hash, &flags);
1350         if (ret)
1351                 goto err_lock_bucket;
1352
1353         l_old = lookup_elem_raw(head, hash, key, key_size);
1354
1355         ret = check_flags(htab, l_old, map_flags);
1356         if (ret)
1357                 goto err;
1358
1359         if (l_old) {
1360                 bpf_lru_node_set_ref(&l_old->lru_node);
1361
1362                 /* per-cpu hash map can update value in-place */
1363                 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1364                                 value, onallcpus);
1365         } else {
1366                 pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
1367                                 value, onallcpus);
1368                 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
1369                 l_new = NULL;
1370         }
1371         ret = 0;
1372 err:
1373         htab_unlock_bucket(htab, b, hash, flags);
1374 err_lock_bucket:
1375         if (l_new) {
1376                 bpf_map_dec_elem_count(&htab->map);
1377                 bpf_lru_push_free(&htab->lru, &l_new->lru_node);
1378         }
1379         return ret;
1380 }
1381
1382 static long htab_percpu_map_update_elem(struct bpf_map *map, void *key,
1383                                         void *value, u64 map_flags)
1384 {
1385         return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
1386 }
1387
1388 static long htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
1389                                             void *value, u64 map_flags)
1390 {
1391         return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
1392                                                  false);
1393 }
1394
1395 /* Called from syscall or from eBPF program */
1396 static long htab_map_delete_elem(struct bpf_map *map, void *key)
1397 {
1398         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1399         struct hlist_nulls_head *head;
1400         struct bucket *b;
1401         struct htab_elem *l;
1402         unsigned long flags;
1403         u32 hash, key_size;
1404         int ret;
1405
1406         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1407                      !rcu_read_lock_bh_held());
1408
1409         key_size = map->key_size;
1410
1411         hash = htab_map_hash(key, key_size, htab->hashrnd);
1412         b = __select_bucket(htab, hash);
1413         head = &b->head;
1414
1415         ret = htab_lock_bucket(htab, b, hash, &flags);
1416         if (ret)
1417                 return ret;
1418
1419         l = lookup_elem_raw(head, hash, key, key_size);
1420
1421         if (l) {
1422                 hlist_nulls_del_rcu(&l->hash_node);
1423                 free_htab_elem(htab, l);
1424         } else {
1425                 ret = -ENOENT;
1426         }
1427
1428         htab_unlock_bucket(htab, b, hash, flags);
1429         return ret;
1430 }
1431
1432 static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
1433 {
1434         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1435         struct hlist_nulls_head *head;
1436         struct bucket *b;
1437         struct htab_elem *l;
1438         unsigned long flags;
1439         u32 hash, key_size;
1440         int ret;
1441
1442         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
1443                      !rcu_read_lock_bh_held());
1444
1445         key_size = map->key_size;
1446
1447         hash = htab_map_hash(key, key_size, htab->hashrnd);
1448         b = __select_bucket(htab, hash);
1449         head = &b->head;
1450
1451         ret = htab_lock_bucket(htab, b, hash, &flags);
1452         if (ret)
1453                 return ret;
1454
1455         l = lookup_elem_raw(head, hash, key, key_size);
1456
1457         if (l)
1458                 hlist_nulls_del_rcu(&l->hash_node);
1459         else
1460                 ret = -ENOENT;
1461
1462         htab_unlock_bucket(htab, b, hash, flags);
1463         if (l)
1464                 htab_lru_push_free(htab, l);
1465         return ret;
1466 }
1467
1468 static void delete_all_elements(struct bpf_htab *htab)
1469 {
1470         int i;
1471
1472         /* It's called from a worker thread, so disable migration here,
1473          * since bpf_mem_cache_free() relies on that.
1474          */
1475         migrate_disable();
1476         for (i = 0; i < htab->n_buckets; i++) {
1477                 struct hlist_nulls_head *head = select_bucket(htab, i);
1478                 struct hlist_nulls_node *n;
1479                 struct htab_elem *l;
1480
1481                 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1482                         hlist_nulls_del_rcu(&l->hash_node);
1483                         htab_elem_free(htab, l);
1484                 }
1485         }
1486         migrate_enable();
1487 }
1488
1489 static void htab_free_malloced_timers(struct bpf_htab *htab)
1490 {
1491         int i;
1492
1493         rcu_read_lock();
1494         for (i = 0; i < htab->n_buckets; i++) {
1495                 struct hlist_nulls_head *head = select_bucket(htab, i);
1496                 struct hlist_nulls_node *n;
1497                 struct htab_elem *l;
1498
1499                 hlist_nulls_for_each_entry(l, n, head, hash_node) {
1500                         /* We only free the timer when uref drops to zero */
1501                         bpf_obj_free_timer(htab->map.record, l->key + round_up(htab->map.key_size, 8));
1502                 }
1503                 cond_resched_rcu();
1504         }
1505         rcu_read_unlock();
1506 }
1507
1508 static void htab_map_free_timers(struct bpf_map *map)
1509 {
1510         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1511
1512         /* We only free the timer when uref drops to zero */
1513         if (!btf_record_has_field(htab->map.record, BPF_TIMER))
1514                 return;
1515         if (!htab_is_prealloc(htab))
1516                 htab_free_malloced_timers(htab);
1517         else
1518                 htab_free_prealloced_timers(htab);
1519 }
1520
1521 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
1522 static void htab_map_free(struct bpf_map *map)
1523 {
1524         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1525         int i;
1526
1527         /* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
1528          * bpf_free_used_maps() is called after bpf prog is no longer executing.
1529          * There is no need to synchronize_rcu() here to protect map elements.
1530          */
1531
1532         /* htab no longer uses call_rcu() directly. bpf_mem_alloc does it
1533          * underneath and is responsible for waiting for callbacks to finish
1534          * during bpf_mem_alloc_destroy().
1535          */
1536         if (!htab_is_prealloc(htab)) {
1537                 delete_all_elements(htab);
1538         } else {
1539                 htab_free_prealloced_fields(htab);
1540                 prealloc_destroy(htab);
1541         }
1542
1543         bpf_map_free_elem_count(map);
1544         free_percpu(htab->extra_elems);
1545         bpf_map_area_free(htab->buckets);
1546         bpf_mem_alloc_destroy(&htab->pcpu_ma);
1547         bpf_mem_alloc_destroy(&htab->ma);
1548         if (htab->use_percpu_counter)
1549                 percpu_counter_destroy(&htab->pcount);
1550         for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
1551                 free_percpu(htab->map_locked[i]);
1552         lockdep_unregister_key(&htab->lockdep_key);
1553         bpf_map_area_free(htab);
1554 }
1555
1556 static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
1557                                    struct seq_file *m)
1558 {
1559         void *value;
1560
1561         rcu_read_lock();
1562
1563         value = htab_map_lookup_elem(map, key);
1564         if (!value) {
1565                 rcu_read_unlock();
1566                 return;
1567         }
1568
1569         btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
1570         seq_puts(m, ": ");
1571         btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
1572         seq_puts(m, "\n");
1573
1574         rcu_read_unlock();
1575 }
1576
1577 static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1578                                              void *value, bool is_lru_map,
1579                                              bool is_percpu, u64 flags)
1580 {
1581         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1582         struct hlist_nulls_head *head;
1583         unsigned long bflags;
1584         struct htab_elem *l;
1585         u32 hash, key_size;
1586         struct bucket *b;
1587         int ret;
1588
1589         key_size = map->key_size;
1590
1591         hash = htab_map_hash(key, key_size, htab->hashrnd);
1592         b = __select_bucket(htab, hash);
1593         head = &b->head;
1594
1595         ret = htab_lock_bucket(htab, b, hash, &bflags);
1596         if (ret)
1597                 return ret;
1598
1599         l = lookup_elem_raw(head, hash, key, key_size);
1600         if (!l) {
1601                 ret = -ENOENT;
1602         } else {
1603                 if (is_percpu) {
1604                         u32 roundup_value_size = round_up(map->value_size, 8);
1605                         void __percpu *pptr;
1606                         int off = 0, cpu;
1607
1608                         pptr = htab_elem_get_ptr(l, key_size);
1609                         for_each_possible_cpu(cpu) {
1610                                 copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
1611                                 check_and_init_map_value(&htab->map, value + off);
1612                                 off += roundup_value_size;
1613                         }
1614                 } else {
1615                         u32 roundup_key_size = round_up(map->key_size, 8);
1616
1617                         if (flags & BPF_F_LOCK)
1618                                 copy_map_value_locked(map, value, l->key +
1619                                                       roundup_key_size,
1620                                                       true);
1621                         else
1622                                 copy_map_value(map, value, l->key +
1623                                                roundup_key_size);
1624                         /* Zeroing special fields in the temp buffer */
1625                         check_and_init_map_value(map, value);
1626                 }
1627
1628                 hlist_nulls_del_rcu(&l->hash_node);
1629                 if (!is_lru_map)
1630                         free_htab_elem(htab, l);
1631         }
1632
1633         htab_unlock_bucket(htab, b, hash, bflags);
1634
1635         if (is_lru_map && l)
1636                 htab_lru_push_free(htab, l);
1637
1638         return ret;
1639 }
1640
1641 static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1642                                            void *value, u64 flags)
1643 {
1644         return __htab_map_lookup_and_delete_elem(map, key, value, false, false,
1645                                                  flags);
1646 }
1647
1648 static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1649                                                   void *key, void *value,
1650                                                   u64 flags)
1651 {
1652         return __htab_map_lookup_and_delete_elem(map, key, value, false, true,
1653                                                  flags);
1654 }
1655
1656 static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1657                                                void *value, u64 flags)
1658 {
1659         return __htab_map_lookup_and_delete_elem(map, key, value, true, false,
1660                                                  flags);
1661 }
1662
1663 static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1664                                                       void *key, void *value,
1665                                                       u64 flags)
1666 {
1667         return __htab_map_lookup_and_delete_elem(map, key, value, true, true,
1668                                                  flags);
1669 }
1670
1671 static int
1672 __htab_map_lookup_and_delete_batch(struct bpf_map *map,
1673                                    const union bpf_attr *attr,
1674                                    union bpf_attr __user *uattr,
1675                                    bool do_delete, bool is_lru_map,
1676                                    bool is_percpu)
1677 {
1678         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1679         u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
1680         void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
1681         void __user *uvalues = u64_to_user_ptr(attr->batch.values);
1682         void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
1683         void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1684         u32 batch, max_count, size, bucket_size, map_id;
1685         struct htab_elem *node_to_free = NULL;
1686         u64 elem_map_flags, map_flags;
1687         struct hlist_nulls_head *head;
1688         struct hlist_nulls_node *n;
1689         unsigned long flags = 0;
1690         bool locked = false;
1691         struct htab_elem *l;
1692         struct bucket *b;
1693         int ret = 0;
1694
1695         elem_map_flags = attr->batch.elem_flags;
1696         if ((elem_map_flags & ~BPF_F_LOCK) ||
1697             ((elem_map_flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
1698                 return -EINVAL;
1699
1700         map_flags = attr->batch.flags;
1701         if (map_flags)
1702                 return -EINVAL;
1703
1704         max_count = attr->batch.count;
1705         if (!max_count)
1706                 return 0;
1707
1708         if (put_user(0, &uattr->batch.count))
1709                 return -EFAULT;
1710
1711         batch = 0;
1712         if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
1713                 return -EFAULT;
1714
1715         if (batch >= htab->n_buckets)
1716                 return -ENOENT;
1717
1718         key_size = htab->map.key_size;
1719         roundup_key_size = round_up(htab->map.key_size, 8);
1720         value_size = htab->map.value_size;
1721         size = round_up(value_size, 8);
1722         if (is_percpu)
1723                 value_size = size * num_possible_cpus();
1724         total = 0;
1725         /* While experimenting with hash tables of sizes ranging from 10 to
1726          * 1000, it was observed that a bucket can have up to 5 entries.
1727          */
1728         bucket_size = 5;
1729
1730 alloc:
1731         /* We cannot do copy_from_user() or copy_to_user() while holding
1732          * rcu_read_lock(). Allocate enough space here.
1733          */
1734         keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
1735         values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
1736         if (!keys || !values) {
1737                 ret = -ENOMEM;
1738                 goto after_loop;
1739         }
1740
1741 again:
1742         bpf_disable_instrumentation();
1743         rcu_read_lock();
1744 again_nocopy:
1745         dst_key = keys;
1746         dst_val = values;
1747         b = &htab->buckets[batch];
1748         head = &b->head;
1749         /* do not grab the lock unless we need it (bucket_cnt > 0). */
1750         if (locked) {
1751                 ret = htab_lock_bucket(htab, b, batch, &flags);
1752                 if (ret) {
1753                         rcu_read_unlock();
1754                         bpf_enable_instrumentation();
1755                         goto after_loop;
1756                 }
1757         }
1758
1759         bucket_cnt = 0;
1760         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
1761                 bucket_cnt++;
1762
1763         if (bucket_cnt && !locked) {
1764                 locked = true;
1765                 goto again_nocopy;
1766         }
1767
1768         if (bucket_cnt > (max_count - total)) {
1769                 if (total == 0)
1770                         ret = -ENOSPC;
1771                 /* Note that since bucket_cnt > 0 here, it is implicit
1772                  * that the lock was grabbed, so release it.
1773                  */
1774                 htab_unlock_bucket(htab, b, batch, flags);
1775                 rcu_read_unlock();
1776                 bpf_enable_instrumentation();
1777                 goto after_loop;
1778         }
1779
1780         if (bucket_cnt > bucket_size) {
1781                 bucket_size = bucket_cnt;
1782                 /* Note that since bucket_cnt > 0 here, it is implicit
1783                  * that the lock was grabbed, so release it.
1784                  */
1785                 htab_unlock_bucket(htab, b, batch, flags);
1786                 rcu_read_unlock();
1787                 bpf_enable_instrumentation();
1788                 kvfree(keys);
1789                 kvfree(values);
1790                 goto alloc;
1791         }
1792
1793         /* Next block is only safe to run if you have grabbed the lock */
1794         if (!locked)
1795                 goto next_batch;
1796
1797         hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1798                 memcpy(dst_key, l->key, key_size);
1799
1800                 if (is_percpu) {
1801                         int off = 0, cpu;
1802                         void __percpu *pptr;
1803
1804                         pptr = htab_elem_get_ptr(l, map->key_size);
1805                         for_each_possible_cpu(cpu) {
1806                                 copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu));
1807                                 check_and_init_map_value(&htab->map, dst_val + off);
1808                                 off += size;
1809                         }
1810                 } else {
1811                         value = l->key + roundup_key_size;
1812                         if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
1813                                 struct bpf_map **inner_map = value;
1814
1815                                  /* Actual value is the id of the inner map */
1816                                 map_id = map->ops->map_fd_sys_lookup_elem(*inner_map);
1817                                 value = &map_id;
1818                         }
1819
1820                         if (elem_map_flags & BPF_F_LOCK)
1821                                 copy_map_value_locked(map, dst_val, value,
1822                                                       true);
1823                         else
1824                                 copy_map_value(map, dst_val, value);
1825                         /* Zeroing special fields in the temp buffer */
1826                         check_and_init_map_value(map, dst_val);
1827                 }
1828                 if (do_delete) {
1829                         hlist_nulls_del_rcu(&l->hash_node);
1830
1831                         /* bpf_lru_push_free() will acquire lru_lock, which
1832                          * may cause deadlock. See comments in function
1833                          * prealloc_lru_pop(). Let us do bpf_lru_push_free()
1834                          * after releasing the bucket lock.
1835                          */
1836                         if (is_lru_map) {
1837                                 l->batch_flink = node_to_free;
1838                                 node_to_free = l;
1839                         } else {
1840                                 free_htab_elem(htab, l);
1841                         }
1842                 }
1843                 dst_key += key_size;
1844                 dst_val += value_size;
1845         }
1846
1847         htab_unlock_bucket(htab, b, batch, flags);
1848         locked = false;
1849
1850         while (node_to_free) {
1851                 l = node_to_free;
1852                 node_to_free = node_to_free->batch_flink;
1853                 htab_lru_push_free(htab, l);
1854         }
1855
1856 next_batch:
1857         /* If we are not copying data, we can go to the next bucket and avoid
1858          * dropping the RCU read lock.
1859          */
1860         if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
1861                 batch++;
1862                 goto again_nocopy;
1863         }
1864
1865         rcu_read_unlock();
1866         bpf_enable_instrumentation();
1867         if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
1868             key_size * bucket_cnt) ||
1869             copy_to_user(uvalues + total * value_size, values,
1870             value_size * bucket_cnt))) {
1871                 ret = -EFAULT;
1872                 goto after_loop;
1873         }
1874
1875         total += bucket_cnt;
1876         batch++;
1877         if (batch >= htab->n_buckets) {
1878                 ret = -ENOENT;
1879                 goto after_loop;
1880         }
1881         goto again;
1882
1883 after_loop:
1884         if (ret == -EFAULT)
1885                 goto out;
1886
1887         /* copy # of entries and next batch */
1888         ubatch = u64_to_user_ptr(attr->batch.out_batch);
1889         if (copy_to_user(ubatch, &batch, sizeof(batch)) ||
1890             put_user(total, &uattr->batch.count))
1891                 ret = -EFAULT;
1892
1893 out:
1894         kvfree(keys);
1895         kvfree(values);
1896         return ret;
1897 }
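
/* Illustrative user-space sketch of the batch contract implemented above
 * (libbpf names assumed; the u32 keys, u64 values and omitted error handling
 * are illustrative): keys/values hold up to 'count' entries per call,
 * out_batch feeds the next call's in_batch, and -ENOENT signals that the
 * final bucket has been visited.
 *
 *   __u32 in = 0, out = 0, count = 64;
 *   __u32 keys[64];
 *   __u64 vals[64];
 *   LIBBPF_OPTS(bpf_map_batch_opts, opts);
 *
 *   int err = bpf_map_lookup_and_delete_batch(map_fd, &in, &out, keys,
 *                                             vals, &count, &opts);
 *   // on success 'count' holds the number of entries copied out and
 *   // 'out' is passed back as 'in' on the next call
 */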
1898
1899 static int
1900 htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1901                              union bpf_attr __user *uattr)
1902 {
1903         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1904                                                   false, true);
1905 }
1906
1907 static int
1908 htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1909                                         const union bpf_attr *attr,
1910                                         union bpf_attr __user *uattr)
1911 {
1912         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1913                                                   false, true);
1914 }
1915
1916 static int
1917 htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1918                       union bpf_attr __user *uattr)
1919 {
1920         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1921                                                   false, false);
1922 }
1923
1924 static int
1925 htab_map_lookup_and_delete_batch(struct bpf_map *map,
1926                                  const union bpf_attr *attr,
1927                                  union bpf_attr __user *uattr)
1928 {
1929         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1930                                                   false, false);
1931 }
1932
1933 static int
1934 htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
1935                                  const union bpf_attr *attr,
1936                                  union bpf_attr __user *uattr)
1937 {
1938         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1939                                                   true, true);
1940 }
1941
1942 static int
1943 htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1944                                             const union bpf_attr *attr,
1945                                             union bpf_attr __user *uattr)
1946 {
1947         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1948                                                   true, true);
1949 }
1950
1951 static int
1952 htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1953                           union bpf_attr __user *uattr)
1954 {
1955         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1956                                                   true, false);
1957 }
1958
1959 static int
1960 htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
1961                                      const union bpf_attr *attr,
1962                                      union bpf_attr __user *uattr)
1963 {
1964         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1965                                                   true, false);
1966 }
1967
1968 struct bpf_iter_seq_hash_map_info {
1969         struct bpf_map *map;
1970         struct bpf_htab *htab;
1971         void *percpu_value_buf; /* non-NULL means percpu hash */
1972         u32 bucket_id;
1973         u32 skip_elems;
1974 };
1975
1976 static struct htab_elem *
1977 bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
1978                            struct htab_elem *prev_elem)
1979 {
1980         const struct bpf_htab *htab = info->htab;
1981         u32 skip_elems = info->skip_elems;
1982         u32 bucket_id = info->bucket_id;
1983         struct hlist_nulls_head *head;
1984         struct hlist_nulls_node *n;
1985         struct htab_elem *elem;
1986         struct bucket *b;
1987         u32 i, count;
1988
1989         if (bucket_id >= htab->n_buckets)
1990                 return NULL;
1991
1992         /* try to find next elem in the same bucket */
1993         if (prev_elem) {
1994                 /* no update/deletion on this bucket, prev_elem should still be valid
1995                  * and we won't skip elements.
1996                  */
1997                 n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
1998                 elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
1999                 if (elem)
2000                         return elem;
2001
2002                 /* not found, unlock and go to the next bucket */
2003                 b = &htab->buckets[bucket_id++];
2004                 rcu_read_unlock();
2005                 skip_elems = 0;
2006         }
2007
2008         for (i = bucket_id; i < htab->n_buckets; i++) {
2009                 b = &htab->buckets[i];
2010                 rcu_read_lock();
2011
2012                 count = 0;
2013                 head = &b->head;
2014                 hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
2015                         if (count >= skip_elems) {
2016                                 info->bucket_id = i;
2017                                 info->skip_elems = count;
2018                                 return elem;
2019                         }
2020                         count++;
2021                 }
2022
2023                 rcu_read_unlock();
2024                 skip_elems = 0;
2025         }
2026
2027         info->bucket_id = i;
2028         info->skip_elems = 0;
2029         return NULL;
2030 }
2031
2032 static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
2033 {
2034         struct bpf_iter_seq_hash_map_info *info = seq->private;
2035         struct htab_elem *elem;
2036
2037         elem = bpf_hash_map_seq_find_next(info, NULL);
2038         if (!elem)
2039                 return NULL;
2040
2041         if (*pos == 0)
2042                 ++*pos;
2043         return elem;
2044 }
2045
2046 static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2047 {
2048         struct bpf_iter_seq_hash_map_info *info = seq->private;
2049
2050         ++*pos;
2051         ++info->skip_elems;
2052         return bpf_hash_map_seq_find_next(info, v);
2053 }
2054
2055 static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
2056 {
2057         struct bpf_iter_seq_hash_map_info *info = seq->private;
2058         u32 roundup_key_size, roundup_value_size;
2059         struct bpf_iter__bpf_map_elem ctx = {};
2060         struct bpf_map *map = info->map;
2061         struct bpf_iter_meta meta;
2062         int ret = 0, off = 0, cpu;
2063         struct bpf_prog *prog;
2064         void __percpu *pptr;
2065
2066         meta.seq = seq;
2067         prog = bpf_iter_get_info(&meta, elem == NULL);
2068         if (prog) {
2069                 ctx.meta = &meta;
2070                 ctx.map = info->map;
2071                 if (elem) {
2072                         roundup_key_size = round_up(map->key_size, 8);
2073                         ctx.key = elem->key;
2074                         if (!info->percpu_value_buf) {
2075                                 ctx.value = elem->key + roundup_key_size;
2076                         } else {
2077                                 roundup_value_size = round_up(map->value_size, 8);
2078                                 pptr = htab_elem_get_ptr(elem, map->key_size);
2079                                 for_each_possible_cpu(cpu) {
2080                                         copy_map_value_long(map, info->percpu_value_buf + off,
2081                                                             per_cpu_ptr(pptr, cpu));
2082                                         check_and_init_map_value(map, info->percpu_value_buf + off);
2083                                         off += roundup_value_size;
2084                                 }
2085                                 ctx.value = info->percpu_value_buf;
2086                         }
2087                 }
2088                 ret = bpf_iter_run_prog(prog, &ctx);
2089         }
2090
2091         return ret;
2092 }
2093
2094 static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
2095 {
2096         return __bpf_hash_map_seq_show(seq, v);
2097 }
2098
2099 static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
2100 {
2101         if (!v)
2102                 (void)__bpf_hash_map_seq_show(seq, NULL);
2103         else
2104                 rcu_read_unlock();
2105 }
2106
2107 static int bpf_iter_init_hash_map(void *priv_data,
2108                                   struct bpf_iter_aux_info *aux)
2109 {
2110         struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2111         struct bpf_map *map = aux->map;
2112         void *value_buf;
2113         u32 buf_size;
2114
2115         if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
2116             map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
2117                 buf_size = round_up(map->value_size, 8) * num_possible_cpus();
2118                 value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
2119                 if (!value_buf)
2120                         return -ENOMEM;
2121
2122                 seq_info->percpu_value_buf = value_buf;
2123         }
2124
2125         bpf_map_inc_with_uref(map);
2126         seq_info->map = map;
2127         seq_info->htab = container_of(map, struct bpf_htab, map);
2128         return 0;
2129 }
2130
2131 static void bpf_iter_fini_hash_map(void *priv_data)
2132 {
2133         struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2134
2135         bpf_map_put_with_uref(seq_info->map);
2136         kfree(seq_info->percpu_value_buf);
2137 }
2138
2139 static const struct seq_operations bpf_hash_map_seq_ops = {
2140         .start  = bpf_hash_map_seq_start,
2141         .next   = bpf_hash_map_seq_next,
2142         .stop   = bpf_hash_map_seq_stop,
2143         .show   = bpf_hash_map_seq_show,
2144 };
2145
2146 static const struct bpf_iter_seq_info iter_seq_info = {
2147         .seq_ops                = &bpf_hash_map_seq_ops,
2148         .init_seq_private       = bpf_iter_init_hash_map,
2149         .fini_seq_private       = bpf_iter_fini_hash_map,
2150         .seq_priv_size          = sizeof(struct bpf_iter_seq_hash_map_info),
2151 };
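
/* Illustrative BPF-side sketch of the iterator wired up above (the SEC name
 * and program skeleton follow libbpf conventions; the u32 key is an
 * assumption, not taken from this file). The ctx fields mirror what
 * __bpf_hash_map_seq_show() fills in, with key/value NULL on the final call.
 *
 *   SEC("iter/bpf_map_elem")
 *   int dump_htab(struct bpf_iter__bpf_map_elem *ctx)
 *   {
 *           __u32 *key = ctx->key;
 *
 *           if (!key)               // NULL once iteration is done
 *                   return 0;
 *           BPF_SEQ_PRINTF(ctx->meta->seq, "key %u\n", *key);
 *           return 0;
 *   }
 */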
2152
2153 static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
2154                                    void *callback_ctx, u64 flags)
2155 {
2156         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2157         struct hlist_nulls_head *head;
2158         struct hlist_nulls_node *n;
2159         struct htab_elem *elem;
2160         u32 roundup_key_size;
2161         int i, num_elems = 0;
2162         void __percpu *pptr;
2163         struct bucket *b;
2164         void *key, *val;
2165         bool is_percpu;
2166         u64 ret = 0;
2167
2168         if (flags != 0)
2169                 return -EINVAL;
2170
2171         is_percpu = htab_is_percpu(htab);
2172
2173         roundup_key_size = round_up(map->key_size, 8);
2174         /* disable migration so the percpu value prepared here is the
2175          * same as the one seen by the bpf program via bpf_map_lookup_elem().
2176          */
2177         if (is_percpu)
2178                 migrate_disable();
2179         for (i = 0; i < htab->n_buckets; i++) {
2180                 b = &htab->buckets[i];
2181                 rcu_read_lock();
2182                 head = &b->head;
2183                 hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
2184                         key = elem->key;
2185                         if (is_percpu) {
2186                                 /* current cpu value for percpu map */
2187                                 pptr = htab_elem_get_ptr(elem, map->key_size);
2188                                 val = this_cpu_ptr(pptr);
2189                         } else {
2190                                 val = elem->key + roundup_key_size;
2191                         }
2192                         num_elems++;
2193                         ret = callback_fn((u64)(long)map, (u64)(long)key,
2194                                           (u64)(long)val, (u64)(long)callback_ctx, 0);
2195                         /* return value: 0 - continue, 1 - stop and return */
2196                         if (ret) {
2197                                 rcu_read_unlock();
2198                                 goto out;
2199                         }
2200                 }
2201                 rcu_read_unlock();
2202         }
2203 out:
2204         if (is_percpu)
2205                 migrate_enable();
2206         return num_elems;
2207 }
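
/* Illustrative sketch of the callback contract enforced by
 * bpf_for_each_hash_elem() above (the callback name and the u32/u64
 * key/value layout are assumptions): the callback receives map, key, value
 * and callback_ctx and returns 0 to continue or 1 to stop, matching the
 * check on 'ret' in the loop.
 *
 *   static long count_elems(struct bpf_map *map, __u32 *key, __u64 *val,
 *                           void *ctx)
 *   {
 *           __u64 *count = ctx;
 *
 *           (*count)++;
 *           return 0;               // 0 - continue, 1 - stop and return
 *   }
 *
 *   // from a BPF program:
 *   //   bpf_for_each_map_elem(&hash_map, count_elems, &count, 0);
 */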
2208
2209 static u64 htab_map_mem_usage(const struct bpf_map *map)
2210 {
2211         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2212         u32 value_size = round_up(htab->map.value_size, 8);
2213         bool prealloc = htab_is_prealloc(htab);
2214         bool percpu = htab_is_percpu(htab);
2215         bool lru = htab_is_lru(htab);
2216         u64 num_entries;
2217         u64 usage = sizeof(struct bpf_htab);
2218
2219         usage += sizeof(struct bucket) * htab->n_buckets;
2220         usage += sizeof(int) * num_possible_cpus() * HASHTAB_MAP_LOCK_COUNT;
2221         if (prealloc) {
2222                 num_entries = map->max_entries;
2223                 if (htab_has_extra_elems(htab))
2224                         num_entries += num_possible_cpus();
2225
2226                 usage += htab->elem_size * num_entries;
2227
2228                 if (percpu)
2229                         usage += value_size * num_possible_cpus() * num_entries;
2230                 else if (!lru)
2231                         usage += sizeof(struct htab_elem *) * num_possible_cpus();
2232         } else {
2233 #define LLIST_NODE_SZ sizeof(struct llist_node)
2234
2235                 num_entries = htab->use_percpu_counter ?
2236                                           percpu_counter_sum(&htab->pcount) :
2237                                           atomic_read(&htab->count);
2238                 usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;
2239                 if (percpu) {
2240                         usage += (LLIST_NODE_SZ + sizeof(void *)) * num_entries;
2241                         usage += value_size * num_possible_cpus() * num_entries;
2242                 }
2243         }
2244         return usage;
2245 }
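
/* Rough worked example of the prealloc branch above (numbers are
 * illustrative, not from this file): a preallocated, non-percpu, non-LRU
 * htab with max_entries = 1024, n_buckets = 1024 and 4 possible CPUs is
 * charged roughly:
 *
 *   sizeof(struct bpf_htab)
 *   + sizeof(struct bucket) * 1024                  // buckets
 *   + sizeof(int) * 4 * HASHTAB_MAP_LOCK_COUNT      // map_locked counters
 *   + elem_size * (1024 + 4)                        // elems + extra_elems
 *   + sizeof(struct htab_elem *) * 4                // extra_elems pointers
 */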
2246
2247 BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
2248 const struct bpf_map_ops htab_map_ops = {
2249         .map_meta_equal = bpf_map_meta_equal,
2250         .map_alloc_check = htab_map_alloc_check,
2251         .map_alloc = htab_map_alloc,
2252         .map_free = htab_map_free,
2253         .map_get_next_key = htab_map_get_next_key,
2254         .map_release_uref = htab_map_free_timers,
2255         .map_lookup_elem = htab_map_lookup_elem,
2256         .map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
2257         .map_update_elem = htab_map_update_elem,
2258         .map_delete_elem = htab_map_delete_elem,
2259         .map_gen_lookup = htab_map_gen_lookup,
2260         .map_seq_show_elem = htab_map_seq_show_elem,
2261         .map_set_for_each_callback_args = map_set_for_each_callback_args,
2262         .map_for_each_callback = bpf_for_each_hash_elem,
2263         .map_mem_usage = htab_map_mem_usage,
2264         BATCH_OPS(htab),
2265         .map_btf_id = &htab_map_btf_ids[0],
2266         .iter_seq_info = &iter_seq_info,
2267 };
2268
2269 const struct bpf_map_ops htab_lru_map_ops = {
2270         .map_meta_equal = bpf_map_meta_equal,
2271         .map_alloc_check = htab_map_alloc_check,
2272         .map_alloc = htab_map_alloc,
2273         .map_free = htab_map_free,
2274         .map_get_next_key = htab_map_get_next_key,
2275         .map_release_uref = htab_map_free_timers,
2276         .map_lookup_elem = htab_lru_map_lookup_elem,
2277         .map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
2278         .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
2279         .map_update_elem = htab_lru_map_update_elem,
2280         .map_delete_elem = htab_lru_map_delete_elem,
2281         .map_gen_lookup = htab_lru_map_gen_lookup,
2282         .map_seq_show_elem = htab_map_seq_show_elem,
2283         .map_set_for_each_callback_args = map_set_for_each_callback_args,
2284         .map_for_each_callback = bpf_for_each_hash_elem,
2285         .map_mem_usage = htab_map_mem_usage,
2286         BATCH_OPS(htab_lru),
2287         .map_btf_id = &htab_map_btf_ids[0],
2288         .iter_seq_info = &iter_seq_info,
2289 };
2290
2291 /* Called from eBPF program */
2292 static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2293 {
2294         struct htab_elem *l = __htab_map_lookup_elem(map, key);
2295
2296         if (l)
2297                 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2298         else
2299                 return NULL;
2300 }
2301
2302 static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
2303 {
2304         struct htab_elem *l;
2305
2306         if (cpu >= nr_cpu_ids)
2307                 return NULL;
2308
2309         l = __htab_map_lookup_elem(map, key);
2310         if (l)
2311                 return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
2312         else
2313                 return NULL;
2314 }
2315
2316 static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2317 {
2318         struct htab_elem *l = __htab_map_lookup_elem(map, key);
2319
2320         if (l) {
2321                 bpf_lru_node_set_ref(&l->lru_node);
2322                 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2323         }
2324
2325         return NULL;
2326 }
2327
2328 static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
2329 {
2330         struct htab_elem *l;
2331
2332         if (cpu >= nr_cpu_ids)
2333                 return NULL;
2334
2335         l = __htab_map_lookup_elem(map, key);
2336         if (l) {
2337                 bpf_lru_node_set_ref(&l->lru_node);
2338                 return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
2339         }
2340
2341         return NULL;
2342 }
2343
2344 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
2345 {
2346         struct htab_elem *l;
2347         void __percpu *pptr;
2348         int ret = -ENOENT;
2349         int cpu, off = 0;
2350         u32 size;
2351
2352         /* per_cpu areas are zero-filled and bpf programs can only
2353          * access 'value_size' of them, so copying rounded areas
2354          * will not leak any kernel data
2355          */
2356         size = round_up(map->value_size, 8);
2357         rcu_read_lock();
2358         l = __htab_map_lookup_elem(map, key);
2359         if (!l)
2360                 goto out;
2361         /* We do not mark the LRU map element here in order not to mess up
2362          * the eviction heuristics when user space does a map walk.
2363          */
2364         pptr = htab_elem_get_ptr(l, map->key_size);
2365         for_each_possible_cpu(cpu) {
2366                 copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
2367                 check_and_init_map_value(map, value + off);
2368                 off += size;
2369         }
2370         ret = 0;
2371 out:
2372         rcu_read_unlock();
2373         return ret;
2374 }
2375
2376 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
2377                            u64 map_flags)
2378 {
2379         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2380         int ret;
2381
2382         rcu_read_lock();
2383         if (htab_is_lru(htab))
2384                 ret = __htab_lru_percpu_map_update_elem(map, key, value,
2385                                                         map_flags, true);
2386         else
2387                 ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
2388                                                     true);
2389         rcu_read_unlock();
2390
2391         return ret;
2392 }
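
/* Illustrative user-space sketch of the per-cpu value layout used above
 * (libbpf names assumed; MAX_CPUS and the u64 value are placeholders):
 * syscall-side updates and lookups supply one slot per possible CPU, each
 * rounded up to 8 bytes, mirroring the copy loops in
 * bpf_percpu_hash_copy()/bpf_percpu_hash_update().
 *
 *   __u64 vals[MAX_CPUS] = {};      // one round_up(value_size, 8) slot per CPU
 *   int cpu, ncpus = libbpf_num_possible_cpus();
 *
 *   for (cpu = 0; cpu < ncpus; cpu++)
 *           vals[cpu] = 42;
 *   bpf_map_update_elem(map_fd, &key, vals, BPF_ANY);
 */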
2393
2394 static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
2395                                           struct seq_file *m)
2396 {
2397         struct htab_elem *l;
2398         void __percpu *pptr;
2399         int cpu;
2400
2401         rcu_read_lock();
2402
2403         l = __htab_map_lookup_elem(map, key);
2404         if (!l) {
2405                 rcu_read_unlock();
2406                 return;
2407         }
2408
2409         btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
2410         seq_puts(m, ": {\n");
2411         pptr = htab_elem_get_ptr(l, map->key_size);
2412         for_each_possible_cpu(cpu) {
2413                 seq_printf(m, "\tcpu%d: ", cpu);
2414                 btf_type_seq_show(map->btf, map->btf_value_type_id,
2415                                   per_cpu_ptr(pptr, cpu), m);
2416                 seq_puts(m, "\n");
2417         }
2418         seq_puts(m, "}\n");
2419
2420         rcu_read_unlock();
2421 }
2422
2423 const struct bpf_map_ops htab_percpu_map_ops = {
2424         .map_meta_equal = bpf_map_meta_equal,
2425         .map_alloc_check = htab_map_alloc_check,
2426         .map_alloc = htab_map_alloc,
2427         .map_free = htab_map_free,
2428         .map_get_next_key = htab_map_get_next_key,
2429         .map_lookup_elem = htab_percpu_map_lookup_elem,
2430         .map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
2431         .map_update_elem = htab_percpu_map_update_elem,
2432         .map_delete_elem = htab_map_delete_elem,
2433         .map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem,
2434         .map_seq_show_elem = htab_percpu_map_seq_show_elem,
2435         .map_set_for_each_callback_args = map_set_for_each_callback_args,
2436         .map_for_each_callback = bpf_for_each_hash_elem,
2437         .map_mem_usage = htab_map_mem_usage,
2438         BATCH_OPS(htab_percpu),
2439         .map_btf_id = &htab_map_btf_ids[0],
2440         .iter_seq_info = &iter_seq_info,
2441 };
2442
2443 const struct bpf_map_ops htab_lru_percpu_map_ops = {
2444         .map_meta_equal = bpf_map_meta_equal,
2445         .map_alloc_check = htab_map_alloc_check,
2446         .map_alloc = htab_map_alloc,
2447         .map_free = htab_map_free,
2448         .map_get_next_key = htab_map_get_next_key,
2449         .map_lookup_elem = htab_lru_percpu_map_lookup_elem,
2450         .map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
2451         .map_update_elem = htab_lru_percpu_map_update_elem,
2452         .map_delete_elem = htab_lru_map_delete_elem,
2453         .map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem,
2454         .map_seq_show_elem = htab_percpu_map_seq_show_elem,
2455         .map_set_for_each_callback_args = map_set_for_each_callback_args,
2456         .map_for_each_callback = bpf_for_each_hash_elem,
2457         .map_mem_usage = htab_map_mem_usage,
2458         BATCH_OPS(htab_lru_percpu),
2459         .map_btf_id = &htab_map_btf_ids[0],
2460         .iter_seq_info = &iter_seq_info,
2461 };
2462
2463 static int fd_htab_map_alloc_check(union bpf_attr *attr)
2464 {
2465         if (attr->value_size != sizeof(u32))
2466                 return -EINVAL;
2467         return htab_map_alloc_check(attr);
2468 }
2469
2470 static void fd_htab_map_free(struct bpf_map *map)
2471 {
2472         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2473         struct hlist_nulls_node *n;
2474         struct hlist_nulls_head *head;
2475         struct htab_elem *l;
2476         int i;
2477
2478         for (i = 0; i < htab->n_buckets; i++) {
2479                 head = select_bucket(htab, i);
2480
2481                 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
2482                         void *ptr = fd_htab_map_get_ptr(map, l);
2483
2484                         map->ops->map_fd_put_ptr(ptr);
2485                 }
2486         }
2487
2488         htab_map_free(map);
2489 }
2490
2491 /* only called from syscall */
2492 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
2493 {
2494         void **ptr;
2495         int ret = 0;
2496
2497         if (!map->ops->map_fd_sys_lookup_elem)
2498                 return -ENOTSUPP;
2499
2500         rcu_read_lock();
2501         ptr = htab_map_lookup_elem(map, key);
2502         if (ptr)
2503                 *value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
2504         else
2505                 ret = -ENOENT;
2506         rcu_read_unlock();
2507
2508         return ret;
2509 }
2510
2511 /* only called from syscall */
2512 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
2513                                 void *key, void *value, u64 map_flags)
2514 {
2515         void *ptr;
2516         int ret;
2517         u32 ufd = *(u32 *)value;
2518
2519         ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
2520         if (IS_ERR(ptr))
2521                 return PTR_ERR(ptr);
2522
2523         ret = htab_map_update_elem(map, key, &ptr, map_flags);
2524         if (ret)
2525                 map->ops->map_fd_put_ptr(ptr);
2526
2527         return ret;
2528 }
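
/* Illustrative user-space sketch (libbpf names assumed, error handling
 * omitted): for fd-based hash maps such as BPF_MAP_TYPE_HASH_OF_MAPS the
 * update value is the inner map's fd (a u32, per fd_htab_map_alloc_check()),
 * while a syscall-side lookup reports the inner map's id, as done in
 * bpf_fd_htab_map_lookup_elem() above. The inner map must match the template
 * given via inner_map_fd when the outer map was created.
 *
 *   __u32 key = 0, inner_id;
 *   int inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 4, 1, NULL);
 *
 *   // outer_fd: fd of a BPF_MAP_TYPE_HASH_OF_MAPS map created elsewhere
 *   bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
 *   bpf_map_lookup_elem(outer_fd, &key, &inner_id);  // yields an id, not an fd
 */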
2529
2530 static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
2531 {
2532         struct bpf_map *map, *inner_map_meta;
2533
2534         inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
2535         if (IS_ERR(inner_map_meta))
2536                 return inner_map_meta;
2537
2538         map = htab_map_alloc(attr);
2539         if (IS_ERR(map)) {
2540                 bpf_map_meta_free(inner_map_meta);
2541                 return map;
2542         }
2543
2544         map->inner_map_meta = inner_map_meta;
2545
2546         return map;
2547 }
2548
2549 static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
2550 {
2551         struct bpf_map **inner_map = htab_map_lookup_elem(map, key);
2552
2553         if (!inner_map)
2554                 return NULL;
2555
2556         return READ_ONCE(*inner_map);
2557 }
2558
2559 static int htab_of_map_gen_lookup(struct bpf_map *map,
2560                                   struct bpf_insn *insn_buf)
2561 {
2562         struct bpf_insn *insn = insn_buf;
2563         const int ret = BPF_REG_0;
2564
2565         BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
2566                      (void *(*)(struct bpf_map *map, void *key))NULL));
2567         *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
2568         *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
2569         *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
2570                                 offsetof(struct htab_elem, key) +
2571                                 round_up(map->key_size, 8));
2572         *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
2573
2574         return insn - insn_buf;
2575 }
2576
2577 static void htab_of_map_free(struct bpf_map *map)
2578 {
2579         bpf_map_meta_free(map->inner_map_meta);
2580         fd_htab_map_free(map);
2581 }
2582
2583 const struct bpf_map_ops htab_of_maps_map_ops = {
2584         .map_alloc_check = fd_htab_map_alloc_check,
2585         .map_alloc = htab_of_map_alloc,
2586         .map_free = htab_of_map_free,
2587         .map_get_next_key = htab_map_get_next_key,
2588         .map_lookup_elem = htab_of_map_lookup_elem,
2589         .map_delete_elem = htab_map_delete_elem,
2590         .map_fd_get_ptr = bpf_map_fd_get_ptr,
2591         .map_fd_put_ptr = bpf_map_fd_put_ptr,
2592         .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
2593         .map_gen_lookup = htab_of_map_gen_lookup,
2594         .map_check_btf = map_check_no_btf,
2595         .map_mem_usage = htab_map_mem_usage,
2596         BATCH_OPS(htab),
2597         .map_btf_id = &htab_map_btf_ids[0],
2598 };