kernel/bpf/cpumap.c (platform/kernel/linux-starfive.git, merge tag 'perf-core-2023-04-27')
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* bpf/cpumap.c
3  *
4  * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
5  */
6
7 /**
8  * DOC: cpu map
9  * The 'cpumap' is primarily used as a backend map for the XDP BPF helper
10  * call bpf_redirect_map() and the XDP_REDIRECT action, like 'devmap'.
11  *
12  * Unlike devmap which redirects XDP frames out to another NIC device,
13  * this map type redirects raw XDP frames to another CPU.  The remote
14  * CPU will do SKB-allocation and call the normal network stack.
15  */
16 /*
17  * This is a scalability and isolation mechanism that allows
18  * separating the early driver network XDP layer from the rest of the
19  * netstack, and assigning dedicated CPUs to this stage.  This
20  * basically allows for 10G wirespeed pre-filtering via bpf.
21  */
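
/* Hypothetical BPF-side usage sketch (not part of this file): a minimal XDP
 * program that redirects packets into a cpumap via bpf_redirect_map().  Map
 * and function names (cpu_map, xdp_redirect_cpu) and the 16-slot sizing are
 * illustrative, and a libbpf-style build is assumed; on recent kernels the
 * low bits of the flags argument select the fallback action.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(max_entries, 16);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(struct bpf_cpumap_val));
} cpu_map SEC(".maps");

SEC("xdp")
int xdp_redirect_cpu(struct xdp_md *ctx)
{
	/* Pick a destination slot; a real program might hash on flow fields */
	__u32 key = ctx->rx_queue_index % 16;

	/* If the chosen slot is empty, fall back to XDP_PASS */
	return bpf_redirect_map(&cpu_map, key, XDP_PASS);
}

char _license[] SEC("license") = "GPL";
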
22 #include <linux/bitops.h>
23 #include <linux/bpf.h>
24 #include <linux/filter.h>
25 #include <linux/ptr_ring.h>
26 #include <net/xdp.h>
27
28 #include <linux/sched.h>
29 #include <linux/workqueue.h>
30 #include <linux/kthread.h>
31 #include <linux/capability.h>
32 #include <trace/events/xdp.h>
33 #include <linux/btf_ids.h>
34
35 #include <linux/netdevice.h>   /* netif_receive_skb_list */
36 #include <linux/etherdevice.h> /* eth_type_trans */
37
38 /* General idea: XDP packets that get redirected to another CPU will be
39  * stored/queued for at most one driver ->poll() call.  It is
40  * guaranteed that queueing the frame and the flush operation happen on
41  * the same CPU.  Thus, the cpu_map_flush operation can deduce via
42  * this_cpu_ptr() which queue in bpf_cpu_map_entry contains packets.
43  */
44
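
/* Hypothetical driver-side sketch (not part of this file) of the contract
 * described above: frames redirected into a cpumap are queued per CPU for at
 * most one ->poll() invocation, and the flush runs on that same CPU before
 * the poll returns.  Function and variable names are illustrative; the usual
 * driver includes (linux/netdevice.h, linux/filter.h) are assumed.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work = 0;

	while (work < budget /* && RX descriptors are pending */) {
		/* Run the XDP program; an XDP_REDIRECT verdict into a cpumap
		 * lands in cpu_map_enqueue() via xdp_do_redirect().
		 */
		work++;
	}

	/* Same CPU as the enqueues: drains the per-CPU bulk queues of all
	 * redirect maps touched during this poll, including __cpu_map_flush().
	 */
	xdp_do_flush();

	return work;
}
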
45 #define CPU_MAP_BULK_SIZE 8  /* 8 == one cacheline on 64-bit archs */
46 struct bpf_cpu_map_entry;
47 struct bpf_cpu_map;
48
49 struct xdp_bulk_queue {
50         void *q[CPU_MAP_BULK_SIZE];
51         struct list_head flush_node;
52         struct bpf_cpu_map_entry *obj;
53         unsigned int count;
54 };
55
56 /* Struct for every remote "destination" CPU in map */
57 struct bpf_cpu_map_entry {
58         u32 cpu;    /* kthread CPU and map index */
59         int map_id; /* Back reference to map */
60
61         /* XDP can run multiple RX-ring queues, need __percpu enqueue store */
62         struct xdp_bulk_queue __percpu *bulkq;
63
64         struct bpf_cpu_map *cmap;
65
66         /* Queue with potential multi-producers, and single-consumer kthread */
67         struct ptr_ring *queue;
68         struct task_struct *kthread;
69
70         struct bpf_cpumap_val value;
71         struct bpf_prog *prog;
72
73         atomic_t refcnt; /* Control when this struct can be free'ed */
74         struct rcu_head rcu;
75
76         struct work_struct kthread_stop_wq;
77 };
78
79 struct bpf_cpu_map {
80         struct bpf_map map;
81         /* Below members specific for map type */
82         struct bpf_cpu_map_entry __rcu **cpu_map;
83 };
84
85 static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);
86
87 static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
88 {
89         u32 value_size = attr->value_size;
90         struct bpf_cpu_map *cmap;
91
92         if (!bpf_capable())
93                 return ERR_PTR(-EPERM);
94
95         /* check sanity of attributes */
96         if (attr->max_entries == 0 || attr->key_size != 4 ||
97             (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
98              value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
99             attr->map_flags & ~BPF_F_NUMA_NODE)
100                 return ERR_PTR(-EINVAL);
101
102         /* Pre-limit array size based on NR_CPUS, not final CPU check */
103         if (attr->max_entries > NR_CPUS)
104                 return ERR_PTR(-E2BIG);
105
106         cmap = bpf_map_area_alloc(sizeof(*cmap), NUMA_NO_NODE);
107         if (!cmap)
108                 return ERR_PTR(-ENOMEM);
109
110         bpf_map_init_from_attr(&cmap->map, attr);
111
112         /* Alloc array for possible remote "destination" CPUs */
113         cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
114                                            sizeof(struct bpf_cpu_map_entry *),
115                                            cmap->map.numa_node);
116         if (!cmap->cpu_map) {
117                 bpf_map_area_free(cmap);
118                 return ERR_PTR(-ENOMEM);
119         }
120
121         return &cmap->map;
122 }
123
124 static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
125 {
126         atomic_inc(&rcpu->refcnt);
127 }
128
129 /* Called from a workqueue, to work around the syscall path using preempt_disable */
130 static void cpu_map_kthread_stop(struct work_struct *work)
131 {
132         struct bpf_cpu_map_entry *rcpu;
133
134         rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
135
136         /* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
137          * as it waits until all in-flight call_rcu() callbacks complete.
138          */
139         rcu_barrier();
140
141         /* kthread_stop will wake_up_process and wait for it to complete */
142         kthread_stop(rcpu->kthread);
143 }
144
145 static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
146 {
147         /* The tear-down procedure should have made sure that the queue is
148          * empty.  See __cpu_map_entry_replace() and work-queue
149          * invoked cpu_map_kthread_stop(). Catch any broken behaviour
150          * gracefully and warn once.
151          */
152         struct xdp_frame *xdpf;
153
154         while ((xdpf = ptr_ring_consume(ring)))
155                 if (WARN_ON_ONCE(xdpf))
156                         xdp_return_frame(xdpf);
157 }
158
159 static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
160 {
161         if (atomic_dec_and_test(&rcpu->refcnt)) {
162                 if (rcpu->prog)
163                         bpf_prog_put(rcpu->prog);
164                 /* The queue should be empty at this point */
165                 __cpu_map_ring_cleanup(rcpu->queue);
166                 ptr_ring_cleanup(rcpu->queue, NULL);
167                 kfree(rcpu->queue);
168                 kfree(rcpu);
169         }
170 }
171
172 static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
173                                      struct list_head *listp,
174                                      struct xdp_cpumap_stats *stats)
175 {
176         struct sk_buff *skb, *tmp;
177         struct xdp_buff xdp;
178         u32 act;
179         int err;
180
181         list_for_each_entry_safe(skb, tmp, listp, list) {
182                 act = bpf_prog_run_generic_xdp(skb, &xdp, rcpu->prog);
183                 switch (act) {
184                 case XDP_PASS:
185                         break;
186                 case XDP_REDIRECT:
187                         skb_list_del_init(skb);
188                         err = xdp_do_generic_redirect(skb->dev, skb, &xdp,
189                                                       rcpu->prog);
190                         if (unlikely(err)) {
191                                 kfree_skb(skb);
192                                 stats->drop++;
193                         } else {
194                                 stats->redirect++;
195                         }
196                         return;
197                 default:
198                         bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
199                         fallthrough;
200                 case XDP_ABORTED:
201                         trace_xdp_exception(skb->dev, rcpu->prog, act);
202                         fallthrough;
203                 case XDP_DROP:
204                         skb_list_del_init(skb);
205                         kfree_skb(skb);
206                         stats->drop++;
207                         return;
208                 }
209         }
210 }
211
212 static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
213                                     void **frames, int n,
214                                     struct xdp_cpumap_stats *stats)
215 {
216         struct xdp_rxq_info rxq;
217         struct xdp_buff xdp;
218         int i, nframes = 0;
219
220         xdp_set_return_frame_no_direct();
221         xdp.rxq = &rxq;
222
223         for (i = 0; i < n; i++) {
224                 struct xdp_frame *xdpf = frames[i];
225                 u32 act;
226                 int err;
227
228                 rxq.dev = xdpf->dev_rx;
229                 rxq.mem = xdpf->mem;
230                 /* TODO: report queue_index to xdp_rxq_info */
231
232                 xdp_convert_frame_to_buff(xdpf, &xdp);
233
234                 act = bpf_prog_run_xdp(rcpu->prog, &xdp);
235                 switch (act) {
236                 case XDP_PASS:
237                         err = xdp_update_frame_from_buff(&xdp, xdpf);
238                         if (err < 0) {
239                                 xdp_return_frame(xdpf);
240                                 stats->drop++;
241                         } else {
242                                 frames[nframes++] = xdpf;
243                                 stats->pass++;
244                         }
245                         break;
246                 case XDP_REDIRECT:
247                         err = xdp_do_redirect(xdpf->dev_rx, &xdp,
248                                               rcpu->prog);
249                         if (unlikely(err)) {
250                                 xdp_return_frame(xdpf);
251                                 stats->drop++;
252                         } else {
253                                 stats->redirect++;
254                         }
255                         break;
256                 default:
257                         bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
258                         fallthrough;
259                 case XDP_DROP:
260                         xdp_return_frame(xdpf);
261                         stats->drop++;
262                         break;
263                 }
264         }
265
266         xdp_clear_return_frame_no_direct();
267
268         return nframes;
269 }
270
271 #define CPUMAP_BATCH 8
272
273 static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
274                                 int xdp_n, struct xdp_cpumap_stats *stats,
275                                 struct list_head *list)
276 {
277         int nframes;
278
279         if (!rcpu->prog)
280                 return xdp_n;
281
282         rcu_read_lock_bh();
283
284         nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, xdp_n, stats);
285
286         if (stats->redirect)
287                 xdp_do_flush();
288
289         if (unlikely(!list_empty(list)))
290                 cpu_map_bpf_prog_run_skb(rcpu, list, stats);
291
292         rcu_read_unlock_bh(); /* resched point, may call do_softirq() */
293
294         return nframes;
295 }
296
297
298 static int cpu_map_kthread_run(void *data)
299 {
300         struct bpf_cpu_map_entry *rcpu = data;
301
302         set_current_state(TASK_INTERRUPTIBLE);
303
304         /* When the kthread is given the stop order, the rcpu has already been
305          * disconnected from the map, thus no new packets can enter. Remaining
306          * in-flight per-CPU stored packets are flushed to this queue.  Keep
307          * running, honoring the kthread_stop signal, until the queue is empty.
308          */
309         while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
310                 struct xdp_cpumap_stats stats = {}; /* zero stats */
311                 unsigned int kmem_alloc_drops = 0, sched = 0;
312                 gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
313                 int i, n, m, nframes, xdp_n;
314                 void *frames[CPUMAP_BATCH];
315                 void *skbs[CPUMAP_BATCH];
316                 LIST_HEAD(list);
317
318                 /* Release CPU reschedule checks */
319                 if (__ptr_ring_empty(rcpu->queue)) {
320                         set_current_state(TASK_INTERRUPTIBLE);
321                         /* Recheck to avoid lost wake-up */
322                         if (__ptr_ring_empty(rcpu->queue)) {
323                                 schedule();
324                                 sched = 1;
325                         } else {
326                                 __set_current_state(TASK_RUNNING);
327                         }
328                 } else {
329                         sched = cond_resched();
330                 }
331
332                 /*
333                  * The bpf_cpu_map_entry is single consumer, with this
334                  * kthread pinned to one CPU. Lockless access to the ptr_ring
335                  * consume side is valid because the queue is never resized.
336                  */
337                 n = __ptr_ring_consume_batched(rcpu->queue, frames,
338                                                CPUMAP_BATCH);
339                 for (i = 0, xdp_n = 0; i < n; i++) {
340                         void *f = frames[i];
341                         struct page *page;
342
343                         if (unlikely(__ptr_test_bit(0, &f))) {
344                                 struct sk_buff *skb = f;
345
346                                 __ptr_clear_bit(0, &skb);
347                                 list_add_tail(&skb->list, &list);
348                                 continue;
349                         }
350
351                         frames[xdp_n++] = f;
352                         page = virt_to_page(f);
353
354                         /* Bring the struct page memory area to the current CPU.
355                          * It is read by build_skb_around() via page_is_pfmemalloc(),
356                          * and written on free by page_frag_free().
357                          */
358                         prefetchw(page);
359                 }
360
361                 /* Support running another XDP prog on this CPU */
362                 nframes = cpu_map_bpf_prog_run(rcpu, frames, xdp_n, &stats, &list);
363                 if (nframes) {
364                         m = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, skbs);
365                         if (unlikely(m == 0)) {
366                                 for (i = 0; i < nframes; i++)
367                                         skbs[i] = NULL; /* effect: xdp_return_frame */
368                                 kmem_alloc_drops += nframes;
369                         }
370                 }
371
372                 local_bh_disable();
373                 for (i = 0; i < nframes; i++) {
374                         struct xdp_frame *xdpf = frames[i];
375                         struct sk_buff *skb = skbs[i];
376
377                         skb = __xdp_build_skb_from_frame(xdpf, skb,
378                                                          xdpf->dev_rx);
379                         if (!skb) {
380                                 xdp_return_frame(xdpf);
381                                 continue;
382                         }
383
384                         list_add_tail(&skb->list, &list);
385                 }
386                 netif_receive_skb_list(&list);
387
388                 /* Feedback loop via tracepoint */
389                 trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,
390                                          sched, &stats);
391
392                 local_bh_enable(); /* resched point, may call do_softirq() */
393         }
394         __set_current_state(TASK_RUNNING);
395
396         put_cpu_map_entry(rcpu);
397         return 0;
398 }
399
400 static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu,
401                                       struct bpf_map *map, int fd)
402 {
403         struct bpf_prog *prog;
404
405         prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
406         if (IS_ERR(prog))
407                 return PTR_ERR(prog);
408
409         if (prog->expected_attach_type != BPF_XDP_CPUMAP ||
410             !bpf_prog_map_compatible(map, prog)) {
411                 bpf_prog_put(prog);
412                 return -EINVAL;
413         }
414
415         rcpu->value.bpf_prog.id = prog->aux->id;
416         rcpu->prog = prog;
417
418         return 0;
419 }
420
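
/* Hypothetical sketch (not part of this file) of a second-stage program that a
 * map entry can carry via bpf_cpumap_val.bpf_prog.fd.  It must be loaded with
 * expected_attach_type BPF_XDP_CPUMAP, which a recent libbpf derives from the
 * SEC("xdp/cpumap") section name; it runs on the remote CPU's kthread via
 * cpu_map_bpf_prog_run_xdp()/cpu_map_bpf_prog_run_skb() above.  The drop-runts
 * policy is purely illustrative.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

SEC("xdp/cpumap")
int cpumap_second_stage(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* Drop frames too short to hold an Ethernet header; anything passed
	 * here is turned into an skb by the kthread and fed to the netstack.
	 */
	if (data + ETH_HLEN > data_end)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
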
421 static struct bpf_cpu_map_entry *
422 __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
423                       u32 cpu)
424 {
425         int numa, err, i, fd = value->bpf_prog.fd;
426         gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
427         struct bpf_cpu_map_entry *rcpu;
428         struct xdp_bulk_queue *bq;
429
430         /* Have map->numa_node, but choose node of redirect target CPU */
431         numa = cpu_to_node(cpu);
432
433         rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
434         if (!rcpu)
435                 return NULL;
436
437         /* Alloc percpu bulkq */
438         rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
439                                            sizeof(void *), gfp);
440         if (!rcpu->bulkq)
441                 goto free_rcu;
442
443         for_each_possible_cpu(i) {
444                 bq = per_cpu_ptr(rcpu->bulkq, i);
445                 bq->obj = rcpu;
446         }
447
448         /* Alloc queue */
449         rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
450                                            numa);
451         if (!rcpu->queue)
452                 goto free_bulkq;
453
454         err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
455         if (err)
456                 goto free_queue;
457
458         rcpu->cpu    = cpu;
459         rcpu->map_id = map->id;
460         rcpu->value.qsize  = value->qsize;
461
462         if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd))
463                 goto free_ptr_ring;
464
465         /* Setup kthread */
466         rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
467                                                "cpumap/%d/map:%d", cpu,
468                                                map->id);
469         if (IS_ERR(rcpu->kthread))
470                 goto free_prog;
471
472         get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
473         get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */
474
475         /* Make sure kthread runs on a single CPU */
476         kthread_bind(rcpu->kthread, cpu);
477         wake_up_process(rcpu->kthread);
478
479         return rcpu;
480
481 free_prog:
482         if (rcpu->prog)
483                 bpf_prog_put(rcpu->prog);
484 free_ptr_ring:
485         ptr_ring_cleanup(rcpu->queue, NULL);
486 free_queue:
487         kfree(rcpu->queue);
488 free_bulkq:
489         free_percpu(rcpu->bulkq);
490 free_rcu:
491         kfree(rcpu);
492         return NULL;
493 }
494
495 static void __cpu_map_entry_free(struct rcu_head *rcu)
496 {
497         struct bpf_cpu_map_entry *rcpu;
498
499         /* This cpu_map_entry has been disconnected from the map and one
500          * RCU grace period has elapsed.  Thus, XDP cannot queue any
501          * new packets and cannot change/set flush_needed in a way that
502          * finds this entry.
503          */
504         rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);
505
506         free_percpu(rcpu->bulkq);
507         /* Cannot kthread_stop() here; the last put frees the rcpu resources */
508         put_cpu_map_entry(rcpu);
509 }
510
511 /* After the xchg of the pointer to bpf_cpu_map_entry, use call_rcu() to
512  * ensure any driver RCU critical sections have completed; this does not
513  * guarantee a flush has happened yet, because the driver-side
514  * rcu_read_lock/unlock only protects the running XDP program.  The
515  * atomic xchg and NULL-ptr check in __cpu_map_flush() make sure a
516  * pending flush op doesn't fail.
517  *
518  * The bpf_cpu_map_entry is still used by the kthread, and there can
519  * still be pending packets (in the queue and percpu bulkq).  A refcnt
520  * makes sure the last user (kthread_stop vs. call_rcu) frees the memory
521  * resources.
522  *
523  * The rcu callback __cpu_map_entry_free() flushes remaining packets in
524  * the percpu bulkq to the queue.  Because the caller map_delete_elem()
525  * disables preemption, we cannot call kthread_stop() to make sure the
526  * queue is empty.  Instead a work queue is started for stopping the
527  * kthread: cpu_map_kthread_stop(), which waits for an RCU grace period
528  * before stopping the kthread, which in turn empties the queue.
529  */
530 static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
531                                     u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
532 {
533         struct bpf_cpu_map_entry *old_rcpu;
534
535         old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu)));
536         if (old_rcpu) {
537                 call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
538                 INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
539                 schedule_work(&old_rcpu->kthread_stop_wq);
540         }
541 }
542
543 static long cpu_map_delete_elem(struct bpf_map *map, void *key)
544 {
545         struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
546         u32 key_cpu = *(u32 *)key;
547
548         if (key_cpu >= map->max_entries)
549                 return -EINVAL;
550
551         /* Notice: the caller map_delete_elem() uses preempt_disable() */
552         __cpu_map_entry_replace(cmap, key_cpu, NULL);
553         return 0;
554 }
555
556 static long cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
557                                 u64 map_flags)
558 {
559         struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
560         struct bpf_cpumap_val cpumap_value = {};
561         struct bpf_cpu_map_entry *rcpu;
562         /* The array index key corresponds to the CPU number */
563         u32 key_cpu = *(u32 *)key;
564
565         memcpy(&cpumap_value, value, map->value_size);
566
567         if (unlikely(map_flags > BPF_EXIST))
568                 return -EINVAL;
569         if (unlikely(key_cpu >= cmap->map.max_entries))
570                 return -E2BIG;
571         if (unlikely(map_flags == BPF_NOEXIST))
572                 return -EEXIST;
573         if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
574                 return -EOVERFLOW;
575
576         /* Make sure CPU is a valid possible cpu */
577         if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
578                 return -ENODEV;
579
580         if (cpumap_value.qsize == 0) {
581                 rcpu = NULL; /* Same as deleting */
582         } else {
583                 /* Updating qsize causes re-allocation of the bpf_cpu_map_entry */
584                 rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
585                 if (!rcpu)
586                         return -ENOMEM;
587                 rcpu->cmap = cmap;
588         }
589         rcu_read_lock();
590         __cpu_map_entry_replace(cmap, key_cpu, rcpu);
591         rcu_read_unlock();
592         return 0;
593 }
594
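
/* Hypothetical user-space sketch (not part of this file) showing how an entry
 * is installed, matching the checks in cpu_map_alloc() and
 * cpu_map_update_elem() above: the value is a struct bpf_cpumap_val whose
 * qsize sizes the remote CPU's ptr_ring and whose bpf_prog.fd optionally names
 * a second-stage XDP program.  A qsize of 0 behaves like a delete.  Assumes
 * libbpf; prog_fd and the sizing constants are illustrative.
 */
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int add_cpumap_entry(int map_fd, __u32 cpu, int prog_fd)
{
	struct bpf_cpumap_val val = {
		.qsize = 2048,		/* ptr_ring size on the remote CPU */
		.bpf_prog.fd = prog_fd,	/* optional; <= 0 means no program */
	};

	return bpf_map_update_elem(map_fd, &cpu, &val, BPF_ANY);
}

/* Creating the map directly (instead of declaring it in BPF C):
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_CPUMAP, "cpu_map",
 *				    sizeof(__u32),
 *				    sizeof(struct bpf_cpumap_val),
 *				    16, NULL);
 */
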
595 static void cpu_map_free(struct bpf_map *map)
596 {
597         struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
598         u32 i;
599
600         /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
601          * so the bpf programs (there can be more than one that used this map) were
602          * disconnected from events. Wait for outstanding critical sections in
603          * these programs to complete. The rcu critical section only guarantees
604          * no further "XDP/bpf-side" reads against bpf_cpu_map->cpu_map.
605          * It does __not__ ensure pending flush operations (if any) are
606          * complete.
607          */
608
609         synchronize_rcu();
610
611         /* For cpu_map the remote CPUs can still be using the entries
612          * (struct bpf_cpu_map_entry).
613          */
614         for (i = 0; i < cmap->map.max_entries; i++) {
615                 struct bpf_cpu_map_entry *rcpu;
616
617                 rcpu = rcu_dereference_raw(cmap->cpu_map[i]);
618                 if (!rcpu)
619                         continue;
620
621                 /* bq flush and cleanup happen after an RCU grace period */
622                 __cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
623         }
624         bpf_map_area_free(cmap->cpu_map);
625         bpf_map_area_free(cmap);
626 }
627
628 /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
629  * by local_bh_disable() (from XDP calls inside NAPI). The
630  * rcu_read_lock_bh_held() below makes lockdep accept both.
631  */
632 static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
633 {
634         struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
635         struct bpf_cpu_map_entry *rcpu;
636
637         if (key >= map->max_entries)
638                 return NULL;
639
640         rcpu = rcu_dereference_check(cmap->cpu_map[key],
641                                      rcu_read_lock_bh_held());
642         return rcpu;
643 }
644
645 static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
646 {
647         struct bpf_cpu_map_entry *rcpu =
648                 __cpu_map_lookup_elem(map, *(u32 *)key);
649
650         return rcpu ? &rcpu->value : NULL;
651 }
652
653 static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
654 {
655         struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
656         u32 index = key ? *(u32 *)key : U32_MAX;
657         u32 *next = next_key;
658
659         if (index >= cmap->map.max_entries) {
660                 *next = 0;
661                 return 0;
662         }
663
664         if (index == cmap->map.max_entries - 1)
665                 return -ENOENT;
666         *next = index + 1;
667         return 0;
668 }
669
670 static long cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags)
671 {
672         return __bpf_xdp_redirect_map(map, index, flags, 0,
673                                       __cpu_map_lookup_elem);
674 }
675
676 static u64 cpu_map_mem_usage(const struct bpf_map *map)
677 {
678         u64 usage = sizeof(struct bpf_cpu_map);
679
680         /* Currently the dynamically allocated elements are not counted */
681         usage += (u64)map->max_entries * sizeof(struct bpf_cpu_map_entry *);
682         return usage;
683 }
684
685 BTF_ID_LIST_SINGLE(cpu_map_btf_ids, struct, bpf_cpu_map)
686 const struct bpf_map_ops cpu_map_ops = {
687         .map_meta_equal         = bpf_map_meta_equal,
688         .map_alloc              = cpu_map_alloc,
689         .map_free               = cpu_map_free,
690         .map_delete_elem        = cpu_map_delete_elem,
691         .map_update_elem        = cpu_map_update_elem,
692         .map_lookup_elem        = cpu_map_lookup_elem,
693         .map_get_next_key       = cpu_map_get_next_key,
694         .map_check_btf          = map_check_no_btf,
695         .map_mem_usage          = cpu_map_mem_usage,
696         .map_btf_id             = &cpu_map_btf_ids[0],
697         .map_redirect           = cpu_map_redirect,
698 };
699
700 static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
701 {
702         struct bpf_cpu_map_entry *rcpu = bq->obj;
703         unsigned int processed = 0, drops = 0;
704         const int to_cpu = rcpu->cpu;
705         struct ptr_ring *q;
706         int i;
707
708         if (unlikely(!bq->count))
709                 return;
710
711         q = rcpu->queue;
712         spin_lock(&q->producer_lock);
713
714         for (i = 0; i < bq->count; i++) {
715                 struct xdp_frame *xdpf = bq->q[i];
716                 int err;
717
718                 err = __ptr_ring_produce(q, xdpf);
719                 if (err) {
720                         drops++;
721                         xdp_return_frame_rx_napi(xdpf);
722                 }
723                 processed++;
724         }
725         bq->count = 0;
726         spin_unlock(&q->producer_lock);
727
728         __list_del_clearprev(&bq->flush_node);
729
730         /* Feedback loop via tracepoints */
731         trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
732 }
733
734 /* Runs under RCU read-side protection, plus in softirq under NAPI protection.
735  * Thus, percpu variable access is safe.
736  */
737 static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
738 {
739         struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
740         struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
741
742         if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
743                 bq_flush_to_queue(bq);
744
745         /* Notice, the xdp_buff/page MUST be queued here, long enough for the
746          * driver code invoking us to have finished, due to driver
747          * (e.g. ixgbe) recycle tricks based on page-refcnt.
748          *
749          * Thus, an incoming xdp_frame is always queued here (else we race
750          * with another CPU on page-refcnt and remaining driver code).
751          * Queue time is very short, as the driver will invoke the flush
752          * operation when completing the napi->poll call.
753          */
754         bq->q[bq->count++] = xdpf;
755
756         if (!bq->flush_node.prev)
757                 list_add(&bq->flush_node, flush_list);
758 }
759
760 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
761                     struct net_device *dev_rx)
762 {
763         /* Info needed when constructing SKB on remote CPU */
764         xdpf->dev_rx = dev_rx;
765
766         bq_enqueue(rcpu, xdpf);
767         return 0;
768 }
769
770 int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
771                              struct sk_buff *skb)
772 {
773         int ret;
774
775         __skb_pull(skb, skb->mac_len);
776         skb_set_redirected(skb, false);
777         __ptr_set_bit(0, &skb);
778
779         ret = ptr_ring_produce(rcpu->queue, skb);
780         if (ret < 0)
781                 goto trace;
782
783         wake_up_process(rcpu->kthread);
784 trace:
785         trace_xdp_cpumap_enqueue(rcpu->map_id, !ret, !!ret, rcpu->cpu);
786         return ret;
787 }
788
789 void __cpu_map_flush(void)
790 {
791         struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
792         struct xdp_bulk_queue *bq, *tmp;
793
794         list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
795                 bq_flush_to_queue(bq);
796
797                 /* If already running, costs spin_lock_irqsave + smp_mb */
798                 wake_up_process(bq->obj->kthread);
799         }
800 }
801
802 static int __init cpu_map_init(void)
803 {
804         int cpu;
805
806         for_each_possible_cpu(cpu)
807                 INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
808         return 0;
809 }
810
811 subsys_initcall(cpu_map_init);