1 // SPDX-License-Identifier: GPL-2.0-only
2 /* net/core/xdp.c
3  *
4  * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
5  */
6 #include <linux/bpf.h>
7 #include <linux/btf.h>
8 #include <linux/btf_ids.h>
9 #include <linux/filter.h>
10 #include <linux/types.h>
11 #include <linux/mm.h>
12 #include <linux/netdevice.h>
13 #include <linux/slab.h>
14 #include <linux/idr.h>
15 #include <linux/rhashtable.h>
16 #include <linux/bug.h>
17 #include <net/page_pool.h>
18
19 #include <net/xdp.h>
20 #include <net/xdp_priv.h> /* struct xdp_mem_allocator */
21 #include <trace/events/xdp.h>
22 #include <net/xdp_sock_drv.h>
23
24 #define REG_STATE_NEW           0x0
25 #define REG_STATE_REGISTERED    0x1
26 #define REG_STATE_UNREGISTERED  0x2
27 #define REG_STATE_UNUSED        0x3
28
29 static DEFINE_IDA(mem_id_pool);
30 static DEFINE_MUTEX(mem_id_lock);
31 #define MEM_ID_MAX 0xFFFE
32 #define MEM_ID_MIN 1
33 static int mem_id_next = MEM_ID_MIN;
34
35 static bool mem_id_init; /* false */
36 static struct rhashtable *mem_id_ht;
37
38 static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
39 {
40         const u32 *k = data;
41         const u32 key = *k;
42
43         BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
44                      != sizeof(u32));
45
46         /* Use cyclic increasing ID as direct hash key */
47         return key;
48 }
49
50 static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
51                           const void *ptr)
52 {
53         const struct xdp_mem_allocator *xa = ptr;
54         u32 mem_id = *(u32 *)arg->key;
55
56         return xa->mem.id != mem_id;
57 }
58
59 static const struct rhashtable_params mem_id_rht_params = {
60         .nelem_hint = 64,
61         .head_offset = offsetof(struct xdp_mem_allocator, node),
62         .key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
63         .key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
64         .max_size = MEM_ID_MAX,
65         .min_size = 8,
66         .automatic_shrinking = true,
67         .hashfn    = xdp_mem_id_hashfn,
68         .obj_cmpfn = xdp_mem_id_cmp,
69 };
70
71 static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
72 {
73         struct xdp_mem_allocator *xa;
74
75         xa = container_of(rcu, struct xdp_mem_allocator, rcu);
76
77         /* Allow this ID to be reused */
78         ida_simple_remove(&mem_id_pool, xa->mem.id);
79
80         kfree(xa);
81 }
82
83 static void mem_xa_remove(struct xdp_mem_allocator *xa)
84 {
85         trace_mem_disconnect(xa);
86
87         if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
88                 call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
89 }
90
91 static void mem_allocator_disconnect(void *allocator)
92 {
93         struct xdp_mem_allocator *xa;
94         struct rhashtable_iter iter;
95
96         mutex_lock(&mem_id_lock);
97
98         rhashtable_walk_enter(mem_id_ht, &iter);
99         do {
100                 rhashtable_walk_start(&iter);
101
102                 while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
103                         if (xa->allocator == allocator)
104                                 mem_xa_remove(xa);
105                 }
106
107                 rhashtable_walk_stop(&iter);
108
109         } while (xa == ERR_PTR(-EAGAIN));
110         rhashtable_walk_exit(&iter);
111
112         mutex_unlock(&mem_id_lock);
113 }
114
115 void xdp_unreg_mem_model(struct xdp_mem_info *mem)
116 {
117         struct xdp_mem_allocator *xa;
118         int type = mem->type;
119         int id = mem->id;
120
121         /* Reset mem info to defaults */
122         mem->id = 0;
123         mem->type = 0;
124
125         if (id == 0)
126                 return;
127
128         if (type == MEM_TYPE_PAGE_POOL) {
129                 rcu_read_lock();
130                 xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
131                 page_pool_destroy(xa->page_pool);
132                 rcu_read_unlock();
133         }
134 }
135 EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);
136
137 void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
138 {
139         if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
140                 WARN(1, "Missing register, driver bug");
141                 return;
142         }
143
144         xdp_unreg_mem_model(&xdp_rxq->mem);
145 }
146 EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);
147
148 void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
149 {
150         /* Simplify driver cleanup code paths, allow unreg "unused" */
151         if (xdp_rxq->reg_state == REG_STATE_UNUSED)
152                 return;
153
154         xdp_rxq_info_unreg_mem_model(xdp_rxq);
155
156         xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
157         xdp_rxq->dev = NULL;
158 }
159 EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);
160
161 static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
162 {
163         memset(xdp_rxq, 0, sizeof(*xdp_rxq));
164 }
165
166 /* Returns 0 on success, negative on failure */
167 int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
168                        struct net_device *dev, u32 queue_index,
169                        unsigned int napi_id, u32 frag_size)
170 {
171         if (!dev) {
172                 WARN(1, "Missing net_device from driver");
173                 return -ENODEV;
174         }
175
176         if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
177                 WARN(1, "Driver promised not to register this");
178                 return -EINVAL;
179         }
180
181         if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
182                 WARN(1, "Missing unregister, handled but fix driver");
183                 xdp_rxq_info_unreg(xdp_rxq);
184         }
185
186         /* State either UNREGISTERED or NEW */
187         xdp_rxq_info_init(xdp_rxq);
188         xdp_rxq->dev = dev;
189         xdp_rxq->queue_index = queue_index;
190         xdp_rxq->napi_id = napi_id;
191         xdp_rxq->frag_size = frag_size;
192
193         xdp_rxq->reg_state = REG_STATE_REGISTERED;
194         return 0;
195 }
196 EXPORT_SYMBOL_GPL(__xdp_rxq_info_reg);
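/* Illustrative sketch: how a driver typically registers and unregisters one
 * RX queue with this API. "struct my_ring" and its fields are hypothetical;
 * the xdp_rxq_info_reg() wrapper in include/net/xdp.h calls
 * __xdp_rxq_info_reg() with frag_size == 0.
 *
 *	struct my_ring {				// hypothetical driver state
 *		struct net_device *netdev;
 *		struct napi_struct napi;
 *		struct xdp_rxq_info xdp_rxq;
 *		u32 queue_index;
 *	};
 *
 *	static int my_ring_open(struct my_ring *r)
 *	{
 *		// Must succeed before the ring starts receiving packets
 *		return xdp_rxq_info_reg(&r->xdp_rxq, r->netdev,
 *					r->queue_index, r->napi.napi_id);
 *	}
 *
 *	static void my_ring_close(struct my_ring *r)
 *	{
 *		// Also tears down the memory model, see xdp_rxq_info_unreg_mem_model()
 *		xdp_rxq_info_unreg(&r->xdp_rxq);
 *	}
 */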
197
198 void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
199 {
200         xdp_rxq->reg_state = REG_STATE_UNUSED;
201 }
202 EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);
203
204 bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
205 {
206         return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
207 }
208 EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);
209
210 static int __mem_id_init_hash_table(void)
211 {
212         struct rhashtable *rht;
213         int ret;
214
215         if (unlikely(mem_id_init))
216                 return 0;
217
218         rht = kzalloc(sizeof(*rht), GFP_KERNEL);
219         if (!rht)
220                 return -ENOMEM;
221
222         ret = rhashtable_init(rht, &mem_id_rht_params);
223         if (ret < 0) {
224                 kfree(rht);
225                 return ret;
226         }
227         mem_id_ht = rht;
228         smp_mb(); /* mutex lock should provide enough pairing */
229         mem_id_init = true;
230
231         return 0;
232 }
233
234 /* Allocate a cyclic ID that maps to allocator pointer.
235  * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
236  *
237  * Caller must lock mem_id_lock.
238  */
239 static int __mem_id_cyclic_get(gfp_t gfp)
240 {
241         int retries = 1;
242         int id;
243
244 again:
245         id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
246         if (id < 0) {
247                 if (id == -ENOSPC) {
248                         /* Cyclic allocator, reset next id */
249                         if (retries--) {
250                                 mem_id_next = MEM_ID_MIN;
251                                 goto again;
252                         }
253                 }
254                 return id; /* errno */
255         }
256         mem_id_next = id + 1;
257
258         return id;
259 }
260
261 static bool __is_supported_mem_type(enum xdp_mem_type type)
262 {
263         if (type == MEM_TYPE_PAGE_POOL)
264                 return is_page_pool_compiled_in();
265
266         if (type >= MEM_TYPE_MAX)
267                 return false;
268
269         return true;
270 }
271
272 static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
273                                                      enum xdp_mem_type type,
274                                                      void *allocator)
275 {
276         struct xdp_mem_allocator *xdp_alloc;
277         gfp_t gfp = GFP_KERNEL;
278         int id, errno, ret;
279         void *ptr;
280
281         if (!__is_supported_mem_type(type))
282                 return ERR_PTR(-EOPNOTSUPP);
283
284         mem->type = type;
285
286         if (!allocator) {
287                 if (type == MEM_TYPE_PAGE_POOL)
288                         return ERR_PTR(-EINVAL); /* Setup-time check: page_pool needs allocator */
289                 return NULL;
290         }
291
292         /* Delay init of rhashtable to save memory if feature isn't used */
293         if (!mem_id_init) {
294                 mutex_lock(&mem_id_lock);
295                 ret = __mem_id_init_hash_table();
296                 mutex_unlock(&mem_id_lock);
297                 if (ret < 0) {
298                         WARN_ON(1);
299                         return ERR_PTR(ret);
300                 }
301         }
302
303         xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
304         if (!xdp_alloc)
305                 return ERR_PTR(-ENOMEM);
306
307         mutex_lock(&mem_id_lock);
308         id = __mem_id_cyclic_get(gfp);
309         if (id < 0) {
310                 errno = id;
311                 goto err;
312         }
313         mem->id = id;
314         xdp_alloc->mem = *mem;
315         xdp_alloc->allocator = allocator;
316
317         /* Insert allocator into ID lookup table */
318         ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
319         if (IS_ERR(ptr)) {
320                 ida_simple_remove(&mem_id_pool, mem->id);
321                 mem->id = 0;
322                 errno = PTR_ERR(ptr);
323                 goto err;
324         }
325
326         if (type == MEM_TYPE_PAGE_POOL)
327                 page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem);
328
329         mutex_unlock(&mem_id_lock);
330
331         return xdp_alloc;
332 err:
333         mutex_unlock(&mem_id_lock);
334         kfree(xdp_alloc);
335         return ERR_PTR(errno);
336 }
337
338 int xdp_reg_mem_model(struct xdp_mem_info *mem,
339                       enum xdp_mem_type type, void *allocator)
340 {
341         struct xdp_mem_allocator *xdp_alloc;
342
343         xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
344         if (IS_ERR(xdp_alloc))
345                 return PTR_ERR(xdp_alloc);
346         return 0;
347 }
348 EXPORT_SYMBOL_GPL(xdp_reg_mem_model);
349
350 int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
351                                enum xdp_mem_type type, void *allocator)
352 {
353         struct xdp_mem_allocator *xdp_alloc;
354
355         if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
356                 WARN(1, "Missing register, driver bug");
357                 return -EFAULT;
358         }
359
360         xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
361         if (IS_ERR(xdp_alloc))
362                 return PTR_ERR(xdp_alloc);
363
364         if (trace_mem_connect_enabled() && xdp_alloc)
365                 trace_mem_connect(xdp_alloc, xdp_rxq);
366         return 0;
367 }
368
369 EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
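/* Illustrative sketch: attaching a MEM_TYPE_PAGE_POOL memory model to an
 * already registered rxq, following the pattern used by page_pool based
 * drivers. The pool sizing values and the "r->" fields are hypothetical.
 *
 *	struct page_pool_params pp_params = {
 *		.order		= 0,
 *		.pool_size	= 256,			// hypothetical ring size
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= r->netdev->dev.parent,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *	};
 *	struct page_pool *pool;
 *	int err;
 *
 *	pool = page_pool_create(&pp_params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *
 *	err = xdp_rxq_info_reg_mem_model(&r->xdp_rxq, MEM_TYPE_PAGE_POOL, pool);
 *	if (err) {
 *		page_pool_destroy(pool);
 *		return err;
 *	}
 *
 * Teardown typically goes through xdp_rxq_info_unreg(), which reaches the
 * page_pool_destroy() call above via the mem.id lookup.
 */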
370
371 /* XDP RX runs under NAPI protection, and in different delivery error
372  * scenarios (e.g. queue full), it is possible to return the xdp_frame
373  * while still leveraging this protection.  The @napi_direct boolean
374  * is used for those call sites, thus allowing for faster recycling
375  * of xdp_frames/pages in those cases.
376  */
377 void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
378                   struct xdp_buff *xdp)
379 {
380         struct page *page;
381
382         switch (mem->type) {
383         case MEM_TYPE_PAGE_POOL:
384                 page = virt_to_head_page(data);
385                 if (napi_direct && xdp_return_frame_no_direct())
386                         napi_direct = false;
387                 /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
388                  * as mem->type knows this is a page_pool page
389                  */
390                 page_pool_put_full_page(page->pp, page, napi_direct);
391                 break;
392         case MEM_TYPE_PAGE_SHARED:
393                 page_frag_free(data);
394                 break;
395         case MEM_TYPE_PAGE_ORDER0:
396                 page = virt_to_page(data); /* Assumes order0 page */
397                 put_page(page);
398                 break;
399         case MEM_TYPE_XSK_BUFF_POOL:
400                 /* NB! Only valid from an xdp_buff! */
401                 xsk_buff_free(xdp);
402                 break;
403         default:
404                 /* Not possible, checked in xdp_rxq_info_reg_mem_model() */
405                 WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
406                 break;
407         }
408 }
409
410 void xdp_return_frame(struct xdp_frame *xdpf)
411 {
412         struct skb_shared_info *sinfo;
413         int i;
414
415         if (likely(!xdp_frame_has_frags(xdpf)))
416                 goto out;
417
418         sinfo = xdp_get_shared_info_from_frame(xdpf);
419         for (i = 0; i < sinfo->nr_frags; i++) {
420                 struct page *page = skb_frag_page(&sinfo->frags[i]);
421
422                 __xdp_return(page_address(page), &xdpf->mem, false, NULL);
423         }
424 out:
425         __xdp_return(xdpf->data, &xdpf->mem, false, NULL);
426 }
427 EXPORT_SYMBOL_GPL(xdp_return_frame);
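/* Illustrative sketch: xdp_return_frame() is the non-NAPI-direct return path,
 * e.g. when a driver frees frames it had queued through ndo_xdp_xmit(). The
 * completion-ring walk below is hypothetical.
 *
 *	for (i = 0; i < done; i++) {
 *		struct xdp_frame *xdpf = ring->tx_buf[i].xdpf;	// hypothetical
 *
 *		xdp_return_frame(xdpf);
 *	}
 *
 * From inside the RX NAPI poll loop of the same device, the _rx_napi variant
 * below may be used instead, allowing page_pool direct recycling.
 */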
428
429 void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
430 {
431         struct skb_shared_info *sinfo;
432         int i;
433
434         if (likely(!xdp_frame_has_frags(xdpf)))
435                 goto out;
436
437         sinfo = xdp_get_shared_info_from_frame(xdpf);
438         for (i = 0; i < sinfo->nr_frags; i++) {
439                 struct page *page = skb_frag_page(&sinfo->frags[i]);
440
441                 __xdp_return(page_address(page), &xdpf->mem, true, NULL);
442         }
443 out:
444         __xdp_return(xdpf->data, &xdpf->mem, true, NULL);
445 }
446 EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
447
448 /* XDP bulk APIs introduce a defer/flush mechanism to return
449  * pages belonging to the same xdp_mem_allocator object
450  * (identified via the mem.id field) in bulk to optimize
451  * I-cache and D-cache.
452  * The bulk queue size is set to 16 to be aligned to how
453  * XDP_REDIRECT bulking works. The bulk is flushed when
454  * it is full or when mem.id changes.
455  * xdp_frame_bulk is usually stored/allocated on the function
456  * call-stack to avoid locking penalties.
457  */
458 void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
459 {
460         struct xdp_mem_allocator *xa = bq->xa;
461
462         if (unlikely(!xa || !bq->count))
463                 return;
464
465         page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
466         /* bq->xa is not cleared, to save a lookup if mem.id is the same in next bulk */
467         bq->count = 0;
468 }
469 EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);
470
471 /* Must be called with rcu_read_lock held */
472 void xdp_return_frame_bulk(struct xdp_frame *xdpf,
473                            struct xdp_frame_bulk *bq)
474 {
475         struct xdp_mem_info *mem = &xdpf->mem;
476         struct xdp_mem_allocator *xa;
477
478         if (mem->type != MEM_TYPE_PAGE_POOL) {
479                 xdp_return_frame(xdpf);
480                 return;
481         }
482
483         xa = bq->xa;
484         if (unlikely(!xa)) {
485                 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
486                 bq->count = 0;
487                 bq->xa = xa;
488         }
489
490         if (bq->count == XDP_BULK_QUEUE_SIZE)
491                 xdp_flush_frame_bulk(bq);
492
493         if (unlikely(mem->id != xa->mem.id)) {
494                 xdp_flush_frame_bulk(bq);
495                 bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
496         }
497
498         if (unlikely(xdp_frame_has_frags(xdpf))) {
499                 struct skb_shared_info *sinfo;
500                 int i;
501
502                 sinfo = xdp_get_shared_info_from_frame(xdpf);
503                 for (i = 0; i < sinfo->nr_frags; i++) {
504                         skb_frag_t *frag = &sinfo->frags[i];
505
506                         bq->q[bq->count++] = skb_frag_address(frag);
507                         if (bq->count == XDP_BULK_QUEUE_SIZE)
508                                 xdp_flush_frame_bulk(bq);
509                 }
510         }
511         bq->q[bq->count++] = xdpf->data;
512 }
513 EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
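/* Illustrative sketch: the bulking pattern described above, as a driver
 * TX-completion path might use it. xdp_frame_bulk_init() lives in
 * include/net/xdp.h; "done" and "ring->tx_buf[]" are hypothetical.
 *
 *	struct xdp_frame_bulk bq;
 *
 *	xdp_frame_bulk_init(&bq);
 *
 *	rcu_read_lock();	// required for the mem_id_ht lookups
 *	for (i = 0; i < done; i++)
 *		xdp_return_frame_bulk(ring->tx_buf[i].xdpf, &bq);
 *	xdp_flush_frame_bulk(&bq);	// flush the partial bulk, if any
 *	rcu_read_unlock();
 */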
514
515 void xdp_return_buff(struct xdp_buff *xdp)
516 {
517         struct skb_shared_info *sinfo;
518         int i;
519
520         if (likely(!xdp_buff_has_frags(xdp)))
521                 goto out;
522
523         sinfo = xdp_get_shared_info_from_buff(xdp);
524         for (i = 0; i < sinfo->nr_frags; i++) {
525                 struct page *page = skb_frag_page(&sinfo->frags[i]);
526
527                 __xdp_return(page_address(page), &xdp->rxq->mem, true, xdp);
528         }
529 out:
530         __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
531 }
532 EXPORT_SYMBOL_GPL(xdp_return_buff);
533
534 /* Only called for MEM_TYPE_PAGE_POOL, see xdp.h */
535 void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
536 {
537         struct xdp_mem_allocator *xa;
538         struct page *page;
539
540         rcu_read_lock();
541         xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
542         page = virt_to_head_page(data);
543         if (xa)
544                 page_pool_release_page(xa->page_pool, page);
545         rcu_read_unlock();
546 }
547 EXPORT_SYMBOL_GPL(__xdp_release_frame);
548
549 void xdp_attachment_setup(struct xdp_attachment_info *info,
550                           struct netdev_bpf *bpf)
551 {
552         if (info->prog)
553                 bpf_prog_put(info->prog);
554         info->prog = bpf->prog;
555         info->flags = bpf->flags;
556 }
557 EXPORT_SYMBOL_GPL(xdp_attachment_setup);
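/* Illustrative sketch: xdp_attachment_setup() is meant to be called from a
 * driver's ndo_bpf() handler once the new program has actually been installed,
 * so the old program reference is released. The "my_priv" container is
 * hypothetical.
 *
 *	static int my_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			// ... reconfigure RX rings for the new program first ...
 *			xdp_attachment_setup(&priv->xdp, bpf);
 *			return 0;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */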
558
559 struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
560 {
561         unsigned int metasize, totsize;
562         void *addr, *data_to_copy;
563         struct xdp_frame *xdpf;
564         struct page *page;
565
566         /* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
567         metasize = xdp_data_meta_unsupported(xdp) ? 0 :
568                    xdp->data - xdp->data_meta;
569         totsize = xdp->data_end - xdp->data + metasize;
570
571         if (sizeof(*xdpf) + totsize > PAGE_SIZE)
572                 return NULL;
573
574         page = dev_alloc_page();
575         if (!page)
576                 return NULL;
577
578         addr = page_to_virt(page);
579         xdpf = addr;
580         memset(xdpf, 0, sizeof(*xdpf));
581
582         addr += sizeof(*xdpf);
583         data_to_copy = metasize ? xdp->data_meta : xdp->data;
584         memcpy(addr, data_to_copy, totsize);
585
586         xdpf->data = addr + metasize;
587         xdpf->len = totsize - metasize;
588         xdpf->headroom = 0;
589         xdpf->metasize = metasize;
590         xdpf->frame_sz = PAGE_SIZE;
591         xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
592
593         xsk_buff_free(xdp);
594         return xdpf;
595 }
596 EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);
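/* Illustrative note: this copy-based conversion is normally reached through
 * xdp_convert_buff_to_frame() in include/net/xdp.h, which dispatches here
 * when the buff belongs to an AF_XDP (XSK) buffer pool:
 *
 *	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
 *
 *	if (unlikely(!xdpf))
 *		return -EOVERFLOW;	// hypothetical caller policy: drop
 */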
597
598 /* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
599 void xdp_warn(const char *msg, const char *func, const int line)
600 {
601         WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
602 }
603 EXPORT_SYMBOL_GPL(xdp_warn);
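/* Illustrative note: callers normally go through the XDP_WARN() macro from
 * include/net/xdp.h, which supplies __func__ and __LINE__, e.g. (hypothetical
 * check and limit):
 *
 *	if (unlikely(xdp->data_end - xdp->data > max_len))
 *		XDP_WARN("oversized XDP frame");
 */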
604
605 int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
606 {
607         n_skb = kmem_cache_alloc_bulk(skbuff_cache, gfp, n_skb, skbs);
608         if (unlikely(!n_skb))
609                 return -ENOMEM;
610
611         return 0;
612 }
613 EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);
614
615 struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
616                                            struct sk_buff *skb,
617                                            struct net_device *dev)
618 {
619         struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
620         unsigned int headroom, frame_size;
621         void *hard_start;
622         u8 nr_frags;
623
624         /* xdp frags frame */
625         if (unlikely(xdp_frame_has_frags(xdpf)))
626                 nr_frags = sinfo->nr_frags;
627
628         /* Part of headroom was reserved for xdpf */
629         headroom = sizeof(*xdpf) + xdpf->headroom;
630
631         /* Memory size backing xdp_frame data already has reserved
632          * room for build_skb to place skb_shared_info in tailroom.
633          */
634         frame_size = xdpf->frame_sz;
635
636         hard_start = xdpf->data - headroom;
637         skb = build_skb_around(skb, hard_start, frame_size);
638         if (unlikely(!skb))
639                 return NULL;
640
641         skb_reserve(skb, headroom);
642         __skb_put(skb, xdpf->len);
643         if (xdpf->metasize)
644                 skb_metadata_set(skb, xdpf->metasize);
645
646         if (unlikely(xdp_frame_has_frags(xdpf)))
647                 xdp_update_skb_shared_info(skb, nr_frags,
648                                            sinfo->xdp_frags_size,
649                                            nr_frags * xdpf->frame_sz,
650                                            xdp_frame_is_frag_pfmemalloc(xdpf));
651
652         /* Essential SKB info: protocol and skb->dev */
653         skb->protocol = eth_type_trans(skb, dev);
654
655         /* Optional SKB info, currently missing:
656          * - HW checksum info           (skb->ip_summed)
657          * - HW RX hash                 (skb_set_hash)
658          * - RX ring dev queue index    (skb_record_rx_queue)
659          */
660
661         /* Until page_pool gets an SKB return path, release DMA here */
662         xdp_release_frame(xdpf);
663
664         /* Allow SKB to reuse area used by xdp_frame */
665         xdp_scrub_frame(xdpf);
666
667         return skb;
668 }
669 EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);
670
671 struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
672                                          struct net_device *dev)
673 {
674         struct sk_buff *skb;
675
676         skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
677         if (unlikely(!skb))
678                 return NULL;
679
680         memset(skb, 0, offsetof(struct sk_buff, tail));
681
682         return __xdp_build_skb_from_frame(xdpf, skb, dev);
683 }
684 EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);
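/* Illustrative sketch: how a redirect target can turn a received xdp_frame
 * back into an sk_buff and feed the network stack; "napi" and the drop policy
 * are hypothetical.
 *
 *	struct sk_buff *skb = xdp_build_skb_from_frame(xdpf, dev);
 *
 *	if (unlikely(!skb)) {
 *		xdp_return_frame(xdpf);	// skb alloc failed, frame still ours
 *		return;
 *	}
 *	napi_gro_receive(napi, skb);
 */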
685
686 struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
687 {
688         unsigned int headroom, totalsize;
689         struct xdp_frame *nxdpf;
690         struct page *page;
691         void *addr;
692
693         headroom = xdpf->headroom + sizeof(*xdpf);
694         totalsize = headroom + xdpf->len;
695
696         if (unlikely(totalsize > PAGE_SIZE))
697                 return NULL;
698         page = dev_alloc_page();
699         if (!page)
700                 return NULL;
701         addr = page_to_virt(page);
702
703         memcpy(addr, xdpf, totalsize);
704
705         nxdpf = addr;
706         nxdpf->data = addr + headroom;
707         nxdpf->frame_sz = PAGE_SIZE;
708         nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
709         nxdpf->mem.id = 0;
710
711         return nxdpf;
712 }
713
714 __diag_push();
715 __diag_ignore_all("-Wmissing-prototypes",
716                   "Global functions as their definitions will be in vmlinux BTF");
717
718 /**
719  * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp.
720  * @ctx: XDP context pointer.
721  * @timestamp: Return value pointer.
722  *
723  * Return:
724  * * Returns 0 on success or ``-errno`` on error.
725  * * ``-EOPNOTSUPP`` : means device driver does not implement kfunc
726  * * ``-ENODATA``    : means no RX-timestamp available for this frame
727  */
728 __bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
729 {
730         return -EOPNOTSUPP;
731 }
732
733 /**
734  * bpf_xdp_metadata_rx_hash - Read XDP frame RX hash.
735  * @ctx: XDP context pointer.
736  * @hash: Return value pointer.
737  *
738  * Return:
739  * * Returns 0 on success or ``-errno`` on error.
740  * * ``-EOPNOTSUPP`` : means device driver doesn't implement kfunc
741  * * ``-ENODATA``    : means no RX-hash available for this frame
742  */
743 __bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash)
744 {
745         return -EOPNOTSUPP;
746 }
747
748 __diag_pop();
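/* Illustrative sketch: from the BPF side, these kfuncs are declared with
 * __ksym and only return data on drivers that implement them (the stubs above
 * return -EOPNOTSUPP). A minimal, hypothetical XDP program:
 *
 *	extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
 *						 __u64 *timestamp) __ksym;
 *	extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
 *					    __u32 *hash) __ksym;
 *
 *	SEC("xdp")
 *	int rx_meta(struct xdp_md *ctx)
 *	{
 *		__u64 ts;
 *		__u32 hash;
 *
 *		if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
 *			bpf_printk("rx timestamp %llu", ts);
 *		if (!bpf_xdp_metadata_rx_hash(ctx, &hash))
 *			bpf_printk("rx hash %u", hash);
 *		return XDP_PASS;
 *	}
 */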
749
750 BTF_SET8_START(xdp_metadata_kfunc_ids)
751 #define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, 0)
752 XDP_METADATA_KFUNC_xxx
753 #undef XDP_METADATA_KFUNC
754 BTF_SET8_END(xdp_metadata_kfunc_ids)
755
756 static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
757         .owner = THIS_MODULE,
758         .set   = &xdp_metadata_kfunc_ids,
759 };
760
761 BTF_ID_LIST(xdp_metadata_kfunc_ids_unsorted)
762 #define XDP_METADATA_KFUNC(name, str) BTF_ID(func, str)
763 XDP_METADATA_KFUNC_xxx
764 #undef XDP_METADATA_KFUNC
765
766 u32 bpf_xdp_metadata_kfunc_id(int id)
767 {
768         /* xdp_metadata_kfunc_ids is sorted and can't be used */
769         return xdp_metadata_kfunc_ids_unsorted[id];
770 }
771
772 bool bpf_dev_bound_kfunc_id(u32 btf_id)
773 {
774         return btf_id_set8_contains(&xdp_metadata_kfunc_ids, btf_id);
775 }
776
777 static int __init xdp_metadata_init(void)
778 {
779         return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &xdp_metadata_kfunc_set);
780 }
781 late_initcall(xdp_metadata_init);
782
783 void xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
784 {
785         val &= NETDEV_XDP_ACT_MASK;
786         if (dev->xdp_features == val)
787                 return;
788
789         dev->xdp_features = val;
790
791         if (dev->reg_state == NETREG_REGISTERED)
792                 call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
793 }
794 EXPORT_SYMBOL_GPL(xdp_set_features_flag);
795
796 void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
797 {
798         xdp_features_t val = (dev->xdp_features | NETDEV_XDP_ACT_NDO_XMIT);
799
800         if (support_sg)
801                 val |= NETDEV_XDP_ACT_NDO_XMIT_SG;
802         xdp_set_features_flag(dev, val);
803 }
804 EXPORT_SYMBOL_GPL(xdp_features_set_redirect_target);
805
806 void xdp_features_clear_redirect_target(struct net_device *dev)
807 {
808         xdp_features_t val = dev->xdp_features;
809
810         val &= ~(NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG);
811         xdp_set_features_flag(dev, val);
812 }
813 EXPORT_SYMBOL_GPL(xdp_features_clear_redirect_target);
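/* Illustrative sketch: how a driver advertises its XDP capabilities; the
 * probe/teardown placement is hypothetical.
 *
 *	// At probe time:
 *	xdp_set_features_flag(netdev, NETDEV_XDP_ACT_BASIC |
 *				      NETDEV_XDP_ACT_REDIRECT);
 *
 *	// Once ndo_xdp_xmit() resources exist (no multi-buffer support here):
 *	xdp_features_set_redirect_target(netdev, false);
 *
 *	// And when those resources are torn down again:
 *	xdp_features_clear_redirect_target(netdev);
 */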