// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3
static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;
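
/* The mem.id <-> allocator mapping: IDs come from the cyclic IDA above,
 * and mem_id_ht maps an ID back to its struct xdp_mem_allocator. This
 * lets the frame-return path recover the allocator from the compact
 * xdp_mem_info (type + id) carried in every xdp_buff/xdp_frame.
 */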
static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}
static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}
static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn    = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};
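
/* Note the identity hash: the cyclically increasing mem.id is used
 * directly as the hash key, which already spreads entries across
 * buckets and keeps lookups on the frame-return fast path cheap.
 */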
static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}
static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}
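
/* Remove every entry owned by @allocator. The walk below is restarted
 * on -EAGAIN, which rhashtable_walk_next() can return when the table
 * is resized mid-walk; a restart may revisit entries, but removing an
 * already-removed node is a harmless no-op.
 */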
static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}
static void mem_id_disconnect(int id)
{
	struct xdp_mem_allocator *xa;

	mutex_lock(&mem_id_lock);

	xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
	if (!xa) {
		mutex_unlock(&mem_id_lock);
		WARN(1, "Request remove non-existing id(%d), driver bug?", id);
		return;
	}

	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);

	mutex_unlock(&mem_id_lock);
}
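
/* Both disconnect paths above defer the ID release and kfree() to an
 * RCU callback: a concurrent __xdp_return() may still be looking the
 * ID up under rcu_read_lock(), so neither the ID nor the allocator
 * entry may be recycled before a grace period has passed.
 */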
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	struct xdp_mem_allocator *xa;
	int id = xdp_rxq->mem.id;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	if (id == 0)
		return;

	if (xdp_rxq->mem.type == MEM_TYPE_ZERO_COPY)
		return mem_id_disconnect(id);

	if (xdp_rxq->mem.type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;

	/* Reset mem info to defaults */
	xdp_rxq->mem.id = 0;
	xdp_rxq->mem.type = 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);
static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}
/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index)
{
	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);
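
/* Typical driver pairing for the registration API above (a sketch;
 * the ring/netdev names are hypothetical): register at RX ring setup,
 * attach a memory model, and unregister from the teardown path:
 *
 *	err = xdp_rxq_info_reg(&ring->xdp_rxq, netdev, ring->queue_index);
 *	if (err)
 *		return err;
 *	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 *					 MEM_TYPE_PAGE_SHARED, NULL);
 *	if (err)
 *		xdp_rxq_info_unreg(&ring->xdp_rxq);
 */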
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);
static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;
	return 0;
}
/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}
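
/* Wrap-around example for the above: once mem_id_next reaches
 * MEM_ID_MAX and the top of the range is exhausted, ida_simple_get()
 * returns -ENOSPC, the cursor resets to MEM_ID_MIN, and the single
 * retry searches for a free ID from the bottom of the range.
 */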
static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	if (!__is_supported_mem_type(type))
		return -EOPNOTSUPP;

	xdp_rxq->mem.type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL || type == MEM_TYPE_ZERO_COPY)
			return -EINVAL; /* Setup time check page_pool req */
		return 0;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ret;
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return -ENOMEM;

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	xdp_rxq->mem.id = id;
	xdp_alloc->mem  = xdp_rxq->mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, xdp_rxq->mem.id);
		xdp_rxq->mem.id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect);

	mutex_unlock(&mem_id_lock);

	trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return errno;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
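
/* Sketch of a MEM_TYPE_PAGE_POOL registration (hypothetical driver
 * code; pp_params is assumed to be filled in elsewhere):
 *
 *	ring->page_pool = page_pool_create(&pp_params);
 *	if (IS_ERR(ring->page_pool))
 *		return PTR_ERR(ring->page_pool);
 *	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL,
 *					 ring->page_pool);
 */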
/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites, allowing faster recycling of
 * xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
			 unsigned long handle)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		napi_direct &= !xdp_return_frame_no_direct();
		page_pool_put_full_page(xa->page_pool, page, napi_direct);
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order-0 page */
		put_page(page);
		break;
	case MEM_TYPE_ZERO_COPY:
		/* NB! Only valid from an xdp_buff! */
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		xa->zc_alloc->free(xa->zc_alloc, handle);
		rcu_read_unlock();
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		break;
	}
}
void xdp_return_frame(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, false, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
void xdp_return_buff(struct xdp_buff *xdp)
{
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);
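
/* Of the three helpers above, only xdp_return_frame() passes
 * @napi_direct as false: it may run outside NAPI context (e.g. on a
 * remote CPU after a redirect), whereas xdp_return_frame_rx_napi()
 * and xdp_return_buff() rely on the RX NAPI protection described
 * before __xdp_return().
 */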
/* Only called for MEM_TYPE_PAGE_POOL, see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);
int xdp_attachment_query(struct xdp_attachment_info *info,
			 struct netdev_bpf *bpf)
{
	bpf->prog_id = info->prog ? info->prog->aux->id : 0;
	bpf->prog_flags = info->prog ? info->flags : 0;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_attachment_query);
bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
			     struct netdev_bpf *bpf)
{
	if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) {
		NL_SET_ERR_MSG(bpf->extack,
			       "program loaded with different flags");
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok);
void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);
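
/* Sketch of how a driver's ndo_bpf handler combines the attachment
 * helpers above (priv->xdp is a hypothetical driver field):
 *
 *	case XDP_SETUP_PROG:
 *		if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
 *			return -EBUSY;
 *		... install bpf->prog in the datapath ...
 *		xdp_attachment_setup(&priv->xdp, bpf);
 *		return 0;
 *	case XDP_QUERY_PROG:
 *		return xdp_attachment_query(&priv->xdp, bpf);
 */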
struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xdp_return_buff(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);
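
/* Layout produced by the conversion above: the struct xdp_frame sits
 * at the start of the freshly allocated order-0 page, immediately
 * followed by the copied metadata (if any) and the packet data, so
 * xdpf->data == page start + sizeof(*xdpf) + metasize.
 */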
/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);