/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <net/xdp.h>

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */
#include <linux/poison.h>

#include <trace/events/page_pool.h>

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

#define BIAS_MAX	LONG_MAX

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, params, sizeof(pool->p));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL allows the page to also be used for DMA sending,
	 * which is the XDP_TX use-case.
	 */
	if (pool->p.flags & PP_FLAG_DMA_MAP) {
		/* DMA-mapping is not supported on 32-bit systems with
		 * 64-bit DMA mapping.
		 */
		if (sizeof(dma_addr_t) > sizeof(unsigned long))
			return -EOPNOTSUPP;

		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
			return -EINVAL;
	}

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying rx data
		 */
	}

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
		return -ENOMEM;

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* Driver calling page_pool_create() also calls page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0) {
		pr_warn("%s() gave up with errno %d\n", __func__, err);
		kfree(pool);
		return ERR_PTR(err);
	}

	return pool;
}
EXPORT_SYMBOL(page_pool_create);
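
/* Usage sketch (illustrative, not part of this file): a typical Ethernet
 * driver creates one pool per RX-queue at setup time. The field values
 * below are assumptions for the example, not requirements:
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 256,		// often the RX-ring size
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,	// device doing the DMA
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,	// sync len for DMA_SYNC_DEV
 *		.offset		= 0,	// where rx-data starts in the page
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */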

static void page_pool_return_page(struct page_pool *pool, struct page *page);

noinline
static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	struct page *page;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Softirq guarantees the CPU, and thus the NUMA node, is stable.
	 * This assumes the CPU refilling the driver RX-ring also runs RX-NAPI.
	 */
#ifdef CONFIG_NUMA
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Slower-path: Get pages from locked ring queue */
	spin_lock(&r->consumer_lock);

	/* Refill alloc array, but only if NUMA match */
	do {
		page = __ptr_ring_consume(r);
		if (unlikely(!page))
			break;

		if (likely(page_to_nid(page) == pref_nid)) {
			pool->alloc.cache[pool->alloc.count++] = page;
		} else {
			/* NUMA mismatch;
			 * (1) release 1 page to page-allocator and
			 * (2) break out to fall through to alloc_pages_node.
			 * This limits stress on the page buddy allocator.
			 */
			page_pool_return_page(pool, page);
			page = NULL;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Return last page */
	if (likely(pool->alloc.count > 0))
		page = pool->alloc.cache[--pool->alloc.count];

	spin_unlock(&r->consumer_lock);
	return page;
}

/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct page *page;

	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
	if (likely(pool->alloc.count)) {
		/* Fast-path */
		page = pool->alloc.cache[--pool->alloc.count];
	} else {
		page = page_pool_refill_alloc_cache(pool);
	}

	return page;
}

static void page_pool_dma_sync_for_device(struct page_pool *pool,
					  struct page *page,
					  unsigned int dma_sync_size)
{
	dma_addr_t dma_addr = page_pool_get_dma_addr(page);

	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);
}

static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;

	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
	 * since dma_addr_t can be either 32 or 64 bits and does not always
	 * fit into page private data (i.e. a 32-bit CPU with 64-bit DMA caps).
	 * This mapping is kept for the lifetime of the page, until it leaves
	 * the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(pool->p.dev, dma))
		return false;

	page_pool_set_dma_addr(page, dma);

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

	return true;
}

static void page_pool_set_pp_info(struct page_pool *pool,
				  struct page *page)
{
	page->pp = pool;
	page->pp_magic |= PP_SIGNATURE;
}

static void page_pool_clear_pp_info(struct page *page)
{
	page->pp_magic = 0;
	page->pp = NULL;
}

static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
						 gfp_t gfp)
{
	struct page *page;

	gfp |= __GFP_COMP;
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (unlikely(!page))
		return NULL;

	if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
	    unlikely(!page_pool_dma_map(pool, page))) {
		put_page(page);
		return NULL;
	}

	page_pool_set_pp_info(pool, page);

	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
	return page;
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t gfp)
{
	const int bulk = PP_ALLOC_CACHE_REFILL;
	unsigned int pp_flags = pool->p.flags;
	unsigned int pp_order = pool->p.order;
	struct page *page;
	int i, nr_pages;

	/* Don't support bulk alloc for high-order pages */
	if (unlikely(pp_order))
		return __page_pool_alloc_page_order(pool, gfp);

	/* Unnecessary as alloc cache is empty, but guarantees zero count */
	if (unlikely(pool->alloc.count > 0))
		return pool->alloc.cache[--pool->alloc.count];

	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);

	nr_pages = alloc_pages_bulk_array(gfp, bulk, pool->alloc.cache);
	if (unlikely(!nr_pages))
		return NULL;

	/* Pages have been filled into the alloc.cache array, but count is
	 * zero and the page elements have not yet been (possibly) DMA mapped.
	 */
	for (i = 0; i < nr_pages; i++) {
		page = pool->alloc.cache[i];
		if ((pp_flags & PP_FLAG_DMA_MAP) &&
		    unlikely(!page_pool_dma_map(pool, page))) {
			put_page(page);
			continue;
		}

		page_pool_set_pp_info(pool, page);
		pool->alloc.cache[pool->alloc.count++] = page;
		/* Track how many pages are held 'in-flight' */
		pool->pages_state_hold_cnt++;
		trace_page_pool_state_hold(pool, page,
					   pool->pages_state_hold_cnt);
	}

	/* Return last page */
	if (likely(pool->alloc.count > 0))
		page = pool->alloc.cache[--pool->alloc.count];
	else
		page = NULL;

	/* A page that was just alloc'ed should/must have refcnt 1. */
	return page;
}

/* Use page_pool as a replacement for alloc_pages() API calls, but with a
 * synchronization guarantee for the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);
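
/* Usage sketch (illustrative): refilling RX descriptors from NAPI context;
 * page_pool_dev_alloc_pages() is the GFP_ATOMIC convenience wrapper. The
 * rx_desc layout is an assumption for the example:
 *
 *	page = page_pool_dev_alloc_pages(pool);
 *	if (unlikely(!page))
 *		break;
 *	rx_desc->addr = page_pool_get_dma_addr(page) + pool->p.offset;
 */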

/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))
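
/* Worked example (illustrative): the serial-number arithmetic stays correct
 * across u32 wrap-around. With hold_cnt having wrapped to 0x00000002 and
 * release_cnt still at 0xfffffffe, the u32 subtraction yields 0x00000004,
 * and the s32 cast keeps the distance positive:
 *
 *	_distance(0x00000002, 0xfffffffe) == 4
 */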

static s32 page_pool_inflight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

	return inflight;
}

/* Disconnects a page (from a page_pool). API users can have a need
 * to disconnect a page (from a page_pool), to allow it to be used as
 * a regular page (that will eventually be returned to the normal
 * page-allocator via put_page).
 */
void page_pool_release_page(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;
	int count;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		/* Always account for inflight pages, even if we didn't
		 * map them
		 */
		goto skip_dma_unmap;

	dma = page_pool_get_dma_addr(page);

	/* When page is unmapped, it cannot be returned to our pool */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC);
	page_pool_set_dma_addr(page, 0);
skip_dma_unmap:
	page_pool_clear_pp_info(page);

	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page, count);
}
EXPORT_SYMBOL(page_pool_release_page);
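
/* Usage sketch (illustrative): a driver passing a page up the stack without
 * recycling support releases pool ownership first; from then on the page is
 * freed via the normal put_page() path:
 *
 *	page_pool_release_page(pool, page);	// unmap + drop pool state
 *	skb_add_rx_frag(skb, 0, page, pool->p.offset, len, PAGE_SIZE);
 */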

/* Return a page to the page allocator, cleaning up our state */
static void page_pool_return_page(struct page_pool *pool, struct page *page)
{
	page_pool_release_page(pool, page);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
	int ret;

	/* BH protection not needed if current is serving softirq */
	if (in_serving_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	return (ret == 0) ? true : false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
		return false;

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	return true;
}

/* If the page refcnt == 1, this will try to recycle the page.
 * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->p.max_len).
 * If the page refcnt != 1, then the page will be returned to the memory
 * subsystem.
 */
static __always_inline struct page *
__page_pool_put_page(struct page_pool *pool, struct page *page,
		     unsigned int dma_sync_size, bool allow_direct)
{
	/* It is not the last user for the page frag case */
	if (pool->p.flags & PP_FLAG_PAGE_FRAG &&
	    page_pool_atomic_sub_frag_count_return(page, 1))
		return NULL;

	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns the page, and can recycle it.
	 *
	 * The page is NOT reusable when it was allocated while the system
	 * was under some pressure. (page_is_pfmemalloc)
	 */
	if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page,
						      dma_sync_size);

		if (allow_direct && in_serving_softirq() &&
		    page_pool_recycle_in_cache(page, pool))
			return NULL;

		/* Page found as candidate for recycling */
		return page;
	}

	/* Fallback/non-XDP mode: API user has an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In-case page_pool maintains the DMA mapping, API user must
	 * call page_pool_put_page once. In this elevated refcnt
	 * case, the DMA is unmapped/released, as driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page.
	 */
	/* Do not replace this with page_pool_return_page() */
	page_pool_release_page(pool, page);
	put_page(page);

	return NULL;
}

void page_pool_put_page(struct page_pool *pool, struct page *page,
			unsigned int dma_sync_size, bool allow_direct)
{
	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
	if (page && !page_pool_recycle_in_ring(pool, page)) {
		/* Cache full, fallback to free pages */
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_put_page);
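
/* Usage sketch (illustrative): the XDP_DROP fast-path can recycle straight
 * into the alloc-side cache, which is only safe from the NAPI/softirq
 * context that owns this pool:
 *
 *	case XDP_DROP:
 *		page_pool_put_page(pool, page, -1, true);
 *		// or equivalently: page_pool_recycle_direct(pool, page);
 *		break;
 *
 * A dma_sync_size of -1 (UINT_MAX) requests a sync of the full
 * pool->p.max_len, due to the min() clamp in
 * page_pool_dma_sync_for_device().
 */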

/* Caller must not use data area after call, as this function overwrites it */
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count)
{
	int i, bulk_len = 0;

	for (i = 0; i < count; i++) {
		struct page *page = virt_to_head_page(data[i]);

		page = __page_pool_put_page(pool, page, -1, false);
		/* Approved for bulk recycling in ptr_ring cache */
		if (page)
			data[bulk_len++] = page;
	}

	if (unlikely(!bulk_len))
		return;

	/* Bulk producer into ptr_ring page_pool cache */
	page_pool_ring_lock(pool);
	for (i = 0; i < bulk_len; i++) {
		if (__ptr_ring_produce(&pool->ring, data[i]))
			break; /* ring full */
	}
	page_pool_ring_unlock(pool);

	/* Hopefully all pages were returned into the ptr_ring */
	if (likely(i == bulk_len))
		return;

	/* ptr_ring cache full, free remaining pages outside producer lock
	 * since put_page() with refcnt == 1 can be an expensive operation
	 */
	for (; i < bulk_len; i++)
		page_pool_return_page(pool, data[i]);
}
EXPORT_SYMBOL(page_pool_put_page_bulk);
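
/* Usage sketch (illustrative): the main in-tree caller is
 * xdp_return_frame_bulk(), which batches XDP frame frees. A hypothetical
 * direct user collects page *virtual* addresses (note the
 * virt_to_head_page() above) and flushes them in one call:
 *
 *	void *data[16];
 *	int n = 0;
 *
 *	data[n++] = page_address(page);	// repeated per page
 *	page_pool_put_page_bulk(pool, data, n);
 */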

static struct page *page_pool_drain_frag(struct page_pool *pool,
					 struct page *page)
{
	long drain_count = BIAS_MAX - pool->frag_users;

	/* Some user is still using the page frag */
	if (likely(page_pool_atomic_sub_frag_count_return(page,
							  drain_count)))
		return NULL;

	if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) {
		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page, -1);

		return page;
	}

	page_pool_return_page(pool, page);
	return NULL;
}

static void page_pool_free_frag(struct page_pool *pool)
{
	long drain_count = BIAS_MAX - pool->frag_users;
	struct page *page = pool->frag_page;

	pool->frag_page = NULL;

	if (!page ||
	    page_pool_atomic_sub_frag_count_return(page, drain_count))
		return;

	page_pool_return_page(pool, page);
}

struct page *page_pool_alloc_frag(struct page_pool *pool,
				  unsigned int *offset,
				  unsigned int size, gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	struct page *page = pool->frag_page;

	if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
		    size > max_size))
		return NULL;

	size = ALIGN(size, dma_get_cache_alignment());
	*offset = pool->frag_offset;

	if (page && *offset + size > max_size) {
		page = page_pool_drain_frag(pool, page);
		if (page)
			goto frag_reset;
	}

	if (!page) {
		page = page_pool_alloc_pages(pool, gfp);
		if (unlikely(!page)) {
			pool->frag_page = NULL;
			return NULL;
		}

		pool->frag_page = page;

frag_reset:
		pool->frag_users = 1;
		*offset = 0;
		pool->frag_offset = size;
		page_pool_set_frag_count(page, BIAS_MAX);
		return page;
	}

	pool->frag_users++;
	pool->frag_offset = *offset + size;
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_frag);
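
/* Usage sketch (illustrative): with PP_FLAG_PAGE_FRAG set, a driver using
 * ~2KB RX buffers can pack two fragments into each 4KB page. The buffer
 * size and rx_desc layout are assumptions for the example:
 *
 *	unsigned int offset;
 *	struct page *page;
 *
 *	page = page_pool_dev_alloc_frag(pool, &offset, 2048);
 *	if (unlikely(!page))
 *		return -ENOMEM;
 *	rx_desc->addr = page_pool_get_dma_addr(page) + offset;
 */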

static void page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		page_pool_return_page(pool, page);
	}
}

static void page_pool_free(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

	kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	struct page *page;

	if (pool->destroy_cnt)
		return;

	/* Empty alloc cache, assume caller made sure this is
	 * no-longer in use, and page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}

static void page_pool_scrub(struct page_pool *pool)
{
	page_pool_empty_alloc_cache_once(pool);
	pool->destroy_cnt++;

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool);
	if (!inflight)
		page_pool_free(pool);

	return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	int inflight;

	inflight = page_pool_release(pool);
	if (!inflight)
		return;

	/* Periodic warning */
	if (time_after_eq(jiffies, pool->defer_warn)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
			__func__, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
}

void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	page_pool_free_frag(pool);

	if (!page_pool_release(pool))
		return;

	pool->defer_start = jiffies;
	pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);
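
/* Usage sketch (illustrative): teardown ordering in a driver. Returning
 * all pages still held by the RX-ring first lets page_pool_release() free
 * the pool immediately instead of bouncing through the deferred-work retry
 * path above. my_free_rx_buffers() is a hypothetical helper:
 *
 *	my_free_rx_buffers(rxq);		// put back all held pages
 *	xdp_rxq_info_unreg(&rxq->xdp_rxq);	// drops the XDP-mem user ref
 *	page_pool_destroy(pool);
 */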

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	struct page *page;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);
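
/* Usage sketch (illustrative): a driver can retarget allocations when its
 * RX interrupt moves to another NUMA node; run this from the pool's own
 * NAPI poll, e.g. via the page_pool_nid_changed() wrapper:
 *
 *	page_pool_nid_changed(pool, numa_mem_id());
 */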

bool page_pool_return_skb_page(struct page *page)
{
	struct page_pool *pp;

	page = compound_head(page);

	/* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
	 * in order to preserve any existing bits, such as bit 0 for the
	 * head page of compound page and bit 1 for pfmemalloc page, so
	 * mask those bits for freeing side when doing below checking,
	 * and page_is_pfmemalloc() is checked in __page_pool_put_page()
	 * to avoid recycling the pfmemalloc page.
	 */
	if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE))
		return false;

	pp = page->pp;

	/* Driver set this to memory recycling info. Reset it on recycle.
	 * This will *not* work for NIC using a split-page memory model.
	 * The page will be returned to the pool here regardless of the
	 * 'flipped' fragment being in use or not.
	 */
	page_pool_put_full_page(pp, page, false);

	return true;
}
EXPORT_SYMBOL(page_pool_return_skb_page);
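
/* Usage sketch (illustrative): a driver opts an skb into this path with
 * skb_mark_for_recycle() (its exact signature varies across kernel
 * versions); the skb free path then lands here for every pp_magic-tagged
 * page instead of in the regular put_page():
 *
 *	skb = build_skb(page_address(page), PAGE_SIZE);
 *	skb_mark_for_recycle(...);	// sets skb->pp_recycle
 */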