/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * This page_pool allocator is optimized for the XDP mode that
 * uses one frame per page, but it has fallbacks that act like the
 * regular page allocator APIs.
 *
 * Basic use involves replacing alloc_pages() calls with the
 * page_pool_alloc_pages() call.  Drivers should likely use
 * page_pool_dev_alloc_pages() in place of dev_alloc_pages().
 *
 * The API keeps track of in-flight pages, in order to let API users know
 * when it is safe to deallocate the page_pool object.  Thus, API users
 * must make sure to call page_pool_release_page() when a page is
 * "leaving" the page_pool, or call page_pool_put_page() where
 * appropriate, in order to maintain correct accounting.
 * API users must only call page_pool_put_page() once per page, as it
 * will either recycle the page, or, in case of an elevated refcnt, it
 * will release the DMA mapping and in-flight state accounting.  We
 * hope to lift this requirement in the future.
 */
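
/* Illustrative sketch (not part of this API): the basic lifecycle
 * described above, as it might look in a driver's RX path.  The "rxq"
 * structure and error handling are hypothetical.
 *
 *	struct page *page;
 *
 *	page = page_pool_dev_alloc_pages(rxq->page_pool);
 *	if (!page)
 *		return -ENOMEM;
 *
 *	// ... program the buffer into the RX ring, receive a frame ...
 *
 *	// Frame consumed locally (e.g. XDP_DROP): hand the page back.
 *	page_pool_put_full_page(rxq->page_pool, page, true);
 *
 *	// Frame handed to the stack instead: unmap and drop in-flight
 *	// accounting before the page leaves the pool.
 *	page_pool_release_page(rxq->page_pool, page);
 */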
#ifndef _NET_PAGE_POOL_H
#define _NET_PAGE_POOL_H

#include <linux/mm.h> /* Needed by ptr_ring */
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>

#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
					*/
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set, all pages that the driver gets
					* from page_pool will be
					* DMA-synced-for-device according to
					* the length provided by the device
					* driver.
					* Please note DMA-sync-for-CPU is still
					* the device driver's responsibility.
					*/
#define PP_FLAG_PAGE_FRAG	BIT(2) /* for page frag feature */
#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP |\
				 PP_FLAG_DMA_SYNC_DEV |\
				 PP_FLAG_PAGE_FRAG)

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * thus a natural max size of objects needed in the cache.
 *
 * Room is kept for more objects to support the XDP_DROP use-case, as
 * XDP_DROP can recycle objects directly into this array, since it
 * shares the same softirq/NAPI protection.  If the cache is already
 * full (or partly full), the XDP_DROP recycles would have to take a
 * slower code path.
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count;
	struct page *cache[PP_ALLOC_CACHE_SIZE];
};

struct page_pool_params {
	unsigned int	flags;
	unsigned int	order;
	unsigned int	pool_size;
	int		nid;  /* NUMA node id to allocate pages from */
	struct device	*dev; /* device, for DMA pre-mapping purposes */
	enum dma_data_direction dma_dir; /* DMA mapping direction */
	unsigned int	max_len; /* max DMA sync memory size */
	unsigned int	offset;  /* DMA addr offset */
	void (*init_callback)(struct page *page, void *arg);
	void *init_arg;
};

#ifdef CONFIG_PAGE_POOL_STATS
struct page_pool_alloc_stats {
	u64 fast; /* fast path allocations */
	u64 slow; /* slow-path order-0 allocations */
	u64 slow_high_order; /* slow-path high order allocations */
	u64 empty; /* failed refills due to empty ptr ring, forcing
		    * slow path allocation
		    */
	u64 refill; /* allocations via successful refill */
	u64 waive;  /* failed refills due to NUMA zone mismatch */
};

struct page_pool_recycle_stats {
	u64 cached;	/* recycling placed page in the cache */
	u64 cache_full;	/* cache was full */
	u64 ring;	/* recycling placed page back into the ptr ring */
	u64 ring_full;	/* page was released from page_pool because the
			 * PTR ring was full
			 */
	u64 released_refcnt; /* page released because of elevated
			      * refcnt
			      */
};

/* This struct wraps the above stats structs so users of the
 * page_pool_get_stats API can pass a single argument when requesting the
 * stats for the page pool.
 */
struct page_pool_stats {
	struct page_pool_alloc_stats alloc_stats;
	struct page_pool_recycle_stats recycle_stats;
};

int page_pool_ethtool_stats_get_count(void);
u8 *page_pool_ethtool_stats_get_strings(u8 *data);
u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);

/*
 * Drivers that wish to harvest page pool stats and report them to users
 * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
 * struct page_pool_stats and call page_pool_get_stats() to get stats
 * for the specified pool.
 */
bool page_pool_get_stats(struct page_pool *pool,
			 struct page_pool_stats *stats);
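
/* Illustrative sketch, assuming CONFIG_PAGE_POOL_STATS=y: how a driver
 * might fold pool statistics into its ethtool output.  "priv" and the
 * surrounding ethtool callbacks are hypothetical.
 *
 *	struct page_pool_stats stats = { };
 *	u64 *data;	// cursor into the ethtool data array
 *
 *	if (page_pool_get_stats(priv->page_pool, &stats))
 *		data = page_pool_ethtool_stats_get(data, &stats);
 *
 *	// The matching string/count helpers:
 *	//	count += page_pool_ethtool_stats_get_count();
 *	//	data = page_pool_ethtool_stats_get_strings(data);
 */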
#else
static inline int page_pool_ethtool_stats_get_count(void)
{
	return 0;
}

static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	return data;
}

static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
{
	return data;
}
#endif

struct page_pool {
	struct page_pool_params p;

	struct delayed_work release_dw;
	void (*disconnect)(void *);
	unsigned long defer_start;
	unsigned long defer_warn;

	u32 pages_state_hold_cnt;
	unsigned int frag_offset;
	struct page *frag_page;
	long frag_users;

#ifdef CONFIG_PAGE_POOL_STATS
	/* these stats are incremented while in softirq context */
	struct page_pool_alloc_stats alloc_stats;
#endif
	u32 xdp_mem_id;

	/*
	 * Data structure for the allocation side
	 *
	 * The driver's allocation side usually already performs some
	 * kind of resource protection.  Piggyback on this protection,
	 * and require the driver to protect the allocation side.
	 *
	 * For NIC drivers this means allocating a page_pool per
	 * RX-queue.  The RX-queue is already protected by Softirq/BH
	 * scheduling and napi_schedule; NAPI scheduling guarantees
	 * that a single napi_struct will only be scheduled on a
	 * single CPU (see napi_schedule).
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization-wise,
	 * because frees can happen on remote CPUs, with no association
	 * with the allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer
	 * efficiently, in a way that doesn't bounce cache-lines.
	 *
	 * TODO: Implement bulk return of pages into this structure.
	 */
	struct ptr_ring ring;

#ifdef CONFIG_PAGE_POOL_STATS
	/* recycle stats are per-cpu to avoid locking */
	struct page_pool_recycle_stats __percpu *recycle_stats;
#endif
	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue being
	 * protected by NAPI, due to the above pp_alloc_cache.  This
	 * refcnt exists to simplify drivers' error handling.
	 */
	refcount_t user_cnt;

	u64 destroy_cnt;
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}

struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
						    unsigned int *offset,
						    unsigned int size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_frag(pool, offset, size, gfp);
}
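
/* Illustrative sketch: sub-page buffers via the frag API.  The pool must
 * have been created with PP_FLAG_PAGE_FRAG; "rxq" and "buf_len" are
 * hypothetical.
 *
 *	unsigned int offset;
 *	struct page *page;
 *
 *	page = page_pool_dev_alloc_frag(rxq->page_pool, &offset, buf_len);
 *	if (!page)
 *		return -ENOMEM;
 *
 *	// The buffer starts at page_address(page) + offset; one page may
 *	// back several such buffers, tracked via pp_frag_count.
 */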
/* Get the stored DMA direction.  A driver might decide to track this locally
 * and avoid the extra cache line from page_pool to determine the direction.
 */
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
	return pool->p.dma_dir;
}
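
/* Illustrative sketch: as suggested above, a driver can cache the
 * direction in its own RX-queue structure ("rxq" is hypothetical)
 * instead of dereferencing the pool on every packet:
 *
 *	rxq->dma_dir = page_pool_get_dma_dir(rxq->page_pool);
 */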
bool page_pool_return_skb_page(struct page *page);

struct page_pool *page_pool_create(const struct page_pool_params *params);
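
/* Illustrative sketch, one possible per-RX-queue setup.  The field
 * values ("ring_size", "pdev", DMA_FROM_DEVICE, etc.) are hypothetical
 * and depend on the hardware and driver:
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= ring_size,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= 0,
 *	};
 *	struct page_pool *pool;
 *
 *	pool = page_pool_create(&pp_params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);	// creation failed
 */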
struct xdp_mem_info;

#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   struct xdp_mem_info *mem);
void page_pool_release_page(struct page_pool *pool, struct page *page);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *),
					 struct xdp_mem_info *mem)
{
}

static inline void page_pool_release_page(struct page_pool *pool,
					  struct page *page)
{
}

static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
					   int count)
{
}
#endif

void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
				  unsigned int dma_sync_size,
				  bool allow_direct);

/* pp_frag_count represents the number of writers who can update the page
 * either by updating skb->data or via DMA mappings for the device.
 * We can't rely on the page refcnt for that, as we don't know who might be
 * holding page references, and we can't reliably destroy or sync DMA mappings
 * of the fragments.
 *
 * When pp_frag_count reaches 0 we can either recycle the page, if the page
 * refcnt is 1, or return it back to the memory allocator and destroy any
 * mappings we have.
 */
static inline void page_pool_fragment_page(struct page *page, long nr)
{
	atomic_long_set(&page->pp_frag_count, nr);
}

static inline long page_pool_defrag_page(struct page *page, long nr)
{
	long ret;

	/* If nr == pp_frag_count then we have cleared all remaining
	 * references to the page. No need to actually overwrite it, instead
	 * we can leave this to be overwritten by the calling function.
	 *
	 * The main advantage of doing this is that an atomic_read is
	 * generally a much cheaper operation than an atomic update,
	 * especially when dealing with a page that may be partitioned
	 * into only 2 or 3 pieces.
	 */
	if (atomic_long_read(&page->pp_frag_count) == nr)
		return 0;

	ret = atomic_long_sub_return(nr, &page->pp_frag_count);
	WARN_ON(ret < 0);
	return ret;
}
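
/* Illustrative sketch of the pp_frag_count life cycle: the producer
 * splits one page into N buffers, each consumer later drops exactly one
 * reference, and only the final drop observes zero:
 *
 *	page_pool_fragment_page(page, 3);	// page carved into 3 buffers
 *	...
 *	if (page_pool_defrag_page(page, 1) == 0)
 *		;	// last user; the page may now be recycled/released
 */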
static inline bool page_pool_is_last_frag(struct page_pool *pool,
					  struct page *page)
{
	/* If fragments aren't enabled or count is 0 we were the last user */
	return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
	       (page_pool_defrag_page(page, 1) == 0);
}

static inline void page_pool_put_page(struct page_pool *pool,
				      struct page *page,
				      unsigned int dma_sync_size,
				      bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield the linker.
	 */
#ifdef CONFIG_PAGE_POOL
	if (!page_pool_is_last_frag(pool, page))
		return;

	page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
#endif
}

/* Same as above but will try to sync the entire area pool->max_len */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	page_pool_put_page(pool, page, -1, allow_direct);
}

/* Same as above but the caller must guarantee safe context, e.g. NAPI */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}
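
/* Illustrative sketch: recycling from a known-safe context, e.g. the
 * XDP_DROP branch of a driver's NAPI poll loop ("rxq" is hypothetical):
 *
 *	case XDP_DROP:
 *		page_pool_recycle_direct(rxq->page_pool, page);
 *		break;
 */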
#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT	\
		(sizeof(dma_addr_t) > sizeof(unsigned long))

static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	dma_addr_t ret = page->dma_addr;

	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
		ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;

	return ret;
}

static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	page->dma_addr = addr;
	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
		page->dma_addr_upper = upper_32_bits(addr);
}
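
/* Illustrative sketch: a pool created with PP_FLAG_DMA_MAP stores the
 * mapping, so a driver can sync the received data for the CPU before
 * touching it ("len" is the hypothetical received length):
 *
 *	dma_sync_single_for_cpu(pool->p.dev,
 *				page_pool_get_dma_addr(page) + pool->p.offset,
 *				len, page_pool_get_dma_dir(pool));
 */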
static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);
static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}
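
/* Illustrative sketch: drivers typically call this once per NAPI poll so
 * the pool follows the CPU servicing the queue ("rxq" is hypothetical):
 *
 *	page_pool_nid_changed(rxq->page_pool, numa_mem_id());
 */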
static inline void page_pool_ring_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{
	if (in_serving_softirq())
		spin_lock(&pool->ring.producer_lock);
	else
		spin_lock_bh(&pool->ring.producer_lock);
}

static inline void page_pool_ring_unlock(struct page_pool *pool)
	__releases(&pool->ring.producer_lock)
{
	if (in_serving_softirq())
		spin_unlock(&pool->ring.producer_lock);
	else
		spin_unlock_bh(&pool->ring.producer_lock);
}

#endif /* _NET_PAGE_POOL_H */