net: page_pool: hide page_pool_release_page()
author Jakub Kicinski <kuba@kernel.org>
Thu, 20 Jul 2023 01:04:08 +0000 (18:04 -0700)
committer Jakub Kicinski <kuba@kernel.org>
Sat, 22 Jul 2023 01:50:18 +0000 (18:50 -0700)
There seems to be no user calling page_pool_release_page()
for legitimate reasons; all the users simply had not been converted
to skb-based recycling yet. The previous changes converted them.
Update the docs, and unexport the function.

Link: https://lore.kernel.org/r/20230720010409.1967072-4-kuba@kernel.org
Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
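
The conversion the message refers to is mechanical on the driver side: instead
of detaching the page from the pool with page_pool_release_page() before
handing it to the stack, the driver marks the skb so the page is recycled when
the skb is freed. A minimal sketch of the pattern; the mydrv_build_rx_skb()
name and the headroom-free, order-0 page layout are illustrative, not taken
from this commit:

	#include <linux/mm.h>
	#include <linux/skbuff.h>
	#include <net/page_pool.h>

	/* Hypothetical RX completion helper; error handling simplified. */
	static struct sk_buff *mydrv_build_rx_skb(struct page_pool *pool,
						  struct page *page,
						  unsigned int len)
	{
		struct sk_buff *skb;

		skb = build_skb(page_address(page), PAGE_SIZE);
		if (!skb)
			return NULL;
		skb_put(skb, len);

		/* Previously: page_pool_release_page(pool, page) took the
		 * page out of the pool's accounting by hand.  Now the skb
		 * carries the page back to the pool when it is freed.
		 */
		skb_mark_for_recycle(skb);
		return skb;
	}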
Documentation/networking/page_pool.rst
include/net/page_pool.h
net/core/page_pool.c

Documentation/networking/page_pool.rst
index 873efd9..0aa850c 100644
@@ -13,9 +13,9 @@ replacing dev_alloc_pages().
 
 API keeps track of in-flight pages, in order to let API user know
 when it is safe to free a page_pool object.  Thus, API users
-must run page_pool_release_page() when a page is leaving the page_pool or
-call page_pool_put_page() where appropriate in order to maintain correct
-accounting.
+must call page_pool_put_page() to free the page, or attach
+the page to page_pool-aware objects like skbs marked with
+skb_mark_for_recycle().
 
 API user must call page_pool_put_page() once on a page, as it
 will either recycle the page, or in case of refcnt > 1, it will
@@ -87,9 +87,6 @@ a page will cause no race conditions is enough.
   must guarantee safe context (e.g NAPI), since it will recycle the page
   directly into the pool fast cache.
 
-* page_pool_release_page(): Unmap the page (if mapped) and account for it on
-  in-flight counters.
-
 * page_pool_dev_alloc_pages(): Get a page from the page allocator or page_pool
   caches.
 
@@ -194,7 +191,7 @@ NAPI poller
             if XDP_DROP:
                 page_pool_recycle_direct(page_pool, page);
         } else (packet_is_skb) {
-            page_pool_release_page(page_pool, page);
+            skb_mark_for_recycle(skb);
             new_page = page_pool_dev_alloc_pages(page_pool);
         }
     }
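
Fleshed out, the documented poller pairs the recycle mark with allocating a
replacement page. A rough C rendering under stated assumptions: every mydrv_*
symbol and struct mydrv_rxq are hypothetical, and error handling is omitted:

	#include <linux/netdevice.h>
	#include <linux/bpf.h>
	#include <net/page_pool.h>

	struct mydrv_rxq {
		struct napi_struct napi;
		struct page_pool *page_pool;
	};

	/* Hypothetical helpers; not real kernel APIs. */
	bool mydrv_rx_pending(struct mydrv_rxq *rxq);
	struct page *mydrv_next_rx_page(struct mydrv_rxq *rxq);
	u32 mydrv_run_xdp(struct mydrv_rxq *rxq, struct page *page);
	struct sk_buff *mydrv_build_skb(struct mydrv_rxq *rxq, struct page *page);
	void mydrv_refill(struct mydrv_rxq *rxq);

	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
	{
		struct mydrv_rxq *rxq = container_of(napi, struct mydrv_rxq, napi);
		int done = 0;

		while (done < budget && mydrv_rx_pending(rxq)) {
			struct page *page = mydrv_next_rx_page(rxq);

			if (mydrv_run_xdp(rxq, page) == XDP_DROP) {
				/* Safe here: NAPI context, so the page can go
				 * straight back into the pool's fast cache.
				 */
				page_pool_recycle_direct(rxq->page_pool, page);
			} else {
				struct sk_buff *skb = mydrv_build_skb(rxq, page);

				/* Was page_pool_release_page(); the skb now
				 * returns the page to the pool on free.
				 */
				skb_mark_for_recycle(skb);
				napi_gro_receive(napi, skb);
				mydrv_refill(rxq); /* page_pool_dev_alloc_pages() */
			}
			done++;
		}
		return done;
	}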
include/net/page_pool.h
index 126f9e2..f1d5cc1 100644
@@ -18,9 +18,8 @@
  *
  * API keeps track of in-flight pages, in-order to let API user know
  * when it is safe to dealloactor page_pool object.  Thus, API users
- * must make sure to call page_pool_release_page() when a page is
- * "leaving" the page_pool.  Or call page_pool_put_page() where
- * appropiate.  For maintaining correct accounting.
+ * must call page_pool_put_page() where appropriate and only attach
+ * the page to page_pool-aware objects, like skbs marked for recycling.
  *
  * API user must only call page_pool_put_page() once on a page, as it
  * will either recycle the page, or in case of elevated refcnt, it
@@ -251,7 +250,6 @@ void page_pool_unlink_napi(struct page_pool *pool);
 void page_pool_destroy(struct page_pool *pool);
 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
                           struct xdp_mem_info *mem);
-void page_pool_release_page(struct page_pool *pool, struct page *page);
 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
                             int count);
 #else
@@ -268,10 +266,6 @@ static inline void page_pool_use_xdp_mem(struct page_pool *pool,
                                         struct xdp_mem_info *mem)
 {
 }
-static inline void page_pool_release_page(struct page_pool *pool,
-                                         struct page *page)
-{
-}
 
 static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
                                           int count)
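
The declaration removed here followed the header's usual config-stub pattern,
still visible in the surviving page_pool_put_page_bulk() lines above: a real
prototype when the feature is built in, an empty static inline otherwise, so
!CONFIG_PAGE_POOL callers keep compiling. Schematically:

	#ifdef CONFIG_PAGE_POOL
	void page_pool_put_page_bulk(struct page_pool *pool, void **data,
				     int count);
	#else
	/* No-op stub: builds without page pool support compile unchanged. */
	static inline void page_pool_put_page_bulk(struct page_pool *pool,
						   void **data, int count)
	{
	}
	#endif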
net/core/page_pool.c
index a3e12a6..2c7cf5f 100644
@@ -492,7 +492,7 @@ static s32 page_pool_inflight(struct page_pool *pool)
  * a regular page (that will eventually be returned to the normal
  * page-allocator via put_page).
  */
-void page_pool_release_page(struct page_pool *pool, struct page *page)
+static void page_pool_release_page(struct page_pool *pool, struct page *page)
 {
        dma_addr_t dma;
        int count;
@@ -519,7 +519,6 @@ skip_dma_unmap:
        count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
        trace_page_pool_state_release(pool, page, count);
 }
-EXPORT_SYMBOL(page_pool_release_page);
 
 /* Return a page to the page allocator, cleaning up our state */
 static void page_pool_return_page(struct page_pool *pool, struct page *page)
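
With the symbol now private, the unmap-and-account step is reachable only
through the pool's own return path. Roughly, paraphrasing the surrounding code
in this file rather than quoting it, page_pool_return_page() pairs the release
with the final reference drop:

	/* Sketch of the internal caller around this commit; see
	 * net/core/page_pool.c for the authoritative version.
	 */
	static void page_pool_return_page(struct page_pool *pool,
					  struct page *page)
	{
		page_pool_release_page(pool, page); /* DMA unmap + in-flight count */
		put_page(page);                     /* back to the page allocator */
	}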