net: page_pool: refactor dma_map into own function page_pool_dma_map
author    Jesper Dangaard Brouer <brouer@redhat.com>
          Fri, 30 Apr 2021 06:02:04 +0000 (23:02 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 30 Apr 2021 18:20:43 +0000 (11:20 -0700)
In preparation for the next patch, move the DMA mapping into its own
function, as this will make the subsequent changes easier to follow.

[ilias.apalodimas: make page_pool_dma_map return boolean]

Link: https://lkml.kernel.org/r/20210325114228.27719-9-mgorman@techsingularity.net
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Reviewed-by: Alexander Lobakin <alobakin@pm.me>
Cc: Alexander Duyck <alexander.duyck@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: David Miller <davem@davemloft.net>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
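
The helper introduced below only runs for pools created with PP_FLAG_DMA_MAP
set. As a rough sketch of the driver-side setup that exercises this path
(the function name, sizes, and flag choices here are illustrative, not taken
from this patch):

#include <net/page_pool.h>

/* Illustrative only: my_create_rx_pool and its parameters are placeholders
 * for a typical RX page_pool setup that uses the DMA-mapping path.
 */
static struct page_pool *my_create_rx_pool(struct device *dev)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 1024,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,		/* device doing the DMA */
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,	/* length to sync for RX */
		.offset		= 0,
	};

	/* With PP_FLAG_DMA_MAP set, every page allocated on the slow path
	 * goes through the new page_pool_dma_map() helper below.
	 */
	return page_pool_create(&pp_params);
}

When PP_FLAG_DMA_SYNC_DEV is also set, page_pool_dma_map() additionally calls
page_pool_dma_sync_for_device() for the first pool->p.max_len bytes of the
freshly mapped page.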
net/core/page_pool.c

diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index ad8b0707af04b8be13dcb003dc1058b33f374946..40e1b2beaa6c4e19e08d4f3b5ec06efa289e411c 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -180,14 +180,37 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
                                         pool->p.dma_dir);
 }
 
+static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
+{
+       dma_addr_t dma;
+
+       /* Setup DMA mapping: use 'struct page' area for storing DMA-addr
+        * since dma_addr_t can be either 32 or 64 bits and does not always fit
+        * into page private data (i.e 32bit cpu with 64bit DMA caps)
+        * This mapping is kept for lifetime of page, until leaving pool.
+        */
+       dma = dma_map_page_attrs(pool->p.dev, page, 0,
+                                (PAGE_SIZE << pool->p.order),
+                                pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+       if (dma_mapping_error(pool->p.dev, dma))
+               return false;
+
+       page->dma_addr = dma;
+
+       if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+               page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
+
+       return true;
+}
+
 /* slow path */
 noinline
 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
                                                 gfp_t _gfp)
 {
+       unsigned int pp_flags = pool->p.flags;
        struct page *page;
        gfp_t gfp = _gfp;
-       dma_addr_t dma;
 
        /* We could always set __GFP_COMP, and avoid this branch, as
         * prep_new_page() can handle order-0 with __GFP_COMP.
@@ -211,30 +234,14 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
        if (!page)
                return NULL;
 
-       if (!(pool->p.flags & PP_FLAG_DMA_MAP))
-               goto skip_dma_map;
-
-       /* Setup DMA mapping: use 'struct page' area for storing DMA-addr
-        * since dma_addr_t can be either 32 or 64 bits and does not always fit
-        * into page private data (i.e 32bit cpu with 64bit DMA caps)
-        * This mapping is kept for lifetime of page, until leaving pool.
-        */
-       dma = dma_map_page_attrs(pool->p.dev, page, 0,
-                                (PAGE_SIZE << pool->p.order),
-                                pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-       if (dma_mapping_error(pool->p.dev, dma)) {
+       if ((pp_flags & PP_FLAG_DMA_MAP) &&
+           unlikely(!page_pool_dma_map(pool, page))) {
                put_page(page);
                return NULL;
        }
-       page->dma_addr = dma;
 
-       if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
-               page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
-
-skip_dma_map:
        /* Track how many pages are held 'in-flight' */
        pool->pages_state_hold_cnt++;
-
        trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
 
        /* When page just alloc'ed is should/must have refcnt 1. */
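
For reference, the slow-path allocation logic after this patch condenses to
the following (reconstructed from the hunks above; unchanged code is elided
with comments, so this is a reading aid rather than a compilable unit):

static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t _gfp)
{
	unsigned int pp_flags = pool->p.flags;
	struct page *page;
	gfp_t gfp = _gfp;

	/* ... __GFP_COMP handling and page allocation, unchanged ... */
	if (!page)
		return NULL;

	/* A single failure exit replaces the old skip_dma_map label:
	 * the helper returns false on dma_mapping_error().
	 */
	if ((pp_flags & PP_FLAG_DMA_MAP) &&
	    unlikely(!page_pool_dma_map(pool, page))) {
		put_page(page);
		return NULL;
	}

	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);

	/* ... refcount sanity check and return, unchanged ... */
}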