page_pool: remove PP_FLAG_PAGE_FRAG 09/309709/1
author: Yunsheng Lin <linyunsheng@huawei.com>
Fri, 20 Oct 2023 09:59:49 +0000 (17:59 +0800)
committer: Jaehoon Chung <jh80.chung@samsung.com>
Tue, 16 Apr 2024 02:24:46 +0000 (11:24 +0900)
PP_FLAG_PAGE_FRAG is not really needed after pp_frag_count
handling is unified and page_pool_alloc_frag() is supported
in 32-bit arch with 64-bit DMA, so remove it.

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
CC: Lorenzo Bianconi <lorenzo@kernel.org>
CC: Alexander Duyck <alexander.duyck@gmail.com>
CC: Liang Chen <liangchen.linux@gmail.com>
CC: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://lore.kernel.org/r/20231020095952.11055-3-linyunsheng@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
(cherry picked from commit 09d96ee5674a0eaa800c664353756ecc45c4a87f)
Signed-off-by: Jaehoon Chung <jh80.chung@samsung.com>
Change-Id: I5d7223d1cb9baefea5c7e86f9a4de3e972c04a4a

drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/wireless/mediatek/mt76/mac80211.c
include/net/page_pool/types.h
net/core/page_pool.c
net/core/skbuff.c

index dac4f95..06b3789 100644 (file)
@@ -3214,8 +3214,6 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
        pp.dma_dir = bp->rx_dir;
        pp.max_len = PAGE_SIZE;
        pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
-       if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
-               pp.flags |= PP_FLAG_PAGE_FRAG;
 
        rxr->page_pool = page_pool_create(&pp);
        if (IS_ERR(rxr->page_pool)) {
index 677cfaa..b618797 100644 (file)
@@ -4940,8 +4940,7 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv)
 static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
 {
        struct page_pool_params pp_params = {
-               .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG |
-                               PP_FLAG_DMA_SYNC_DEV,
+               .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                .order = hns3_page_order(ring),
                .pool_size = ring->desc_num * hns3_buf_size(ring) /
                                (PAGE_SIZE << hns3_page_order(ring)),
index e6df4e6..02d0b70 100644 (file)
@@ -1409,7 +1409,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
        }
 
        pp_params.order = get_order(buf_size);
-       pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
+       pp_params.flags = PP_FLAG_DMA_MAP;
        pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
        pp_params.nid = NUMA_NO_NODE;
        pp_params.dev = pfvf->dev;
index c3961c2..8707691 100644 (file)
@@ -834,7 +834,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
                struct page_pool_params pp_params = { 0 };
 
                pp_params.order     = 0;
-               pp_params.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | PP_FLAG_PAGE_FRAG;
+               pp_params.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
                pp_params.pool_size = pool_size;
                pp_params.nid       = node;
                pp_params.dev       = rq->pdev;
index dbab400..8d66825 100644 (file)
@@ -566,7 +566,7 @@ int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
 {
        struct page_pool_params pp_params = {
                .order = 0,
-               .flags = PP_FLAG_PAGE_FRAG,
+               .flags = 0,
                .nid = NUMA_NO_NODE,
                .dev = dev->dma_dev,
        };
index 887e794..6fc5134 100644 (file)
                                        * Please note DMA-sync-for-CPU is still
                                        * device driver responsibility
                                        */
-#define PP_FLAG_PAGE_FRAG      BIT(2) /* for page frag feature */
 #define PP_FLAG_ALL            (PP_FLAG_DMA_MAP |\
-                                PP_FLAG_DMA_SYNC_DEV |\
-                                PP_FLAG_PAGE_FRAG)
+                                PP_FLAG_DMA_SYNC_DEV)
 
 /*
  * Fast allocation side cache array/stack
@@ -45,7 +43,7 @@ struct pp_alloc_cache {
 
 /**
  * struct page_pool_params - page pool parameters
- * @flags:     PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_PAGE_FRAG
+ * @flags:     PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV
  * @order:     2^order pages on allocation
  * @pool_size: size of the ptr_ring
  * @nid:       NUMA node id to allocate from pages from
index 59bad96..11ce15a 100644 (file)
@@ -760,8 +760,7 @@ struct page *page_pool_alloc_frag(struct page_pool *pool,
        unsigned int max_size = PAGE_SIZE << pool->p.order;
        struct page *page = pool->frag_page;
 
-       if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
-                   size > max_size))
+       if (WARN_ON(size > max_size))
                return NULL;
 
        size = ALIGN(size, dma_get_cache_alignment());
index 9a9fbe1..8c1dd63 100644 (file)
@@ -5752,7 +5752,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
        /* In general, avoid mixing page_pool and non-page_pool allocated
         * pages within the same SKB. Additionally avoid dealing with clones
         * with page_pool pages, in case the SKB is using page_pool fragment
-        * references (PP_FLAG_PAGE_FRAG). Since we only take full page
+        * references (page_pool_alloc_frag()). Since we only take full page
         * references for cloned SKBs at the moment that would result in
         * inconsistent reference counts.
         * In theory we could take full references if @from is cloned and