diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 3c4c4c7..1a69784 100644
 #include <linux/dma-mapping.h>
 #include <linux/page-flags.h>
 #include <linux/mm.h> /* for __put_page() */
+#include <linux/poison.h>
 
 #include <trace/events/page_pool.h>
 
 #define DEFER_TIME (msecs_to_jiffies(1000))
 #define DEFER_WARN_INTERVAL (60 * HZ)
 
+#define BIAS_MAX       LONG_MAX
+
 static int page_pool_init(struct page_pool *pool,
                          const struct page_pool_params *params)
 {
@@ -66,6 +69,10 @@ static int page_pool_init(struct page_pool *pool,
                 */
        }
 
+       if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
+           pool->p.flags & PP_FLAG_PAGE_FRAG)
+               return -EINVAL;
+
        if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
                return -ENOMEM;
 
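The new rejection above exists because, on 32-bit platforms with a 64-bit dma_addr_t, the DMA address needs the struct page word that would otherwise hold the frag count, so DMA mapping and page-frag tracking cannot coexist. A paraphrase of the mainline definition (the macro lives in include/net/page_pool.h, not this file):

	/* Paraphrased from include/net/page_pool.h: when dma_addr_t does
	 * not fit in a single unsigned long, the DMA address spills into
	 * the word that otherwise holds pp_frag_count, so page_pool_init()
	 * must reject PP_FLAG_PAGE_FRAG on such configurations.
	 */
	#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT	\
			(sizeof(dma_addr_t) > sizeof(unsigned long))
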
@@ -205,6 +212,19 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
        return true;
 }
 
+static void page_pool_set_pp_info(struct page_pool *pool,
+                                 struct page *page)
+{
+       page->pp = pool;
+       page->pp_magic |= PP_SIGNATURE;
+}
+
+static void page_pool_clear_pp_info(struct page *page)
+{
+       page->pp_magic = 0;
+       page->pp = NULL;
+}
+
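These helpers tag every pool-owned page so the skb free path can recognise it later (see page_pool_return_skb_page() below). PP_SIGNATURE builds on POISON_POINTER_DELTA, which is why <linux/poison.h> is now included: pp_magic aliases page->lru.next, so the signature must stay clear of the list-poison values. A minimal sketch of the ownership test, with a hypothetical helper name and assuming the mainline (0x40 + POISON_POINTER_DELTA) signature definition:

	/* Hypothetical helper; PP_SIGNATURE is assumed to match the
	 * mainline definition in include/linux/mm.h.
	 */
	static inline bool page_is_page_pool(const struct page *page)
	{
		/* bits 0 and 1 may carry compound-head/pfmemalloc state */
		return (page->pp_magic & ~0x3UL) == PP_SIGNATURE;
	}
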
 static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
                                                 gfp_t gfp)
 {
@@ -221,6 +241,8 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
                return NULL;
        }
 
+       page_pool_set_pp_info(pool, page);
+
        /* Track how many pages are held 'in-flight' */
        pool->pages_state_hold_cnt++;
        trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
@@ -263,6 +285,8 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
                        put_page(page);
                        continue;
                }
+
+               page_pool_set_pp_info(pool, page);
                pool->alloc.cache[pool->alloc.count++] = page;
                /* Track how many pages are held 'in-flight' */
                pool->pages_state_hold_cnt++;
@@ -341,10 +365,12 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
                             DMA_ATTR_SKIP_CPU_SYNC);
        page_pool_set_dma_addr(page, 0);
 skip_dma_unmap:
+       page_pool_clear_pp_info(page);
+
        /* This may be the last page returned, releasing the pool, so
         * it is not safe to reference pool afterwards.
         */
-       count = atomic_inc_return(&pool->pages_state_release_cnt);
+       count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
        trace_page_pool_state_release(pool, page, count);
 }
 EXPORT_SYMBOL(page_pool_release_page);
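The hold/release counters pair up for in-flight accounting, and no ordering is required around the counter update itself: the value feeds the tracepoint and a u32 subtraction, so the relaxed increment is sufficient. Roughly how the in-flight count is derived elsewhere in this file (paraphrased, not the verbatim helper):

	/* Paraphrased from page_pool_inflight(): outstanding pages are
	 * the difference of the two monotonic counters, computed in u32
	 * so wraparound cancels out.
	 */
	static s32 pool_inflight(const struct page_pool *pool)
	{
		u32 hold = READ_ONCE(pool->pages_state_hold_cnt);
		u32 release = atomic_read(&pool->pages_state_release_cnt);

		return (s32)(hold - release);
	}
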
@@ -399,6 +425,11 @@ static __always_inline struct page *
 __page_pool_put_page(struct page_pool *pool, struct page *page,
                     unsigned int dma_sync_size, bool allow_direct)
 {
+       /* In the page frag case, bail out if this is not the last user */
+       if (pool->p.flags & PP_FLAG_PAGE_FRAG &&
+           page_pool_atomic_sub_frag_count_return(page, 1))
+               return NULL;
+
        /* This allocator is optimized for the XDP mode that uses
         * one-frame-per-page, but have fallbacks that act like the
         * regular page allocator APIs.
@@ -491,6 +522,84 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 }
 EXPORT_SYMBOL(page_pool_put_page_bulk);
 
+static struct page *page_pool_drain_frag(struct page_pool *pool,
+                                        struct page *page)
+{
+       long drain_count = BIAS_MAX - pool->frag_users;
+
+       /* Some user is still using the page frag */
+       if (likely(page_pool_atomic_sub_frag_count_return(page,
+                                                         drain_count)))
+               return NULL;
+
+       if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) {
+               if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+                       page_pool_dma_sync_for_device(pool, page, -1);
+
+               return page;
+       }
+
+       page_pool_return_page(pool, page);
+       return NULL;
+}
+
+static void page_pool_free_frag(struct page_pool *pool)
+{
+       long drain_count = BIAS_MAX - pool->frag_users;
+       struct page *page = pool->frag_page;
+
+       pool->frag_page = NULL;
+
+       if (!page ||
+           page_pool_atomic_sub_frag_count_return(page, drain_count))
+               return;
+
+       page_pool_return_page(pool, page);
+}
+
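The drain logic above relies on a biasing trick: page_pool_set_frag_count() charges the page with BIAS_MAX up front, each page_pool_alloc_frag() call only bumps the pool-private frag_users (no atomic per sub-allocation), and every user drops exactly one count on put. Draining subtracts the unused bias, BIAS_MAX - frag_users, so the atomic count reaches zero precisely when the last user is gone. A stand-alone userspace model of the arithmetic (the names mirror, but are not, the kernel helpers):

	#include <limits.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define BIAS_MAX	LONG_MAX

	static atomic_long frag_count;	/* models page->pp_frag_count */
	static long frag_users;		/* pool-private, no atomics needed */

	/* models page_pool_atomic_sub_frag_count_return(): value after sub */
	static long frag_sub_return(long nr)
	{
		return atomic_fetch_sub(&frag_count, nr) - nr;
	}

	int main(void)
	{
		atomic_store(&frag_count, BIAS_MAX); /* set_frag_count() */
		frag_users = 3;			/* three frags handed out */

		frag_sub_return(1);		/* user 1 done */
		frag_sub_return(1);		/* user 2 done */
		frag_sub_return(1);		/* user 3 done */

		/* drain: subtract the bias that was never handed out */
		printf("%ld\n", frag_sub_return(BIAS_MAX - frag_users));
		return 0;
	}

The final subtraction printing 0 is exactly the condition page_pool_drain_frag() and page_pool_free_frag() test for: a zero return means the pool is the last owner and may reuse or return the page.
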
+struct page *page_pool_alloc_frag(struct page_pool *pool,
+                                 unsigned int *offset,
+                                 unsigned int size, gfp_t gfp)
+{
+       unsigned int max_size = PAGE_SIZE << pool->p.order;
+       struct page *page = pool->frag_page;
+
+       if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
+                   size > max_size))
+               return NULL;
+
+       size = ALIGN(size, dma_get_cache_alignment());
+       *offset = pool->frag_offset;
+
+       if (page && *offset + size > max_size) {
+               page = page_pool_drain_frag(pool, page);
+               if (page)
+                       goto frag_reset;
+       }
+
+       if (!page) {
+               page = page_pool_alloc_pages(pool, gfp);
+               if (unlikely(!page)) {
+                       pool->frag_page = NULL;
+                       return NULL;
+               }
+
+               pool->frag_page = page;
+
+frag_reset:
+               pool->frag_users = 1;
+               *offset = 0;
+               pool->frag_offset = size;
+               page_pool_set_frag_count(page, BIAS_MAX);
+               return page;
+       }
+
+       pool->frag_users++;
+       pool->frag_offset = *offset + size;
+       return page;
+}
+EXPORT_SYMBOL(page_pool_alloc_frag);
+
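A driver-side sketch of how the new API is meant to be consumed; the rx_buf structure and fill_rx_buf() are hypothetical, and the pool must have been created with PP_FLAG_PAGE_FRAG set in page_pool_params.flags:

	struct rx_buf {			/* hypothetical driver type */
		struct page *page;
		unsigned int offset;
	};

	static int fill_rx_buf(struct page_pool *pool, struct rx_buf *buf,
			       unsigned int len)
	{
		buf->page = page_pool_alloc_frag(pool, &buf->offset, len,
						 GFP_ATOMIC);
		if (unlikely(!buf->page))
			return -ENOMEM;

		/* page_address(buf->page) + buf->offset is a len-byte
		 * slice (rounded up to the cache-line alignment above);
		 * several rx_bufs may share one page, each holding one
		 * frag count.
		 */
		return 0;
	}
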
 static void page_pool_empty_ring(struct page_pool *pool)
 {
        struct page *page;
@@ -596,6 +705,8 @@ void page_pool_destroy(struct page_pool *pool)
        if (!page_pool_put(pool))
                return;
 
+       page_pool_free_frag(pool);
+
        if (!page_pool_release(pool))
                return;
 
@@ -622,3 +733,32 @@ void page_pool_update_nid(struct page_pool *pool, int new_nid)
        }
 }
 EXPORT_SYMBOL(page_pool_update_nid);
+
+bool page_pool_return_skb_page(struct page *page)
+{
+       struct page_pool *pp;
+
+       page = compound_head(page);
+
+       /* page->pp_magic is OR'ed with PP_SIGNATURE after allocation
+        * in order to preserve any existing bits, such as bit 0 for
+        * the head page of a compound page and bit 1 for a pfmemalloc
+        * page, so mask those bits off in the check below on the
+        * freeing side; page_is_pfmemalloc() is checked in
+        * __page_pool_put_page() to avoid recycling pfmemalloc pages.
+        */
+       if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE))
+               return false;
+
+       pp = page->pp;
+
+       /* The driver set page->pp to its memory recycling info; reset
+        * it on recycle.  This will *not* work for NICs using a
+        * split-page memory model: the page is returned to the pool
+        * here regardless of whether the 'flipped' fragment is in use.
+        */
+       page_pool_put_full_page(pp, page, false);
+
+       return true;
+}
+EXPORT_SYMBOL(page_pool_return_skb_page);
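For context on the consumer side: a driver opts an skb into this path with skb_mark_for_recycle(), and the skb free path then tries the pool before falling back to put_page(). Roughly, paraphrased from net/core/skbuff.c of the same era:

	/* Paraphrased from net/core/skbuff.c: only skbs explicitly
	 * marked by the driver (skb->pp_recycle) take the recycle path.
	 */
	static bool skb_pp_recycle(struct sk_buff *skb, void *data)
	{
		if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
			return false;
		return page_pool_return_skb_page(virt_to_page(data));
	}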