dmapool: move debug code to own functions
author Keith Busch <kbusch@kernel.org>
Thu, 26 Jan 2023 21:51:19 +0000 (13:51 -0800)
committer Andrew Morton <akpm@linux-foundation.org>
Sat, 6 May 2023 17:33:37 +0000 (10:33 -0700)
Clean up the normal path by moving the debug code outside it.

Link: https://lkml.kernel.org/r/20230126215125.4069751-7-kbusch@meta.com
Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Tony Battersby <tonyb@cybernetics.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
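
[Editor's note] For readers skimming the diff below: the pattern applied here is to give each DMAPOOL_DEBUG-only check a named helper, with an empty stub compiled in when debugging is off, so the fast paths carry no #ifdef blocks at all. The following is a minimal, self-contained sketch of that shape, illustrative only and not the kernel code; TOY_POOL_DEBUG, struct toy_pool and POISON_FREED are stand-ins for the real DMAPOOL_DEBUG machinery this patch refactors.

    /*
     * Illustrative sketch, not the kernel code: the debug check lives in a
     * helper that becomes an empty stub when the debug option is off, so
     * the caller needs no #ifdef at all.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define POISON_FREED 0xa7	/* stand-in for POOL_POISON_FREED */

    struct toy_pool {
        size_t size;
    };

    #ifdef TOY_POOL_DEBUG
    static void pool_check_block(struct toy_pool *pool, void *block)
    {
        /* Debug build: verify the free poison is still intact. */
        unsigned char *data = block;
        size_t i;

        for (i = 0; i < pool->size; i++) {
            if (data[i] != POISON_FREED) {
                fprintf(stderr, "pool: block %p corrupted\n", block);
                break;
            }
        }
    }
    #else
    static void pool_check_block(struct toy_pool *pool, void *block)
    {
        /* Debug off: an empty stub the compiler can optimize away. */
    }
    #endif

    int main(void)
    {
        struct toy_pool pool = { .size = 16 };
        unsigned char *block = malloc(pool.size);

        if (!block)
            return 1;
        memset(block, POISON_FREED, pool.size);
        /* The call site is identical whether or not TOY_POOL_DEBUG is set. */
        pool_check_block(&pool, block);
        free(block);
        return 0;
    }

Because the stubs are static and empty, non-debug builds lose nothing, and the patch can delete the #ifdef blocks from pool_alloc_page(), pool_free_page(), dma_pool_alloc() and dma_pool_free() below.
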
mm/dmapool.c

index eaed3ffb42aa88462ef5f1ca4cac6f4a5542abe5..30b069e999968c4d6c4e62ff9d08d4fdfc2a0fb1 100644 (file)
@@ -96,6 +96,78 @@ static ssize_t pools_show(struct device *dev, struct device_attribute *attr, cha
 
 static DEVICE_ATTR_RO(pools);
 
+#ifdef DMAPOOL_DEBUG
+static void pool_check_block(struct dma_pool *pool, void *retval,
+                            unsigned int offset, gfp_t mem_flags)
+{
+       int i;
+       u8 *data = retval;
+       /* page->offset is stored in first 4 bytes */
+       for (i = sizeof(offset); i < pool->size; i++) {
+               if (data[i] == POOL_POISON_FREED)
+                       continue;
+               dev_err(pool->dev, "%s %s, %p (corrupted)\n",
+                       __func__, pool->name, retval);
+
+               /*
+                * Dump the first 4 bytes even if they are not
+                * POOL_POISON_FREED
+                */
+               print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
+                               data, pool->size, 1);
+               break;
+       }
+       if (!want_init_on_alloc(mem_flags))
+               memset(retval, POOL_POISON_ALLOCATED, pool->size);
+}
+
+static bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
+                         void *vaddr, dma_addr_t dma)
+{
+       unsigned int offset = vaddr - page->vaddr;
+       unsigned int chain = page->offset;
+
+       if ((dma - page->dma) != offset) {
+               dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
+                       __func__, pool->name, vaddr, &dma);
+               return true;
+       }
+
+       while (chain < pool->allocation) {
+               if (chain != offset) {
+                       chain = *(int *)(page->vaddr + chain);
+                       continue;
+               }
+               dev_err(pool->dev, "%s %s, dma %pad already free\n",
+                       __func__, pool->name, &dma);
+               return true;
+       }
+       memset(vaddr, POOL_POISON_FREED, pool->size);
+       return false;
+}
+
+static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
+{
+       memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
+}
+#else
+static void pool_check_block(struct dma_pool *pool, void *retval,
+                            unsigned int offset, gfp_t mem_flags)
+
+{
+}
+
+static bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
+                         void *vaddr, dma_addr_t dma)
+{
+       return false;
+}
+
+static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
+{
+}
+#endif
+
 /**
  * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
  * @name: name of pool, for diagnostics
@@ -223,9 +295,7 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
        page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
                                         &page->dma, mem_flags);
        if (page->vaddr) {
-#ifdef DMAPOOL_DEBUG
-               memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
-#endif
+               pool_init_page(pool, page);
                pool_initialise_page(pool, page);
                page->in_use = 0;
                page->offset = 0;
@@ -245,9 +315,7 @@ static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
 {
        dma_addr_t dma = page->dma;
 
-#ifdef DMAPOOL_DEBUG
-       memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
-#endif
+       pool_init_page(pool, page);
        dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
        list_del(&page->page_list);
        kfree(page);
@@ -336,29 +404,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
        page->offset = *(int *)(page->vaddr + offset);
        retval = offset + page->vaddr;
        *handle = offset + page->dma;
-#ifdef DMAPOOL_DEBUG
-       {
-               int i;
-               u8 *data = retval;
-               /* page->offset is stored in first 4 bytes */
-               for (i = sizeof(page->offset); i < pool->size; i++) {
-                       if (data[i] == POOL_POISON_FREED)
-                               continue;
-                       dev_err(pool->dev, "%s %s, %p (corrupted)\n",
-                               __func__, pool->name, retval);
-
-                       /*
-                        * Dump the first 4 bytes even if they are not
-                        * POOL_POISON_FREED
-                        */
-                       print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
-                                       data, pool->size, 1);
-                       break;
-               }
-       }
-       if (!want_init_on_alloc(mem_flags))
-               memset(retval, POOL_POISON_ALLOCATED, pool->size);
-#endif
+       pool_check_block(pool, retval, offset, mem_flags);
        spin_unlock_irqrestore(&pool->lock, flags);
 
        if (want_init_on_alloc(mem_flags))
@@ -394,7 +440,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 {
        struct dma_page *page;
        unsigned long flags;
-       unsigned int offset;
 
        spin_lock_irqsave(&pool->lock, flags);
        page = pool_find_page(pool, dma);
@@ -405,35 +450,16 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
                return;
        }
 
-       offset = vaddr - page->vaddr;
        if (want_init_on_free())
                memset(vaddr, 0, pool->size);
-#ifdef DMAPOOL_DEBUG
-       if ((dma - page->dma) != offset) {
+       if (pool_page_err(pool, page, vaddr, dma)) {
                spin_unlock_irqrestore(&pool->lock, flags);
-               dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
-                       __func__, pool->name, vaddr, &dma);
                return;
        }
-       {
-               unsigned int chain = page->offset;
-               while (chain < pool->allocation) {
-                       if (chain != offset) {
-                               chain = *(int *)(page->vaddr + chain);
-                               continue;
-                       }
-                       spin_unlock_irqrestore(&pool->lock, flags);
-                       dev_err(pool->dev, "%s %s, dma %pad already free\n",
-                               __func__, pool->name, &dma);
-                       return;
-               }
-       }
-       memset(vaddr, POOL_POISON_FREED, pool->size);
-#endif
 
        page->in_use--;
        *(int *)vaddr = page->offset;
-       page->offset = offset;
+       page->offset = vaddr - page->vaddr;
        /*
         * Resist a temptation to do
         *    if (!is_page_busy(page)) pool_free_page(pool, page);
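
[Editor's note] A word on the pool_page_err() check the hunk above calls into: in this allocator each free block stores the offset of the next free block in its first bytes (see the "page->offset is stored in first 4 bytes" comment), so a double free can be caught by walking that chain and looking for the block being freed. Below is a standalone toy version of the same walk, illustrative only; ALLOCATION, BLOCK_SIZE and the toy_* helpers are inventions for the sketch, not kernel symbols.

    /*
     * Illustrative sketch, not the kernel code: free blocks form a chain of
     * offsets embedded in the blocks themselves, so "already free" can be
     * detected by walking the chain before putting a block back on it.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define ALLOCATION 64	/* bytes per toy "page" */
    #define BLOCK_SIZE 16	/* bytes per block, >= sizeof(unsigned int) */

    static unsigned char page[ALLOCATION];
    static unsigned int free_head;	/* offset of the first free block */

    static void toy_init(void)
    {
        unsigned int off;

        for (off = 0; off < ALLOCATION; off += BLOCK_SIZE) {
            unsigned int next = off + BLOCK_SIZE;

            memcpy(page + off, &next, sizeof(next));
        }
        free_head = 0;
    }

    static unsigned int toy_alloc(void)
    {
        /* Sketch assumes the free list is non-empty. */
        unsigned int off = free_head;

        memcpy(&free_head, page + off, sizeof(free_head));
        return off;
    }

    static void toy_free(unsigned int off)
    {
        memcpy(page + off, &free_head, sizeof(free_head));
        free_head = off;
    }

    /* The double-free test: is this offset already on the free chain? */
    static bool toy_already_free(unsigned int off)
    {
        unsigned int chain = free_head;

        while (chain < ALLOCATION) {
            if (chain == off)
                return true;
            memcpy(&chain, page + chain, sizeof(chain));
        }
        return false;
    }

    int main(void)
    {
        unsigned int off;

        toy_init();
        off = toy_alloc();
        printf("freshly allocated: already free? %d\n", toy_already_free(off));
        toy_free(off);
        printf("after one free:    already free? %d\n", toy_already_free(off));
        return 0;
    }

In the real code this walk is what prints "dma %pad already free" and makes dma_pool_free() bail out before re-linking the block a second time.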