mm/vmalloc: add a safer version of find_vm_area() for debug
author Joel Fernandes (Google) <joel@joelfernandes.org>
Mon, 4 Sep 2023 18:08:04 +0000 (18:08 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 13 Sep 2023 07:43:00 +0000 (09:43 +0200)
commit 0818e739b5c061b0251c30152380600fb9b84c0c upstream.

It is unsafe to dump vmalloc area information when trying to do so from
some contexts.  Add a safer trylock version of the same function to do a
best-effort VMA finding and use it from vmalloc_dump_obj().
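The pattern is easiest to see outside the kernel. Below is a minimal user-space sketch of the same idea using pthread_mutex_trylock(); all names (region, region_lock, region_find_locked, region_dump) are made up for illustration and are not kernel code: try to take the lock without blocking, bail out if it is contended, snapshot the fields to be printed, and drop the lock before printing.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Illustration only: a toy lock-protected region table and a best-effort
 * dump helper mirroring the patched vmalloc_dump_obj(). All names here are
 * made up; none of this is kernel code.
 */
struct region {
	unsigned long start;
	unsigned int nr_pages;
};

static pthread_mutex_t region_lock = PTHREAD_MUTEX_INITIALIZER;
static struct region regions[] = { { 0x1000, 4 }, { 0x9000, 2 } };

/* Lookup helper; the caller must hold region_lock. */
static struct region *region_find_locked(unsigned long addr)
{
	for (size_t i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
		if (regions[i].start == addr)
			return &regions[i];
	return NULL;
}

/*
 * Best-effort dump: if the lock cannot be taken immediately, give up and
 * report failure rather than blocking (or deadlocking) in a context where
 * waiting is unsafe.
 */
static bool region_dump(unsigned long addr)
{
	struct region *r;
	unsigned long start;
	unsigned int nr_pages;

	if (pthread_mutex_trylock(&region_lock) != 0)
		return false;			/* lock busy: skip the debug output */

	r = region_find_locked(addr);
	if (!r) {
		pthread_mutex_unlock(&region_lock);
		return false;
	}

	/* Snapshot the fields, then drop the lock before printing. */
	start = r->start;
	nr_pages = r->nr_pages;
	pthread_mutex_unlock(&region_lock);

	printf("%u-page region starting at %#lx\n", nr_pages, start);
	return true;
}

int main(void)
{
	region_dump(0x1000);	/* known region: prints and returns true */
	region_dump(0x5000);	/* unknown address: returns false quietly */
	return 0;
}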

[applied test robot feedback on unused function fix.]
[applied Uladzislau feedback on locking.]
Link: https://lkml.kernel.org/r/20230904180806.1002832-1-joel@joelfernandes.org
Fixes: 98f180837a89 ("mm: Make mem_dump_obj() handle vmalloc() memory")
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reported-by: Zhen Lei <thunder.leizhen@huaweicloud.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Zqiang <qiang.zhang1211@gmail.com>
Cc: <stable@vger.kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 80bd104..67a10a0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4041,14 +4041,32 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 #ifdef CONFIG_PRINTK
 bool vmalloc_dump_obj(void *object)
 {
-       struct vm_struct *vm;
        void *objp = (void *)PAGE_ALIGN((unsigned long)object);
+       const void *caller;
+       struct vm_struct *vm;
+       struct vmap_area *va;
+       unsigned long addr;
+       unsigned int nr_pages;
+
+       if (!spin_trylock(&vmap_area_lock))
+               return false;
+       va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
+       if (!va) {
+               spin_unlock(&vmap_area_lock);
+               return false;
+       }
 
-       vm = find_vm_area(objp);
-       if (!vm)
+       vm = va->vm;
+       if (!vm) {
+               spin_unlock(&vmap_area_lock);
                return false;
+       }
+       addr = (unsigned long)vm->addr;
+       caller = vm->caller;
+       nr_pages = vm->nr_pages;
+       spin_unlock(&vmap_area_lock);
        pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
-               vm->nr_pages, (unsigned long)vm->addr, vm->caller);
+               nr_pages, addr, caller);
        return true;
 }
 #endif
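
A note on why the trylock matters (an interpretation, not text from the patch): if the dump is attempted from a context that interrupted, or is nested inside, a holder of vmap_area_lock, an unconditional spin_lock() could deadlock, whereas spin_trylock() simply skips the debug output. The toy user-space program below isolates that property with pthread_mutex_trylock(); note the inverted return convention (pthread_mutex_trylock() returns 0 on success, while spin_trylock() returns nonzero on success).

#include <pthread.h>
#include <stdio.h>

/* Illustration only, not kernel code: a trylock-based dump path degrades to
 * "no output" instead of deadlocking when the lock is already taken on the
 * current path. (pthread_mutex_trylock() returns 0 on success, whereas the
 * kernel's spin_trylock() returns nonzero on success.) */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	pthread_mutex_lock(&lock);		/* simulate code that already owns the lock */

	if (pthread_mutex_trylock(&lock) != 0)	/* fails with EBUSY instead of blocking */
		printf("lock busy, skipping debug dump\n");
	else
		pthread_mutex_unlock(&lock);	/* (not reached here) dump, then release */

	pthread_mutex_unlock(&lock);
	return 0;
}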