mm: remove __vfree_deferred
Author: Christoph Hellwig <hch@lst.de>
Sat, 21 Jan 2023 07:10:44 +0000 (08:10 +0100)
Committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 3 Feb 2023 06:33:31 +0000 (22:33 -0800)
Fold __vfree_deferred into vfree_atomic, and call vfree_atomic early on
from vfree if called from interrupt context so that the extra low-level
helper can be avoided.

Link: https://lkml.kernel.org/r/20230121071051.1143058-4-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmalloc.c

index 67fc9d7..cfd7965 100644 (file)
@@ -2754,20 +2754,6 @@ static void __vunmap(const void *addr, int deallocate_pages)
        kfree(area);
 }
 
-static inline void __vfree_deferred(const void *addr)
-{
-       /*
-        * Use raw_cpu_ptr() because this can be called from preemptible
-        * context. Preemption is absolutely fine here, because the llist_add()
-        * implementation is lockless, so it works even if we are adding to
-        * another cpu's list. schedule_work() should be fine with this too.
-        */
-       struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
-
-       if (llist_add((struct llist_node *)addr, &p->list))
-               schedule_work(&p->wq);
-}
-
 /**
  * vfree_atomic - release memory allocated by vmalloc()
  * @addr:        memory base address
@@ -2777,13 +2763,19 @@ static inline void __vfree_deferred(const void *addr)
  */
 void vfree_atomic(const void *addr)
 {
-       BUG_ON(in_nmi());
+       struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
 
+       BUG_ON(in_nmi());
        kmemleak_free(addr);
 
-       if (!addr)
-               return;
-       __vfree_deferred(addr);
+       /*
+        * Use raw_cpu_ptr() because this can be called from preemptible
+        * context. Preemption is absolutely fine here, because the llist_add()
+        * implementation is lockless, so it works even if we are adding to
+        * another cpu's list. schedule_work() should be fine with this too.
+        */
+       if (addr && llist_add((struct llist_node *)addr, &p->list))
+               schedule_work(&p->wq);
 }
 
 /**
@@ -2805,17 +2797,16 @@ void vfree_atomic(const void *addr)
  */
 void vfree(const void *addr)
 {
-       BUG_ON(in_nmi());
+       if (unlikely(in_interrupt())) {
+               vfree_atomic(addr);
+               return;
+       }
 
+       BUG_ON(in_nmi());
        kmemleak_free(addr);
+       might_sleep();
 
-       might_sleep_if(!in_interrupt());
-
-       if (!addr)
-               return;
-       if (unlikely(in_interrupt()))
-               __vfree_deferred(addr);
-       else
+       if (addr)
                __vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);