mm: rcu safe VMA freeing
author: Michel Lespinasse <michel@lespinasse.org>
Mon, 27 Feb 2023 17:36:09 +0000 (09:36 -0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Thu, 6 Apr 2023 03:02:57 +0000 (20:02 -0700)
This prepares for handling page faults under the VMA lock, looking up VMAs
under the protection of an RCU read lock instead of the usual mmap read lock.

Link: https://lkml.kernel.org/r/20230227173632.3292573-11-surenb@google.com
Signed-off-by: Michel Lespinasse <michel@lespinasse.org>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm_types.h
kernel/fork.c

index 651a5c2..f203e25 100644 (file)
@@ -480,9 +480,16 @@ struct anon_vma_name {
 struct vm_area_struct {
        /* The first cache line has the info for VMA tree walking. */
 
-       unsigned long vm_start;         /* Our start address within vm_mm. */
-       unsigned long vm_end;           /* The first byte after our end address
-                                          within vm_mm. */
+       union {
+               struct {
+                       /* VMA covers [vm_start; vm_end) addresses within mm */
+                       unsigned long vm_start;
+                       unsigned long vm_end;
+               };
+#ifdef CONFIG_PER_VMA_LOCK
+               struct rcu_head vm_rcu; /* Used for deferred freeing. */
+#endif
+       };
 
        struct mm_struct *vm_mm;        /* The address space we belong to. */
        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
index cea99f0..93ec6e1 100644 (file)
@@ -479,12 +479,30 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
        return new;
 }
 
-void vm_area_free(struct vm_area_struct *vma)
+static void __vm_area_free(struct vm_area_struct *vma)
 {
        free_anon_vma_name(vma);
        kmem_cache_free(vm_area_cachep, vma);
 }
 
+#ifdef CONFIG_PER_VMA_LOCK
+static void vm_area_free_rcu_cb(struct rcu_head *head)
+{
+       struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
+                                                 vm_rcu);
+       __vm_area_free(vma);
+}
+#endif
+
+void vm_area_free(struct vm_area_struct *vma)
+{
+#ifdef CONFIG_PER_VMA_LOCK
+       call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb);
+#else
+       __vm_area_free(vma);
+#endif
+}
+
 static void account_kernel_stack(struct task_struct *tsk, int account)
 {
        if (IS_ENABLED(CONFIG_VMAP_STACK)) {