mm: rename anon_vma_lock to vma_lock_anon_vma
author Rik van Riel <riel@redhat.com>
Tue, 10 Aug 2010 00:18:37 +0000 (17:18 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 10 Aug 2010 03:44:54 +0000 (20:44 -0700)
Rename anon_vma_lock to vma_lock_anon_vma.  This matches the naming style
used in page_lock_anon_vma and will come in really handy further down in
this patch series.
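
For illustration only (this sketch is not part of the patch), the rename
makes the vma-side helpers follow the same naming pattern as the existing
page-side helpers.  A caller would look roughly like this, assuming the
2.6.35-era API in which the anon_vma is protected by a plain spinlock:

	/* Sketch: both sides now name the object they take first. */
	struct anon_vma *anon_vma;

	vma_lock_anon_vma(vma);		/* locks vma->anon_vma, if any */
	/* ... modify the vma while holding anon_vma->lock ... */
	vma_unlock_anon_vma(vma);

	anon_vma = page_lock_anon_vma(page);	/* page-side counterpart */
	if (anon_vma)
		page_unlock_anon_vma(anon_vma);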

Signed-off-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tested-by: Larry Woodman <lwoodman@redhat.com>
Acked-by: Larry Woodman <lwoodman@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/rmap.h
mm/mmap.c

index 7721674..80cd162 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -99,14 +99,14 @@ static inline struct anon_vma *page_anon_vma(struct page *page)
        return page_rmapping(page);
 }
 
-static inline void anon_vma_lock(struct vm_area_struct *vma)
+static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
 {
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                spin_lock(&anon_vma->lock);
 }
 
-static inline void anon_vma_unlock(struct vm_area_struct *vma)
+static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
 {
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
index e38e910..e26f1ea 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -452,12 +452,12 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
                spin_lock(&mapping->i_mmap_lock);
                vma->vm_truncate_count = mapping->truncate_count;
        }
-       anon_vma_lock(vma);
+       vma_lock_anon_vma(vma);
 
        __vma_link(mm, vma, prev, rb_link, rb_parent);
        __vma_link_file(vma);
 
-       anon_vma_unlock(vma);
+       vma_unlock_anon_vma(vma);
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);
 
@@ -1710,7 +1710,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
         */
        if (unlikely(anon_vma_prepare(vma)))
                return -ENOMEM;
-       anon_vma_lock(vma);
+       vma_lock_anon_vma(vma);
 
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
@@ -1721,7 +1721,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
        if (address < PAGE_ALIGN(address+4))
                address = PAGE_ALIGN(address+4);
        else {
-               anon_vma_unlock(vma);
+               vma_unlock_anon_vma(vma);
                return -ENOMEM;
        }
        error = 0;
@@ -1739,7 +1739,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                        perf_event_mmap(vma);
                }
        }
-       anon_vma_unlock(vma);
+       vma_unlock_anon_vma(vma);
        return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -1764,7 +1764,7 @@ static int expand_downwards(struct vm_area_struct *vma,
        if (error)
                return error;
 
-       anon_vma_lock(vma);
+       vma_lock_anon_vma(vma);
 
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
@@ -1786,7 +1786,7 @@ static int expand_downwards(struct vm_area_struct *vma,
                        perf_event_mmap(vma);
                }
        }
-       anon_vma_unlock(vma);
+       vma_unlock_anon_vma(vma);
        return error;
 }