mm/vmalloc.c: clean up map_vm_area third argument
author    WANG Chao <chaowang@redhat.com>
          Wed, 6 Aug 2014 23:06:58 +0000 (16:06 -0700)
committer Liviu Dudau <Liviu.Dudau@arm.com>
          Fri, 31 Oct 2014 12:22:12 +0000 (12:22 +0000)
Currently map_vm_area() takes (struct page ***pages) as its third
argument, and after mapping it advances (*pages) to point to (*pages +
nr_mapped_pages).

This increment is useless to the callers these days: none of them care
about it, and several actively work around it by passing a throwaway
copy of their pointer to map_vm_area().

The caller can always guarantee that all the pages fit into the vm_area
given in the first argument; it only cares about whether map_vm_area()
fails or not.

This patch cleans up the pointer movement in map_vm_area() and updates
its callers accordingly.
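
As a standalone illustration (a hedged model with stub types, not the
kernel code itself), the old and new calling conventions compare like
this:

#include <stddef.h>

/* Stub stand-ins for the kernel types, only to model the contract. */
struct page { int id; };
struct vm_struct { void *addr; };
typedef unsigned long pgprot_t;

/* Old contract: advance the caller's array pointer past the mapped pages. */
static int map_vm_area_old(struct vm_struct *area, pgprot_t prot,
			   struct page ***pages)
{
	*pages += 2;		/* pretend two pages were mapped */
	return 0;
}

/* New contract: take the array directly and leave it untouched. */
static int map_vm_area_new(struct vm_struct *area, pgprot_t prot,
			   struct page **pages)
{
	return 0;
}

int main(void)
{
	struct page p[2];
	struct page *pages[2] = { &p[0], &p[1] };
	struct vm_struct area = { NULL };
	struct page **pagep = pages;	/* old style: scratch copy shields pages[] */

	map_vm_area_old(&area, 0, &pagep);	/* pagep has moved; pages[] is safe */
	map_vm_area_new(&area, 0, pages);	/* new style: pass the array directly */
	return 0;
}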

Signed-off-by: WANG Chao <chaowang@redhat.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/tile/kernel/module.c
drivers/lguest/core.c
drivers/staging/android/binder.c
include/linux/vmalloc.h
mm/vmalloc.c
mm/zsmalloc.c

diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 4918d91bc3a660942a6aec8585be2bb41425d3df..d19b13e3a59fc967e175a5d51e829f98aac763dd 100644
@@ -58,7 +58,7 @@ void *module_alloc(unsigned long size)
        area->nr_pages = npages;
        area->pages = pages;
 
-       if (map_vm_area(area, prot_rwx, &pages)) {
+       if (map_vm_area(area, prot_rwx, pages)) {
                vunmap(area->addr);
                goto error;
        }
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 0bf1e4edf04d04a838c9d4cdc8f96aa17dfaa896..6590558d1d31c600b23c8d50f48b4e3b1c461326 100644
@@ -42,7 +42,6 @@ DEFINE_MUTEX(lguest_lock);
 static __init int map_switcher(void)
 {
        int i, err;
-       struct page **pagep;
 
        /*
         * Map the Switcher in to high memory.
@@ -110,11 +109,9 @@ static __init int map_switcher(void)
         * This code actually sets up the pages we've allocated to appear at
         * switcher_addr.  map_vm_area() takes the vma we allocated above, the
         * kind of pages we're mapping (kernel pages), and a pointer to our
-        * array of struct pages.  It increments that pointer, but we don't
-        * care.
+        * array of struct pages.
         */
-       pagep = lg_switcher_pages;
-       err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
+       err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, lg_switcher_pages);
        if (err) {
                printk("lguest: map_vm_area failed: %i\n", err);
                goto free_vma;
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index cfe4bc8f05cb82ec83b4e127ba53f8e72968a85c..9dfef3f7a745a10031fc5dc7e3f6ccc5d99c9206 100644
@@ -583,7 +583,6 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 
        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                int ret;
-               struct page **page_array_ptr;
                page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
 
                BUG_ON(*page);
@@ -595,8 +594,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
                }
                tmp_area.addr = page_addr;
                tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
-               page_array_ptr = page;
-               ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
+               ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
                if (ret) {
                        pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
                               proc->pid, page_addr);
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 4b8a89189a296143a5d17eac9e5bec5dafa42afe..b87696fdf06ab14d14ff01f5cfd7b2650f1e6c96 100644
@@ -113,7 +113,7 @@ extern struct vm_struct *remove_vm_area(const void *addr);
 extern struct vm_struct *find_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
-                       struct page ***pages);
+                       struct page **pages);
 #ifdef CONFIG_MMU
 extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
                                    pgprot_t prot, struct page **pages);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ddaf70b21b59b0487293111dc5a7a81bfd9860b5..617ca0ad472168d47d04ef0e4ab65e1a72f4af1c 100644
@@ -1269,19 +1269,15 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
        flush_tlb_kernel_range(addr, end);
 }
 
-int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
+int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
 {
        unsigned long addr = (unsigned long)area->addr;
        unsigned long end = addr + get_vm_area_size(area);
        int err;
 
-       err = vmap_page_range(addr, end, prot, *pages);
-       if (err > 0) {
-               *pages += err;
-               err = 0;
-       }
+       err = vmap_page_range(addr, end, prot, pages);
 
-       return err;
+       return err > 0 ? 0 : err;
 }
 EXPORT_SYMBOL_GPL(map_vm_area);
 
@@ -1547,7 +1543,7 @@ void *vmap(struct page **pages, unsigned int count,
        if (!area)
                return NULL;
 
-       if (map_vm_area(area, prot, &pages)) {
+       if (map_vm_area(area, prot, pages)) {
                vunmap(area->addr);
                return NULL;
        }
@@ -1603,7 +1599,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                area->pages[i] = page;
        }
 
-       if (map_vm_area(area, prot, &pages))
+       if (map_vm_area(area, prot, pages))
                goto fail;
        return area->addr;
 
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 5ae5d85b629d3a56f520ae7f4211c3024b0191f3..8698fe6048d0fb3d6b34edb5bc7a25f78192d7d4 100644
@@ -690,7 +690,7 @@ static inline void __zs_cpu_down(struct mapping_area *area)
 static inline void *__zs_map_object(struct mapping_area *area,
                                struct page *pages[2], int off, int size)
 {
-       BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages));
+       BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
        area->vm_addr = area->vm->addr;
        return area->vm_addr + off;
 }
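
A brief model of the return-value normalization in the mm/vmalloc.c hunk
above (collapse_ret() is a hypothetical name, not a kernel symbol; it
assumes vmap_page_range() returns the number of pages mapped on success
or a negative errno, as the new map_vm_area() relies on):

#include <assert.h>

/* map_vm_area() now folds vmap_page_range()'s nr-of-pages result into
 * the plain 0/-errno its callers expect. */
static int collapse_ret(int vmap_ret)
{
	return vmap_ret > 0 ? 0 : vmap_ret;
}

int main(void)
{
	assert(collapse_ret(4) == 0);		/* 4 pages mapped -> success */
	assert(collapse_ret(-12) == -12);	/* -ENOMEM passes straight through */
	return 0;
}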