mm, vmalloc: iterate vmap_area_list, instead of vmlist in vread/vwrite()
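In outline: vread() and vwrite() previously walked vmlist under vmlist_lock; this patch makes them walk vmap_area_list under vmap_area_lock instead. To make following va->vm safe during that walk, setup_vmalloc_vm() now publishes the vm_struct pointer and the VM_VM_AREA flag while holding vmap_area_lock, and remove_vm_area() clears both under the same lock before the area is torn down. Areas without VM_VM_AREA set are simply skipped.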
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0f751f2..59aa328 100644
@@ -1290,12 +1290,14 @@ struct vm_struct *vmlist;
 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
                              unsigned long flags, const void *caller)
 {
+       spin_lock(&vmap_area_lock);
        vm->flags = flags;
        vm->addr = (void *)va->va_start;
        vm->size = va->va_end - va->va_start;
        vm->caller = caller;
        va->vm = vm;
        va->flags |= VM_VM_AREA;
+       spin_unlock(&vmap_area_lock);
 }
 
 static void insert_vmalloc_vmlist(struct vm_struct *vm)
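Publishing va->vm and VM_VM_AREA under vmap_area_lock means any vmap_area_list walker holding the same lock observes either a fully initialized vm_struct or none at all; there is no window in which vm is visible half set up.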
@@ -1447,6 +1449,11 @@ struct vm_struct *remove_vm_area(const void *addr)
        if (va && va->flags & VM_VM_AREA) {
                struct vm_struct *vm = va->vm;
 
+               spin_lock(&vmap_area_lock);
+               va->vm = NULL;
+               va->flags &= ~VM_VM_AREA;
+               spin_unlock(&vmap_area_lock);
+
                if (!(vm->flags & VM_UNLIST)) {
                        struct vm_struct *tmp, **p;
                        /*
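This is the teardown counterpart: va->vm and VM_VM_AREA are retracted under vmap_area_lock before the vm_struct is unlinked and freed, so a concurrent vread()/vwrite() holding the lock either still sees a valid vm_struct or skips the area entirely.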
@@ -2005,7 +2012,8 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
 
 long vread(char *buf, char *addr, unsigned long count)
 {
-       struct vm_struct *tmp;
+       struct vmap_area *va;
+       struct vm_struct *vm;
        char *vaddr, *buf_start = buf;
        unsigned long buflen = count;
        unsigned long n;
@@ -2014,10 +2022,17 @@ long vread(char *buf, char *addr, unsigned long count)
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;
 
-       read_lock(&vmlist_lock);
-       for (tmp = vmlist; count && tmp; tmp = tmp->next) {
-               vaddr = (char *) tmp->addr;
-               if (addr >= vaddr + tmp->size - PAGE_SIZE)
+       spin_lock(&vmap_area_lock);
+       list_for_each_entry(va, &vmap_area_list, list) {
+               if (!count)
+                       break;
+
+               if (!(va->flags & VM_VM_AREA))
+                       continue;
+
+               vm = va->vm;
+               vaddr = (char *) vm->addr;
+               if (addr >= vaddr + vm->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
@@ -2027,10 +2042,10 @@ long vread(char *buf, char *addr, unsigned long count)
                        addr++;
                        count--;
                }
-               n = vaddr + tmp->size - PAGE_SIZE - addr;
+               n = vaddr + vm->size - PAGE_SIZE - addr;
                if (n > count)
                        n = count;
-               if (!(tmp->flags & VM_IOREMAP))
+               if (!(vm->flags & VM_IOREMAP))
                        aligned_vread(buf, addr, n);
                else /* IOREMAP area is treated as memory hole */
                        memset(buf, 0, n);
@@ -2039,7 +2054,7 @@ long vread(char *buf, char *addr, unsigned long count)
                count -= n;
        }
 finished:
-       read_unlock(&vmlist_lock);
+       spin_unlock(&vmap_area_lock);
 
        if (buf == buf_start)
                return 0;
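Both converted functions now follow the same traversal discipline, summarized in this illustrative sketch (not part of the patch; it assumes only the mm/vmalloc.c internals shown above):

	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		struct vm_struct *vm;

		/* Skip areas not (or no longer) backed by a vm_struct. */
		if (!(va->flags & VM_VM_AREA))
			continue;

		/* Safe: remove_vm_area() cannot clear va->vm while the
		 * lock is held. */
		vm = va->vm;

		/* ... inspect vm->addr, vm->size, vm->flags ... */
	}
	spin_unlock(&vmap_area_lock);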
@@ -2078,7 +2093,8 @@ finished:
 
 long vwrite(char *buf, char *addr, unsigned long count)
 {
-       struct vm_struct *tmp;
+       struct vmap_area *va;
+       struct vm_struct *vm;
        char *vaddr;
        unsigned long n, buflen;
        int copied = 0;
@@ -2088,10 +2104,17 @@ long vwrite(char *buf, char *addr, unsigned long count)
                count = -(unsigned long) addr;
        buflen = count;
 
-       read_lock(&vmlist_lock);
-       for (tmp = vmlist; count && tmp; tmp = tmp->next) {
-               vaddr = (char *) tmp->addr;
-               if (addr >= vaddr + tmp->size - PAGE_SIZE)
+       spin_lock(&vmap_area_lock);
+       list_for_each_entry(va, &vmap_area_list, list) {
+               if (!count)
+                       break;
+
+               if (!(va->flags & VM_VM_AREA))
+                       continue;
+
+               vm = va->vm;
+               vaddr = (char *) vm->addr;
+               if (addr >= vaddr + vm->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
@@ -2100,10 +2123,10 @@ long vwrite(char *buf, char *addr, unsigned long count)
                        addr++;
                        count--;
                }
-               n = vaddr + tmp->size - PAGE_SIZE - addr;
+               n = vaddr + vm->size - PAGE_SIZE - addr;
                if (n > count)
                        n = count;
-               if (!(tmp->flags & VM_IOREMAP)) {
+               if (!(vm->flags & VM_IOREMAP)) {
                        aligned_vwrite(buf, addr, n);
                        copied++;
                }
@@ -2112,7 +2135,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
                count -= n;
        }
 finished:
-       read_unlock(&vmlist_lock);
+       spin_unlock(&vmap_area_lock);
        if (!copied)
                return 0;
        return buflen;
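For context, a vread() caller (in the style of the /proc/kcore read path) looks roughly like the hedged sketch below; kbuf, uaddr, src and len are illustrative names and error handling is trimmed:

	char *kbuf = kmalloc(len, GFP_KERNEL);
	long ret;

	if (!kbuf)
		return -ENOMEM;

	/* Memory holes and ioremap ranges read back as zeroes; the
	 * return value is 0 only if [src, src + len) overlaps no
	 * vmalloc area at all. */
	ret = vread(kbuf, (char *)src, len);
	if (ret && copy_to_user(uaddr, kbuf, ret))
		ret = -EFAULT;
	kfree(kbuf);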
@@ -2645,5 +2668,49 @@ static int __init proc_vmalloc_init(void)
        return 0;
 }
 module_init(proc_vmalloc_init);
+
+void get_vmalloc_info(struct vmalloc_info *vmi)
+{
+       struct vm_struct *vma;
+       unsigned long free_area_size;
+       unsigned long prev_end;
+
+       vmi->used = 0;
+
+       if (!vmlist) {
+               vmi->largest_chunk = VMALLOC_TOTAL;
+       } else {
+               vmi->largest_chunk = 0;
+
+               prev_end = VMALLOC_START;
+
+               read_lock(&vmlist_lock);
+
+               for (vma = vmlist; vma; vma = vma->next) {
+                       unsigned long addr = (unsigned long) vma->addr;
+
+                       /*
+                        * Some archs keep another range for modules in vmlist
+                        */
+                       if (addr < VMALLOC_START)
+                               continue;
+                       if (addr >= VMALLOC_END)
+                               break;
+
+                       vmi->used += vma->size;
+
+                       free_area_size = addr - prev_end;
+                       if (vmi->largest_chunk < free_area_size)
+                               vmi->largest_chunk = free_area_size;
+
+                       prev_end = vma->size + addr;
+               }
+
+               if (VMALLOC_END - prev_end > vmi->largest_chunk)
+                       vmi->largest_chunk = VMALLOC_END - prev_end;
+
+               read_unlock(&vmlist_lock);
+       }
+}
 #endif
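The get_vmalloc_info() hunk above still walks vmlist under vmlist_lock (its conversion to vmap_area_list is a separate change); it computes the VmallocUsed and VmallocChunk figures reported in /proc/meminfo. The scan itself is a plain largest-gap computation over address-sorted areas, sketched here as self-contained user-space C (the names and the array input are assumptions for illustration):

	struct area { unsigned long addr, size; };

	static void vmalloc_info(const struct area *a, unsigned int n,
				 unsigned long start, unsigned long end,
				 unsigned long *used, unsigned long *chunk)
	{
		unsigned long prev_end = start;
		unsigned int i;

		*used = 0;
		*chunk = 0;
		for (i = 0; i < n; i++) {
			/* e.g. module ranges kept below the vmalloc range */
			if (a[i].addr < start)
				continue;
			if (a[i].addr >= end)
				break;
			*used += a[i].size;
			if (a[i].addr - prev_end > *chunk)
				*chunk = a[i].addr - prev_end;
			prev_end = a[i].addr + a[i].size;
		}
		/* The tail gap up to 'end' may be the largest chunk. */
		if (end - prev_end > *chunk)
			*chunk = end - prev_end;
	}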