mm, vmalloc: remove list management of vmlist after initializing vmalloc
[platform/adaptation/renesas_rcar/renesas_kernel.git] / mm / vmalloc.c
index bda6cef..72043d6 100644 (file)
@@ -249,19 +249,9 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
 #define VM_LAZY_FREEING        0x02
 #define VM_VM_AREA     0x04
 
-struct vmap_area {
-       unsigned long va_start;
-       unsigned long va_end;
-       unsigned long flags;
-       struct rb_node rb_node;         /* address sorted rbtree */
-       struct list_head list;          /* address sorted list */
-       struct list_head purge_list;    /* "lazy purge" list */
-       struct vm_struct *vm;
-       struct rcu_head rcu_head;
-};
-
 static DEFINE_SPINLOCK(vmap_area_lock);
-static LIST_HEAD(vmap_area_list);
+/* Export for kexec only */
+LIST_HEAD(vmap_area_list);
 static struct rb_root vmap_area_root = RB_ROOT;
 
 /* The vmap cache globals are protected by vmap_area_lock */
@@ -313,7 +303,7 @@ static void __insert_vmap_area(struct vmap_area *va)
        rb_link_node(&va->rb_node, parent, p);
        rb_insert_color(&va->rb_node, &vmap_area_root);
 
-       /* address-sort this list so it is usable like the vmlist */
+       /* address-sort this list */
        tmp = rb_prev(&va->rb_node);
        if (tmp) {
                struct vmap_area *prev;
@@ -1125,6 +1115,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 }
 EXPORT_SYMBOL(vm_map_ram);
 
+static struct vm_struct *vmlist __initdata;
 /**
  * vm_area_add_early - add vmap area early during boot
  * @vm: vm_struct to add
@@ -1283,10 +1274,6 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 }
 EXPORT_SYMBOL_GPL(map_vm_area);
 
-/*** Old vmalloc interfaces ***/
-DEFINE_RWLOCK(vmlist_lock);
-struct vm_struct *vmlist;
-
 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
                              unsigned long flags, const void *caller)
 {
@@ -1300,10 +1287,8 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
        spin_unlock(&vmap_area_lock);
 }
 
-static void insert_vmalloc_vmlist(struct vm_struct *vm)
+static void clear_vm_unlist(struct vm_struct *vm)
 {
-       struct vm_struct *tmp, **p;
-
        /*
         * Before removing VM_UNLIST,
         * we should make sure that vm has proper values.
@@ -1311,22 +1296,13 @@ static void insert_vmalloc_vmlist(struct vm_struct *vm)
         */
        smp_wmb();
        vm->flags &= ~VM_UNLIST;
-
-       write_lock(&vmlist_lock);
-       for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
-               if (tmp->addr >= vm->addr)
-                       break;
-       }
-       vm->next = *p;
-       *p = vm;
-       write_unlock(&vmlist_lock);
 }
 
 static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
                              unsigned long flags, const void *caller)
 {
        setup_vmalloc_vm(vm, va, flags, caller);
-       insert_vmalloc_vmlist(vm);
+       clear_vm_unlist(vm);
 }
 
 static struct vm_struct *__get_vm_area_node(unsigned long size,
@@ -1369,10 +1345,9 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 
        /*
         * When this function is called from __vmalloc_node_range,
-        * we do not add vm_struct to vmlist here to avoid
-        * accessing uninitialized members of vm_struct such as
-        * pages and nr_pages fields. They will be set later.
-        * To distinguish it from others, we use a VM_UNLIST flag.
+        * we add VM_UNLIST flag to avoid accessing uninitialized
+        * members of vm_struct such as pages and nr_pages fields.
+        * They will be set later.
         */
        if (flags & VM_UNLIST)
                setup_vmalloc_vm(area, va, flags, caller);
@@ -1461,20 +1436,6 @@ struct vm_struct *remove_vm_area(const void *addr)
                va->flags &= ~VM_VM_AREA;
                spin_unlock(&vmap_area_lock);
 
-               if (!(vm->flags & VM_UNLIST)) {
-                       struct vm_struct *tmp, **p;
-                       /*
-                        * remove from list and disallow access to
-                        * this vm_struct before unmap. (address range
-                        * confliction is maintained by vmap.)
-                        */
-                       write_lock(&vmlist_lock);
-                       for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
-                               ;
-                       *p = tmp->next;
-                       write_unlock(&vmlist_lock);
-               }
-
                vmap_debug_free_range(va->va_start, va->va_end);
                free_unmap_vmap_area(va);
                vm->size -= PAGE_SIZE;
@@ -1694,10 +1655,11 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
                return NULL;
 
        /*
-        * In this function, newly allocated vm_struct is not added
-        * to vmlist at __get_vm_area_node(). so, it is added here.
+        * In this function, newly allocated vm_struct has VM_UNLIST flag.
+        * It means that vm_struct is not fully initialized.
+        * Now, it is fully initialized, so remove this flag here.
         */
-       insert_vmalloc_vmlist(area);
+       clear_vm_unlist(area);
 
        /*
         * A ref_count = 3 is needed because the vm_struct and vmap_area
@@ -2593,7 +2555,7 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v)
                if (!counters)
                        return;
 
-               /* Pair with smp_wmb() in insert_vmalloc_vmlist() */
+               /* Pair with smp_wmb() in clear_vm_unlist() */
                smp_rmb();
                if (v->flags & VM_UNLIST)
                        return;