platform/kernel/linux-rpi.git
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d77830f..f81f11b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1918,11 +1918,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
                return ERR_PTR(err);
        }
 
-       vbq = &get_cpu_var(vmap_block_queue);
+       get_cpu_light();
+       vbq = this_cpu_ptr(&vmap_block_queue);
        spin_lock(&vbq->lock);
        list_add_tail_rcu(&vb->free_list, &vbq->free);
        spin_unlock(&vbq->lock);
-       put_cpu_var(vmap_block_queue);
+       put_cpu_light();
 
        return vaddr;
 }
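
This hunk is the usual PREEMPT_RT conversion for per-CPU critical sections: get_cpu_var() keeps preemption disabled across the section, but under RT the vbq->lock spinlock becomes a sleeping lock, which must not be taken with preemption off. get_cpu_light() comes from the out-of-tree RT patchset and only disables migration, keeping the task on this CPU so the this_cpu_ptr() result stays stable. Roughly, the RT patch defines the pair like this (a sketch, not the mainline API):

    /* PREEMPT_RT patchset (sketch): pin the task to this CPU by
     * disabling migration rather than preemption, so taking the
     * RT-converted (sleeping) vbq->lock in the section stays legal. */
    #define get_cpu_light()   ({ migrate_disable(); smp_processor_id(); })
    #define put_cpu_light()   migrate_enable()
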
@@ -2001,7 +2002,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
        order = get_order(size);
 
        rcu_read_lock();
-       vbq = &get_cpu_var(vmap_block_queue);
+       get_cpu_light();
+       vbq = this_cpu_ptr(&vmap_block_queue);
        list_for_each_entry_rcu(vb, &vbq->free, free_list) {
                unsigned long pages_off;
 
@@ -2024,7 +2026,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
                break;
        }
 
-       put_cpu_var(vmap_block_queue);
+       put_cpu_light();
        rcu_read_unlock();
 
        /* Allocate new block if nothing was found */
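
The same conversion is applied in vb_alloc(): the free-list walk is already protected by rcu_read_lock() and vbq->lock, so preemption only had to be off to keep the per-CPU pointer stable, and migrate_disable() achieves that. For comparison, the mainline helpers being replaced expand to a preempt_disable()d section (from include/linux/percpu-defs.h, abridged):

    /* mainline: per-CPU access with preemption disabled throughout */
    #define get_cpu_var(var)  (*({ preempt_disable(); this_cpu_ptr(&var); }))
    #define put_cpu_var(var)  do { (void)&(var); preempt_enable(); } while (0)
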
@@ -2816,6 +2818,8 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                unsigned int order, unsigned int nr_pages, struct page **pages)
 {
        unsigned int nr_allocated = 0;
+       struct page *page;
+       int i;
 
        /*
         * For order-0 pages we make use of bulk allocator, if
@@ -2823,7 +2827,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
         * to fails, fallback to a single page allocator that is
         * more permissive.
         */
-       if (!order) {
+       if (!order && nid != NUMA_NO_NODE) {
                while (nr_allocated < nr_pages) {
                        unsigned int nr, nr_pages_request;
 
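The new nid != NUMA_NO_NODE condition keeps node-unbound requests away from the bulk allocator, which is inherently node-bound. The elided body of this loop calls the mainline bulk API; a minimal usage sketch, reusing the names visible in the surrounding context (the exact elided call may differ):

    /* Sketch: bulk-fill the tail of the page array on node nid;
     * returns the number of entries it managed to populate. */
    nr = alloc_pages_bulk_array_node(gfp, nid, nr_pages_request,
                                     pages + nr_allocated);
    nr_allocated += nr;

Because alloc_pages_bulk_array_node() resolves NUMA_NO_NODE to the local node, letting NUMA_NO_NODE requests into this path would ignore the task's NUMA mempolicy.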
@@ -2848,7 +2852,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                        if (nr != nr_pages_request)
                                break;
                }
-       } else
+       } else if (order)
                /*
                 * Compound pages required for remap_vmalloc_page if
                 * high-order pages.
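
With the bulk branch now gated on both conditions, the bare else had to become else if (order), so that an order-0, NUMA_NO_NODE request does not pick up __GFP_COMP on its way to the fallback loop. The resulting dispatch, as a sketch:

    /* Control flow after this change (sketch):
     *   order == 0 && nid != NUMA_NO_NODE -> bulk allocator, fallback below
     *   order  > 0                        -> gfp |= __GFP_COMP, loop below
     *   order == 0 && nid == NUMA_NO_NODE -> loop below via alloc_pages()
     */
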
@@ -2856,11 +2860,12 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                gfp |= __GFP_COMP;
 
        /* High-order pages or fallback path if "bulk" fails. */
-       while (nr_allocated < nr_pages) {
-               struct page *page;
-               int i;
 
-               page = alloc_pages_node(nid, gfp, order);
+       while (nr_allocated < nr_pages) {
+               if (nid == NUMA_NO_NODE)
+                       page = alloc_pages(gfp, order);
+               else
+                       page = alloc_pages_node(nid, gfp, order);
                if (unlikely(!page))
                        break;
 
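Routing NUMA_NO_NODE through alloc_pages() is the substance of this fix: alloc_pages() consults the calling task's mempolicy, so node-unbound allocations can interleave across nodes, whereas alloc_pages_node() hard-resolves NUMA_NO_NODE to the local node. The mainline helper makes the difference visible (include/linux/gfp.h, abridged):

    static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
                                                unsigned int order)
    {
            if (nid == NUMA_NO_NODE)
                    nid = numa_mem_id();   /* pins to the local node,
                                            * bypassing the mempolicy */
            return __alloc_pages_node(nid, gfp_mask, order);
    }

Without this, large NUMA_NO_NODE vmalloc users (big hash tables, for instance) would land entirely on one node.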
@@ -3029,7 +3034,8 @@ again:
        clear_vm_uninitialized_flag(area);
 
        size = PAGE_ALIGN(size);
-       kmemleak_vmalloc(area, size, gfp_mask);
+       if (!(vm_flags & VM_DEFER_KMEMLEAK))
+               kmemleak_vmalloc(area, size, gfp_mask);
 
        return addr;
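
VM_DEFER_KMEMLEAK lets a caller opt out of the automatic kmemleak registration here and perform it later itself; in mainline the user is module_alloc() under KASAN, which wants kmemleak to track the final pointer rather than the raw one returned here. A sketch of how the flag is defined (the exact config guard may differ by kernel version):

    /* include/linux/vmalloc.h (sketch) */
    #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
        !defined(CONFIG_KASAN_VMALLOC)
    #define VM_DEFER_KMEMLEAK  0x00000800  /* defer kmemleak object creation */
    #else
    #define VM_DEFER_KMEMLEAK  0
    #endif

When the guard compiles the flag to 0, the new test is constant-folded away and this path behaves exactly as before.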