diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d77830f..8375eec 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2816,6 +2816,8 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                unsigned int order, unsigned int nr_pages, struct page **pages)
 {
        unsigned int nr_allocated = 0;
+       struct page *page;
+       int i;
 
        /*
         * For order-0 pages we make use of bulk allocator, if
@@ -2823,7 +2825,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
         * to fails, fallback to a single page allocator that is
         * more permissive.
         */
-       if (!order) {
+       if (!order && nid != NUMA_NO_NODE) {
                while (nr_allocated < nr_pages) {
                        unsigned int nr, nr_pages_request;
 
@@ -2848,7 +2850,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                        if (nr != nr_pages_request)
                                break;
                }
-       } else
+       } else if (order)
                /*
                 * Compound pages required for remap_vmalloc_page if
                 * high-order pages.
@@ -2856,11 +2858,12 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                gfp |= __GFP_COMP;
 
        /* High-order pages or fallback path if "bulk" fails. */
-       while (nr_allocated < nr_pages) {
-               struct page *page;
-               int i;
 
-               page = alloc_pages_node(nid, gfp, order);
+       while (nr_allocated < nr_pages) {
+               if (nid == NUMA_NO_NODE)
+                       page = alloc_pages(gfp, order);
+               else
+                       page = alloc_pages_node(nid, gfp, order);
                if (unlikely(!page))
                        break;
 
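Taken together, the three hunks above stop routing NUMA_NO_NODE requests through the node-bound order-0 bulk allocator: when no explicit node is given, vm_area_alloc_pages() now falls through to plain alloc_pages(), which (unlike alloc_pages_node() resolved to the local node) follows the calling task's NUMA mempolicy. The condensed sketch below shows how the function reads after the change; the bulk-allocation loop, cond_resched() calls and surrounding comments are abridged from the unchanged kernel code, so treat it as an illustration rather than the exact source.

/*
 * Condensed sketch (not verbatim kernel source) of vm_area_alloc_pages()
 * once the hunks above are applied.
 */
static inline unsigned int
vm_area_alloc_pages(gfp_t gfp, int nid,
		unsigned int order, unsigned int nr_pages, struct page **pages)
{
	unsigned int nr_allocated = 0;
	struct page *page;
	int i;

	if (!order && nid != NUMA_NO_NODE) {
		/*
		 * Order-0 pages bound to an explicit node: batch them
		 * through the bulk allocator (loop elided here).
		 */
	} else if (order)
		/* Compound pages are needed to remap high-order pages. */
		gfp |= __GFP_COMP;

	/* High-order pages or fallback path if "bulk" fails. */
	while (nr_allocated < nr_pages) {
		if (nid == NUMA_NO_NODE)
			page = alloc_pages(gfp, order);	/* follows mempolicy */
		else
			page = alloc_pages_node(nid, gfp, order);
		if (unlikely(!page))
			break;

		/* Track each PAGE_SIZE sub-page individually in pages[]. */
		for (i = 0; i < (1U << order); i++)
			pages[nr_allocated + i] = page + i;

		nr_allocated += 1U << order;
	}

	return nr_allocated;
}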
@@ -3029,7 +3032,8 @@ again:
        clear_vm_uninitialized_flag(area);
 
        size = PAGE_ALIGN(size);
-       kmemleak_vmalloc(area, size, gfp_mask);
+       if (!(vm_flags & VM_DEFER_KMEMLEAK))
+               kmemleak_vmalloc(area, size, gfp_mask);
 
        return addr;
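
The final hunk makes __vmalloc_node_range() skip the automatic kmemleak registration when the caller passes VM_DEFER_KMEMLEAK in vm_flags, leaving it to the caller to register the object itself once the allocation is known to be kept. A minimal, hypothetical caller is sketched below; the helper name demo_alloc_buf() and its parameters are illustrative only, and the deferred registration simply reuses the same kmemleak_vmalloc() call that the hunk bypasses.

#include <linux/vmalloc.h>
#include <linux/kmemleak.h>

/* Hypothetical user of VM_DEFER_KMEMLEAK; demo_alloc_buf() is illustrative. */
static void *demo_alloc_buf(unsigned long size)
{
	void *p;

	/* Allocate without creating a kmemleak object yet. */
	p = __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
				 GFP_KERNEL, PAGE_KERNEL, VM_DEFER_KMEMLEAK,
				 NUMA_NO_NODE, __builtin_return_address(0));
	if (!p)
		return NULL;

	/* ... further setup that may still fail and simply vfree(p) ... */

	/* The allocation is being kept: register it with kmemleak now. */
	kmemleak_vmalloc(find_vm_area(p), size, GFP_KERNEL);
	return p;
}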