[platform/kernel/linux-rpi.git] / mm / vmalloc.c
index 3824dc1..8375eec 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
 #include "internal.h"
 #include "pgalloc-track.h"
 
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;
+
+static int __init set_nohugeiomap(char *str)
+{
+       ioremap_max_page_shift = PAGE_SHIFT;
+       return 0;
+}
+early_param("nohugeiomap", set_nohugeiomap);
+#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
 static bool __ro_after_init vmap_allow_huge = true;
 
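The hunk above moves the "nohugeiomap" handling next to its consumer: early_param() runs the handler during early boot, before ordinary __setup() parameters, and forcing ioremap_max_page_shift down to PAGE_SHIFT disables huge-page ioremap entirely. Without CONFIG_HAVE_ARCH_HUGE_VMAP the cap is a compile-time constant, so the huge-mapping paths fold away. As a rough illustration of what such a shift cap means for the mapper, here is a hypothetical helper; the real vmap_range_noflush() tests only actual page-table levels (PUD, PMD), not every shift value:

/*
 * Hypothetical sketch: pick the largest mapping size permitted by
 * @max_shift for which the virtual address, the physical address and
 * the remaining length are all suitably aligned.
 */
static unsigned int pick_mapping_shift(unsigned long addr, unsigned long end,
                                       phys_addr_t phys_addr,
                                       unsigned int max_shift)
{
        unsigned int shift;

        for (shift = max_shift; shift > PAGE_SHIFT; shift--) {
                unsigned long size = 1UL << shift;

                if (IS_ALIGNED(addr, size) && IS_ALIGNED(phys_addr, size) &&
                    end - addr >= size)
                        return shift;
        }

        return PAGE_SHIFT;      /* base pages always fit */
}

With "nohugeiomap" on the kernel command line the cap is PAGE_SHIFT, so a loop like this never iterates and every mapping uses base pages.
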
@@ -298,15 +311,14 @@ static int vmap_range_noflush(unsigned long addr, unsigned long end,
        return err;
 }
 
-int vmap_range(unsigned long addr, unsigned long end,
-                       phys_addr_t phys_addr, pgprot_t prot,
-                       unsigned int max_page_shift)
+int ioremap_page_range(unsigned long addr, unsigned long end,
+               phys_addr_t phys_addr, pgprot_t prot)
 {
        int err;
 
-       err = vmap_range_noflush(addr, end, phys_addr, prot, max_page_shift);
+       err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
+                                ioremap_max_page_shift);
        flush_cache_vmap(addr, end);
-
        return err;
 }
 
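With the page-size cap now coming from ioremap_max_page_shift, the per-call max_page_shift argument disappears and the wrapper takes the ioremap_page_range() name directly; pgprot_nx() additionally masks execute permission out of every I/O mapping (a no-op on architectures without an NX concept). A sketch of a caller, modeled loosely on the generic ioremap_prot() in mm/ioremap.c; the alignment and error handling here are illustrative:

/*
 * Sketch of an ioremap-style front end: reserve a chunk of vmalloc
 * address space, then map the physical range into it with
 * ioremap_page_range(). Modeled on the generic implementation;
 * details differ per architecture.
 */
void __iomem *ioremap_prot_sketch(phys_addr_t phys_addr, size_t size,
                                  unsigned long prot)
{
        unsigned long offset, vaddr;
        struct vm_struct *area;

        /* Page-align the request, remembering the sub-page offset. */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr -= offset;
        size = PAGE_ALIGN(size + offset);

        area = get_vm_area_caller(size, VM_IOREMAP,
                                  __builtin_return_address(0));
        if (!area)
                return NULL;
        vaddr = (unsigned long)area->addr;

        if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
                               __pgprot(prot))) {
                free_vm_area(area);
                return NULL;
        }

        return (void __iomem *)(vaddr + offset);
}
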
@@ -2804,6 +2816,8 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                unsigned int order, unsigned int nr_pages, struct page **pages)
 {
        unsigned int nr_allocated = 0;
+       struct page *page;
+       int i;
 
        /*
         * For order-0 pages we make use of bulk allocator, if
@@ -2811,7 +2825,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
         * to fails, fallback to a single page allocator that is
         * more permissive.
         */
-       if (!order) {
+       if (!order && nid != NUMA_NO_NODE) {
                while (nr_allocated < nr_pages) {
                        unsigned int nr, nr_pages_request;
 
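Restricting the bulk path to callers that name an explicit node is the first half of a NUMA-policy fix: alloc_pages_bulk_array_node() always allocates from the node it is given, so routing NUMA_NO_NODE requests through it would ignore the task's mempolicy. For reference, the driving pattern for the bulk API looks roughly like this (hypothetical standalone helper; the loop above additionally caps each batch via nr_pages_request):

/*
 * Hypothetical sketch of driving the bulk allocator.
 * alloc_pages_bulk_array_node() fills only the NULL slots of @pages
 * and returns the total number of populated entries, so the caller
 * loops until the array is full or progress stops.
 */
static unsigned long bulk_fill(int nid, gfp_t gfp,
                               unsigned long nr_pages, struct page **pages)
{
        unsigned long nr_populated = 0;

        while (nr_populated < nr_pages) {
                unsigned long nr;

                nr = alloc_pages_bulk_array_node(gfp, nid, nr_pages, pages);
                if (nr == nr_populated)
                        break;          /* no forward progress */
                nr_populated = nr;
        }

        return nr_populated;
}
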
@@ -2836,7 +2850,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                        if (nr != nr_pages_request)
                                break;
                }
-       } else
+       } else if (order)
                /*
                 * Compound pages required for remap_vmalloc_page if
                 * high-order pages.
@@ -2844,11 +2858,12 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                gfp |= __GFP_COMP;
 
        /* High-order pages or fallback path if "bulk" fails. */
-       while (nr_allocated < nr_pages) {
-               struct page *page;
-               int i;
 
-               page = alloc_pages_node(nid, gfp, order);
+       while (nr_allocated < nr_pages) {
+               if (nid == NUMA_NO_NODE)
+                       page = alloc_pages(gfp, order);
+               else
+                       page = alloc_pages_node(nid, gfp, order);
                if (unlikely(!page))
                        break;
 
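The second half of the fix is in the fallback loop: the allocator is now chosen by placement policy instead of unconditionally calling alloc_pages_node(). Isolated into a hypothetical helper (not a kernel API), the rule is:

/*
 * Hypothetical helper isolating the placement rule above.
 * alloc_pages_node(NUMA_NO_NODE, ...) silently falls back to the
 * local node and bypasses the task's NUMA mempolicy, so requests
 * without an explicit node must go through alloc_pages(), which
 * honours current->mempolicy.
 */
static struct page *alloc_one_page(int nid, gfp_t gfp, unsigned int order)
{
        if (nid == NUMA_NO_NODE)
                return alloc_pages(gfp, order);         /* policy-directed */

        return alloc_pages_node(nid, gfp, order);       /* pinned to @nid */
}
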
@@ -3017,7 +3032,8 @@ again:
        clear_vm_uninitialized_flag(area);
 
        size = PAGE_ALIGN(size);
-       kmemleak_vmalloc(area, size, gfp_mask);
+       if (!(vm_flags & VM_DEFER_KMEMLEAK))
+               kmemleak_vmalloc(area, size, gfp_mask);
 
        return addr;
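
Finally, VM_DEFER_KMEMLEAK lets a caller opt out of the automatic kmemleak registration and perform it only once the allocation is known to survive. The motivating case is module_alloc(): with KASAN, the freshly vmalloc'ed area may be vfree'd again if shadow setup fails, and a kmemleak object created here would briefly describe memory that is about to disappear. A hedged sketch of the deferred-registration pattern follows; setup_that_may_fail() is hypothetical, and the real users are the module loader and KASAN:

/* Hypothetical follow-up initialisation that may fail. */
static int setup_that_may_fail(void *p, unsigned long size);

/*
 * Sketch of deferred kmemleak registration: allocate with
 * VM_DEFER_KMEMLEAK so no kmemleak object is created up front,
 * finish the setup that may still fail, then register by hand.
 */
static void *alloc_tracked_later(unsigned long size, gfp_t gfp_mask)
{
        void *p;

        p = __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
                                 gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK,
                                 NUMA_NO_NODE, __builtin_return_address(0));
        if (!p)
                return NULL;

        if (setup_that_may_fail(p, size)) {
                vfree(p);       /* kmemleak never saw this allocation */
                return NULL;
        }

        kmemleak_vmalloc(find_vm_area(p), size, gfp_mask);
        return p;
}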