x86, numa, acpi, memory-hotplug: make movable_node have higher priority
[platform/adaptation/renesas_rcar/renesas_kernel.git] / mm / page_alloc.c
index 5248fe0..4f59d19 100644 (file)
@@ -2072,13 +2072,6 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
                return;
 
        /*
-        * Walking all memory to count page types is very expensive and should
-        * be inhibited in non-blockable contexts.
-        */
-       if (!(gfp_mask & __GFP_WAIT))
-               filter |= SHOW_MEM_FILTER_PAGE_COUNT;
-
-       /*
         * This documents exceptions given to allocations in certain
         * contexts that are allowed to allocate outside current's set
         * of allowed nodes.
@@ -3901,6 +3894,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
        struct page *page;
        unsigned long block_migratetype;
        int reserve;
+       int old_reserve;
 
        /*
         * Get the start pfn, end pfn and the number of blocks to reserve
@@ -3922,6 +3916,12 @@ static void setup_zone_migrate_reserve(struct zone *zone)
         * future allocation of hugepages at runtime.
         */
        reserve = min(2, reserve);
+       old_reserve = zone->nr_migrate_reserve_block;
+
+       /* When memory is hot-added, we almost always need to do nothing */
+       if (reserve == old_reserve)
+               return;
+       zone->nr_migrate_reserve_block = reserve;
 
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                if (!pfn_valid(pfn))
@@ -3959,6 +3959,12 @@ static void setup_zone_migrate_reserve(struct zone *zone)
                                reserve--;
                                continue;
                        }
+               } else if (!old_reserve) {
+                       /*
+                        * At boot time we don't need to scan the whole zone
+                        * for turning off MIGRATE_RESERVE.
+                        */
+                       break;
                }
 
                /*
@@ -5012,9 +5018,33 @@ static void __init find_zone_movable_pfns_for_nodes(void)
        nodemask_t saved_node_state = node_states[N_MEMORY];
        unsigned long totalpages = early_calculate_totalpages();
        int usable_nodes = nodes_weight(node_states[N_MEMORY]);
+       struct memblock_type *type = &memblock.memory;
+
+       /* Need to find movable_zone earlier when movable_node is specified. */
+       find_usable_zone_for_movable();
 
        /*
-        * If movablecore was specified, calculate what size of
+        * If movable_node is specified, ignore kernelcore and movablecore
+        * options.
+        */
+       if (movable_node_is_enabled()) {
+               for (i = 0; i < type->cnt; i++) {
+                       if (!memblock_is_hotpluggable(&type->regions[i]))
+                               continue;
+
+                       nid = type->regions[i].nid;
+
+                       usable_startpfn = PFN_DOWN(type->regions[i].base);
+                       zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
+                               min(usable_startpfn, zone_movable_pfn[nid]) :
+                               usable_startpfn;
+               }
+
+               goto out2;
+       }
+
+       /*
+        * If movablecore=nn[KMG] was specified, calculate what size of
         * kernelcore that corresponds so that memory usable for
         * any allocation type is evenly spread. If both kernelcore
         * and movablecore are specified, then the value of kernelcore
@@ -5040,7 +5070,6 @@ static void __init find_zone_movable_pfns_for_nodes(void)
                goto out;
 
        /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
-       find_usable_zone_for_movable();
        usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
 
 restart:
@@ -5131,6 +5160,7 @@ restart:
        if (usable_nodes && required_kernelcore > usable_nodes)
                goto restart;
 
+out2:
        /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
        for (nid = 0; nid < MAX_NUMNODES; nid++)
                zone_movable_pfn[nid] =