Merge tag 'backport/v3.14.24-ltsi-rc1/rcar-snd-to-v3.18-rc6' into backport/v3.14...
[platform/adaptation/renesas_rcar/renesas_kernel.git] / mm / page_alloc.c
index 538de79..4b25829 100644 (file)
@@ -943,6 +943,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                rmv_page_order(page);
                area->nr_free--;
                expand(zone, page, order, current_order, area, migratetype);
+               set_freepage_migratetype(page, migratetype);
                return page;
        }
 
@@ -1069,7 +1070,9 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
 
        /*
         * When borrowing from MIGRATE_CMA, we need to release the excess
-        * buddy pages to CMA itself.
+        * buddy pages to CMA itself. We also ensure the freepage_migratetype
+        * is set to CMA so the page is returned to the correct freelist in
+        * case it ends up not actually being allocated from the pcp lists.
         */
        if (is_migrate_cma(fallback_type))
                return fallback_type;
@@ -1137,6 +1140,12 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 
                        expand(zone, page, order, current_order, area,
                               new_type);
+                       /* The freepage_migratetype may differ from pageblock's
+                        * migratetype depending on the decisions in
+                        * try_to_steal_freepages(). This is OK as long as it
+                        * does not differ for the MIGRATE_CMA type.
+                        */
+                       set_freepage_migratetype(page, new_type);
 
                        trace_mm_page_alloc_extfrag(page, order, current_order,
                                start_migratetype, migratetype, new_type);
@@ -1187,7 +1196,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        unsigned long count, struct list_head *list,
                        int migratetype, int cold)
 {
-       int mt = migratetype, i;
+       int i;
 
        spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
@@ -1208,14 +1217,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        list_add(&page->lru, list);
                else
                        list_add_tail(&page->lru, list);
-               if (IS_ENABLED(CONFIG_CMA)) {
-                       mt = get_pageblock_migratetype(page);
-                       if (!is_migrate_cma(mt) && !is_migrate_isolate(mt))
-                               mt = migratetype;
-               }
-               set_freepage_migratetype(page, mt);
                list = &page->lru;
-               if (is_migrate_cma(mt))
+               if (is_migrate_cma(get_freepage_migratetype(page)))
                        __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
                                              -(1 << order));
        }
@@ -1584,7 +1587,7 @@ again:
                if (!page)
                        goto failed;
                __mod_zone_freepage_state(zone, -(1 << order),
-                                         get_pageblock_migratetype(page));
+                                         get_freepage_migratetype(page));
        }
 
        __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));