cpuset,mm: fix having no node to allocate memory from when changing a cpuset's mems
[platform/adaptation/renesas_rcar/renesas_kernel.git] / mm/page_alloc.c
index a6326c7..f7da2a2 100644
@@ -475,6 +475,8 @@ static inline void __free_one_page(struct page *page,
                int migratetype)
 {
        unsigned long page_idx;
+       unsigned long combined_idx;
+       struct page *buddy;
 
        if (unlikely(PageCompound(page)))
                if (unlikely(destroy_compound_page(page, order)))
@@ -488,9 +490,6 @@ static inline void __free_one_page(struct page *page,
        VM_BUG_ON(bad_range(zone, page));
 
        while (order < MAX_ORDER-1) {
-               unsigned long combined_idx;
-               struct page *buddy;
-
                buddy = __page_find_buddy(page, page_idx, order);
                if (!page_is_buddy(page, buddy, order))
                        break;
@@ -505,8 +504,29 @@ static inline void __free_one_page(struct page *page,
                order++;
        }
        set_page_order(page, order);
-       list_add(&page->lru,
-               &zone->free_area[order].free_list[migratetype]);
+
+       /*
+        * If this is not the largest possible page, check if the buddy
+        * of the next-highest order is free. If it is, it's possible
+        * that pages are being freed that will coalesce soon. If that
+        * is happening, add the free page to the tail of the list
+        * so it's less likely to be used soon and more likely to be
+        * merged as a higher-order page.
+        */
+       if ((order < MAX_ORDER-1) && pfn_valid_within(page_to_pfn(buddy))) {
+               struct page *higher_page, *higher_buddy;
+               combined_idx = __find_combined_index(page_idx, order);
+               higher_page = page + combined_idx - page_idx;
+               higher_buddy = __page_find_buddy(higher_page, combined_idx, order + 1);
+               if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
+                       list_add_tail(&page->lru,
+                               &zone->free_area[order].free_list[migratetype]);
+                       goto out;
+               }
+       }
+
+       list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
+out:
        zone->free_area[order].nr_free++;
 }
 
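The tail-placement check above leans on the buddy allocator's index arithmetic: a block's buddy, and the block a merge would produce, are both found with plain bit operations on the page index. The userspace sketch below reproduces that arithmetic in simplified form; the helper names mirror __page_find_buddy() and __find_combined_index(), but these standalone functions are illustrative, not the kernel's code.

```c
#include <stdio.h>

/*
 * Simplified model of the buddy index arithmetic used in
 * __free_one_page(): a block of 2^order pages at page_idx has its
 * buddy at page_idx ^ (1 << order), and merging the pair yields a
 * block starting at page_idx with the order bit cleared.
 */
static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1UL << order);
}

static unsigned long find_combined_index(unsigned long page_idx, unsigned int order)
{
	return page_idx & ~(1UL << order);
}

int main(void)
{
	unsigned long page_idx = 12;	/* freeing an order-2 block at index 12 */
	unsigned int order = 2;

	unsigned long buddy = find_buddy_index(page_idx, order);
	unsigned long combined = find_combined_index(page_idx, order);

	/*
	 * The question the patch asks one order up: is the buddy of the
	 * block that a future merge would create already free?  If so,
	 * queue the page at the tail of the free list to wait for it.
	 */
	unsigned long higher_buddy = find_buddy_index(combined, order + 1);

	printf("order-%u block %lu: buddy %lu, merged block starts at %lu\n",
	       order, page_idx, buddy, combined);
	printf("order-%u buddy of the merged block: %lu\n", order + 1, higher_buddy);
	return 0;
}
```

Running this for block 12 at order 2: its buddy is block 8, a merge would produce an order-3 block starting at index 8, and that block's own buddy is block 0. If block 0's range is free, the hunk above queues the page at the tail of the order-2 free list, so it is less likely to be handed out before the merge happens.
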
@@ -1970,10 +1990,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        if (unlikely(!zonelist->_zonerefs->zone))
                return NULL;
 
+       get_mems_allowed();
        /* The preferred zone is used for statistics later */
        first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
-       if (!preferred_zone)
+       if (!preferred_zone) {
+               put_mems_allowed();
                return NULL;
+       }
 
        /* First allocation attempt */
        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
@@ -1983,6 +2006,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                page = __alloc_pages_slowpath(gfp_mask, order,
                                zonelist, high_zoneidx, nodemask,
                                preferred_zone, migratetype);
+       put_mems_allowed();
 
        trace_mm_page_alloc(page, order, gfp_mask, migratetype);
        return page;
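
The get_mems_allowed()/put_mems_allowed() bracket closes the race named in the subject line: a concurrent cpuset update could rewrite the task's nodemask mid-allocation, so that first_zones_zonelist() picked a zone against the old mask and the freelist scan then rejected every zone against the new one, leaving no node to allocate from. The sketch below is a simplified, single-writer userspace illustration of the idea, assuming the commit's counter-based scheme (a per-task nesting counter plus a two-phase writer); it is not the kernel's exact code.

```c
#include <stdio.h>

/*
 * Hypothetical userspace model of the guard pattern.  The reader
 * side matches the bracket added to __alloc_pages_nodemask() above;
 * the writer widens the mask to old|new first, waits for readers to
 * drain, and only then shrinks it, so a reader never observes an
 * empty nodemask.
 */
struct task {
	volatile int mems_allowed_change_disable;	/* read-side nesting counter */
	volatile unsigned long mems_allowed;		/* nodemask as a plain bitmap */
};

static void get_mems_allowed(struct task *t)
{
	t->mems_allowed_change_disable++;
	__sync_synchronize();	/* publish the increment before reading the mask */
}

static void put_mems_allowed(struct task *t)
{
	__sync_synchronize();	/* finish mask reads before the decrement */
	t->mems_allowed_change_disable--;
}

static void cpuset_change_task_nodemask(struct task *t, unsigned long newmems)
{
	/* Phase 1: allow the union, so any in-flight reader still has
	 * at least one permitted node. */
	t->mems_allowed |= newmems;
	__sync_synchronize();

	/* Allocation is fast, so busy-waiting for readers is cheap. */
	while (t->mems_allowed_change_disable)
		;	/* cpu_relax() in kernel context */

	/* Phase 2: install exactly the new mask. */
	t->mems_allowed = newmems;
}

int main(void)
{
	struct task t = { 0, 0x3 };	/* nodes 0 and 1 allowed */

	get_mems_allowed(&t);
	printf("allocating with mask 0x%lx\n", t.mems_allowed);
	put_mems_allowed(&t);

	cpuset_change_task_nodemask(&t, 0xc);	/* move task to nodes 2 and 3 */
	printf("mask after change: 0x%lx\n", t.mems_allowed);
	return 0;
}
```

Note how the bracket in the hunks above is taken before the preferred-zone lookup and released only after the slowpath returns, so every read of the nodemask during one allocation sees a single, non-empty view of mems_allowed; the early-return path for a missing preferred_zone must drop the guard as well, which is what the added put_mems_allowed() call handles.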