tizen: packaging: Add baselibs.conf to provide 64-bit kernel & modules for 32-bit...
[platform/kernel/linux-rpi.git] mm/page_alloc.c
index 95546f3..4c6a502 100644
@@ -204,6 +204,27 @@ EXPORT_SYMBOL(node_states);
 
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
+#define ALLOC_IN_CMA_THRESHOLD_MAX 16
+#define ALLOC_IN_CMA_THRESHOLD_DEFAULT 12
+
+static unsigned long _alloc_in_cma_threshold __read_mostly
+                               = ALLOC_IN_CMA_THRESHOLD_DEFAULT;
+
+static int __init alloc_in_cma_threshold_setup(char *buf)
+{
+       unsigned long res;
+
+       if (kstrtoul(buf, 10, &res) < 0 ||
+           res > ALLOC_IN_CMA_THRESHOLD_MAX) {
+               pr_err("Bad alloc_in_cma_threshold value\n");
+               return 0;
+       }
+       _alloc_in_cma_threshold = res;
+       pr_info("Setting alloc_in_cma_threshold to %lu\n", res);
+       return 0;
+}
+early_param("alloc_in_cma_threshold", alloc_in_cma_threshold_setup);
+
 /*
  * A cached value of the page's pageblock's migratetype, used when the page is
  * put on a pcplist. Used to avoid the pageblock migratetype lookup when
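Note: because the threshold is registered via early_param(), it can be overridden from the kernel command line at boot. As a hypothetical example, appending

    alloc_in_cma_threshold=8

to the boot arguments would move the cut-off back to 8/16, i.e. the old "half of the zone's free memory" behaviour, while values above ALLOC_IN_CMA_THRESHOLD_MAX (16) are rejected and the default of 12 stays in effect.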
@@ -2091,12 +2112,13 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
        if (IS_ENABLED(CONFIG_CMA)) {
                /*
                 * Balance movable allocations between regular and CMA areas by
-                * allocating from CMA when over half of the zone's free memory
-                * is in the CMA area.
+                * allocating from CMA when more than a given proportion of
+                * the zone's free memory is in the CMA area.
                 */
                if (alloc_flags & ALLOC_CMA &&
                    zone_page_state(zone, NR_FREE_CMA_PAGES) >
-                   zone_page_state(zone, NR_FREE_PAGES) / 2) {
+                   zone_page_state(zone, NR_FREE_PAGES) / ALLOC_IN_CMA_THRESHOLD_MAX
+                   * _alloc_in_cma_threshold) {
                        page = __rmqueue_cma_fallback(zone, order);
                        if (page)
                                return page;
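Note: the new condition is a plain ratio test against the zone's free-page counters. A minimal user-space sketch of the same arithmetic (the counter values and function name below are made up for illustration, not kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    #define ALLOC_IN_CMA_THRESHOLD_MAX 16

    /* Mirror of the patched test: fall back to CMA once free CMA pages
     * exceed threshold/16 of the zone's free pages. */
    static bool use_cma(unsigned long free_cma, unsigned long free_pages,
                        unsigned long threshold)
    {
            return free_cma > free_pages / ALLOC_IN_CMA_THRESHOLD_MAX * threshold;
    }

    int main(void)
    {
            /* Default threshold 12 means roughly a 75% free-CMA share;
             * the old hard-coded "/ 2" corresponds to threshold == 8. */
            printf("%d\n", use_cma(800, 1000, 12));  /* 1: 800 > 1000/16*12 = 744 */
            printf("%d\n", use_cma(600, 1000, 12));  /* 0: 600 <= 744 */
            return 0;
    }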
@@ -3809,14 +3831,9 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
        else
                (*no_progress_loops)++;
 
-       /*
-        * Make sure we converge to OOM if we cannot make any progress
-        * several times in the row.
-        */
-       if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
-               /* Before OOM, exhaust highatomic_reserve */
-               return unreserve_highatomic_pageblock(ac, true);
-       }
+       if (*no_progress_loops > MAX_RECLAIM_RETRIES)
+               goto out;
+
 
        /*
         * Keep reclaiming pages while there is a chance this will lead
@@ -3859,6 +3876,11 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
                schedule_timeout_uninterruptible(1);
        else
                cond_resched();
+out:
+       /* Before OOM, exhaust highatomic_reserve */
+       if (!ret)
+               return unreserve_highatomic_pageblock(ac, true);
+
        return ret;
 }
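Note: taken together with the earlier hunk in should_reclaim_retry(), this change means that whenever the function is about to report failure (ret stays false), it now tries unreserve_highatomic_pageblock() first, instead of doing so only on the MAX_RECLAIM_RETRIES path. A stripped-down sketch of the resulting control flow, with stub predicates standing in for the kernel's watermark check and the highatomic unreserve (both are placeholders, not the real helpers):

    #include <stdbool.h>

    /* Placeholder predicates: the real code checks zone watermarks and
     * calls unreserve_highatomic_pageblock(ac, true). */
    static bool reclaim_could_help(void) { return false; }
    static bool drain_highatomic_reserve(void) { return false; }

    static bool should_retry(int no_progress_loops, int max_retries)
    {
            bool ret = false;

            if (no_progress_loops > max_retries)
                    goto out;

            ret = reclaim_could_help();
    out:
            /* Before heading towards OOM, release the highatomic reserve
             * once; a successful unreserve is treated as "retry". */
            if (!ret)
                    return drain_highatomic_reserve();
            return ret;
    }

    int main(void) { return should_retry(0, 16) ? 0 : 1; }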
 
@@ -6475,6 +6497,7 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
                        next_page = page;
                        current_buddy = page + size;
                }
+               page = next_page;
 
                if (set_page_guard(zone, current_buddy, high, migratetype))
                        continue;
@@ -6482,7 +6505,6 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
                if (current_buddy != target) {
                        add_to_free_list(current_buddy, zone, high, migratetype);
                        set_buddy_order(current_buddy, high);
-                       page = next_page;
                }
        }
 }
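Note: the last two hunks move the page = next_page; step out of the current_buddy != target branch, so the walk advances into the half that contains the target page on every iteration, including the ones where set_page_guard() takes the continue path. A standalone sketch of the corrected walk in terms of pfn offsets (the offsets are hypothetical, purely to show the arithmetic):

    #include <stdio.h>

    int main(void)
    {
            /* Break an order-3 block (offsets 0..7) down around target 5:
             * at each step keep the half holding the target and hand the
             * other half back at the current order. */
            unsigned long base = 0, target = 5;
            int high = 3, low = 0;

            while (high > low) {
                    unsigned long size = 1UL << --high;

                    if (target >= base + size) {        /* target in upper half */
                            printf("free order-%d at offset %lu\n", high, base);
                            base += size;               /* keep walking the upper half */
                    } else {                            /* target in lower half */
                            printf("free order-%d at offset %lu\n", high, base + size);
                    }
            }
            /* Prints: order-2 at 0, order-1 at 6, order-0 at 4; only the
             * target page (offset 5) is left off the free lists. */
            return 0;
    }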