mm/compaction: clean-up code on success of balloon isolation
[platform/adaptation/renesas_rcar/renesas_kernel.git] mm/compaction.c
index f58bcd0..fcf1b1d 100644
@@ -217,21 +217,12 @@ static inline bool compact_trylock_irqsave(spinlock_t *lock,
 /* Returns true if the page is within a block suitable for migration to */
 static bool suitable_migration_target(struct page *page)
 {
-       int migratetype = get_pageblock_migratetype(page);
-
-       /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
-       if (migratetype == MIGRATE_RESERVE)
-               return false;
-
-       if (is_migrate_isolate(migratetype))
-               return false;
-
-       /* If the page is a large free page, then allow migration */
+       /* If the page is a large free page, then disallow migration */
        if (PageBuddy(page) && page_order(page) >= pageblock_order)
-               return true;
+               return false;
 
        /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
-       if (migrate_async_suitable(migratetype))
+       if (migrate_async_suitable(get_pageblock_migratetype(page)))
                return true;
 
        /* Otherwise skip the block */
@@ -251,9 +242,9 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 {
        int nr_scanned = 0, total_isolated = 0;
        struct page *cursor, *valid_page = NULL;
-       unsigned long nr_strict_required = end_pfn - blockpfn;
        unsigned long flags;
        bool locked = false;
+       bool checked_pageblock = false;
 
        cursor = pfn_to_page(blockpfn);
 
@@ -264,11 +255,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
                nr_scanned++;
                if (!pfn_valid_within(blockpfn))
-                       continue;
+                       goto isolate_fail;
+
                if (!valid_page)
                        valid_page = page;
                if (!PageBuddy(page))
-                       continue;
+                       goto isolate_fail;
 
                /*
                 * The zone lock must be held to isolate freepages.
@@ -284,17 +276,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                        break;
 
                /* Recheck this is a suitable migration target under lock */
-               if (!strict && !suitable_migration_target(page))
-                       break;
+               if (!strict && !checked_pageblock) {
+                       /*
+                        * We need to check the suitability of the pageblock
+                        * only once, and isolate_freepages_block() is called
+                        * with a pageblock-aligned range, so checking once is
+                        * sufficient.
+                        */
+                       checked_pageblock = true;
+                       if (!suitable_migration_target(page))
+                               break;
+               }
 
                /* Recheck this is a buddy page under lock */
                if (!PageBuddy(page))
-                       continue;
+                       goto isolate_fail;
 
                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
-               if (!isolated && strict)
-                       break;
                total_isolated += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
@@ -305,7 +303,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                if (isolated) {
                        blockpfn += isolated - 1;
                        cursor += isolated - 1;
+                       continue;
                }
+
+isolate_fail:
+               if (strict)
+                       break;
+               else
+                       continue;
+
        }
 
        trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
@@ -315,7 +321,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
         * pages requested were isolated. If there were any failures, 0 is
         * returned and CMA will fail.
         */
-       if (strict && nr_strict_required > total_isolated)
+       if (strict && blockpfn < end_pfn)
                total_isolated = 0;
 
        if (locked)
@@ -459,6 +465,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
        unsigned long flags;
        bool locked = false;
        struct page *page = NULL, *valid_page = NULL;
+       bool skipped_async_unsuitable = false;
 
        /*
         * Ensure that there are not too many pages isolated from the LRU
@@ -480,7 +487,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
        cond_resched();
        for (; low_pfn < end_pfn; low_pfn++) {
                /* give a chance to irqs before checking need_resched() */
-               if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
+               if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
                        if (should_release_lock(&zone->lru_lock)) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                locked = false;
@@ -519,23 +526,32 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 
                /* If isolation recently failed, do not retry */
                pageblock_nr = low_pfn >> pageblock_order;
-               if (!isolation_suitable(cc, page))
-                       goto next_pageblock;
+               if (last_pageblock_nr != pageblock_nr) {
+                       int mt;
 
-               /* Skip if free */
-               if (PageBuddy(page))
-                       continue;
+                       last_pageblock_nr = pageblock_nr;
+                       if (!isolation_suitable(cc, page))
+                               goto next_pageblock;
+
+                       /*
+                        * For async migration, also only scan in MOVABLE
+                        * blocks. Async migration is optimistic, checking
+                        * whether the minimum amount of work satisfies the
+                        * allocation.
+                        */
+                       mt = get_pageblock_migratetype(page);
+                       if (!cc->sync && !migrate_async_suitable(mt)) {
+                               cc->finished_update_migrate = true;
+                               skipped_async_unsuitable = true;
+                               goto next_pageblock;
+                       }
+               }
 
                /*
-                * For async migration, also only scan in MOVABLE blocks. Async
-                * migration is optimistic to see if the minimum amount of work
-                * satisfies the allocation
+                * Skip if free. page_order cannot be used without zone->lock
+                * as nothing prevents parallel allocations or buddy merging.
                 */
-               if (!cc->sync && last_pageblock_nr != pageblock_nr &&
-                   !migrate_async_suitable(get_pageblock_migratetype(page))) {
-                       cc->finished_update_migrate = true;
-                       goto next_pageblock;
-               }
+               if (PageBuddy(page))
+                       continue;
 
                /*
                 * Check may be lockless but that's ok as we recheck later.
@@ -546,11 +562,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        if (unlikely(balloon_page_movable(page))) {
                                if (locked && balloon_page_isolate(page)) {
                                        /* Successfully isolated */
-                                       cc->finished_update_migrate = true;
-                                       list_add(&page->lru, migratelist);
-                                       cc->nr_migratepages++;
-                                       nr_isolated++;
-                                       goto check_compact_cluster;
+                                       goto isolate_success;
                                }
                        }
                        continue;
@@ -573,6 +585,15 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        continue;
                }
 
+               /*
+                * Migration will fail if an anonymous page is pinned in memory,
+                * so avoid taking lru_lock and isolating it unnecessarily in an
+                * admittedly racy check.
+                */
+               if (!page_mapping(page) &&
+                   page_count(page) > page_mapcount(page))
+                       continue;
+
                /* Check if it is ok to still hold the lock */
                locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
                                                                locked, cc);
@@ -599,16 +620,17 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                if (__isolate_lru_page(page, mode) != 0)
                        continue;
 
-               VM_BUG_ON(PageTransCompound(page));
+               VM_BUG_ON_PAGE(PageTransCompound(page), page);
 
                /* Successfully isolated */
-               cc->finished_update_migrate = true;
                del_page_from_lru_list(page, lruvec, page_lru(page));
+
+isolate_success:
+               cc->finished_update_migrate = true;
                list_add(&page->lru, migratelist);
                cc->nr_migratepages++;
                nr_isolated++;
 
-check_compact_cluster:
                /* Avoid isolating too much */
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
                        ++low_pfn;
@@ -619,7 +641,6 @@ check_compact_cluster:
 
 next_pageblock:
                low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
-               last_pageblock_nr = pageblock_nr;
        }
 
        acct_isolated(zone, locked, cc);
@@ -627,8 +648,13 @@ next_pageblock:
        if (locked)
                spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-       /* Update the pageblock-skip if the whole pageblock was scanned */
-       if (low_pfn == end_pfn)
+       /*
+        * Update the pageblock-skip information and cached scanner pfn,
+        * if the whole pageblock was scanned without isolating any page.
+        * This is not done when pageblock was skipped due to being unsuitable
+        * for async compaction, so that eventual sync compaction can try.
+        */
+       if (low_pfn == end_pfn && !skipped_async_unsuitable)
                update_pageblock_skip(cc, valid_page, nr_isolated, true);
 
        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
@@ -650,17 +676,21 @@ static void isolate_freepages(struct zone *zone,
                                struct compact_control *cc)
 {
        struct page *page;
-       unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
+       unsigned long high_pfn, low_pfn, pfn, z_end_pfn;
        int nr_freepages = cc->nr_freepages;
        struct list_head *freelist = &cc->freepages;
 
        /*
         * Initialise the free scanner. The starting point is where we last
-        * scanned from (or the end of the zone if starting). The low point
-        * is the end of the pageblock the migration scanner is using.
+        * successfully isolated from, zone-cached value, or the end of the
+        * zone when isolating for the first time. We need this aligned to
+        * the pageblock boundary, because we do pfn -= pageblock_nr_pages
+        * in the for loop.
+        * The low boundary is the end of the pageblock the migration scanner
+        * is using.
         */
-       pfn = cc->free_pfn;
-       low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+       pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
+       low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
 
        /*
         * Take care that if the migration scanner is at the end of the zone
@@ -676,9 +706,10 @@ static void isolate_freepages(struct zone *zone,
         * pages on cc->migratepages. We stop searching if the migrate
         * and free page scanners meet or enough free pages are isolated.
         */
-       for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+       for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;
+               unsigned long end_pfn;
 
                /*
                 * This can iterate a massively long zone without finding any
@@ -713,13 +744,10 @@ static void isolate_freepages(struct zone *zone,
                isolated = 0;
 
                /*
-                * As pfn may not start aligned, pfn+pageblock_nr_page
-                * may cross a MAX_ORDER_NR_PAGES boundary and miss
-                * a pfn_valid check. Ensure isolate_freepages_block()
-                * only scans within a pageblock
+                * Take care when isolating in last pageblock of a zone which
+                * ends in the middle of a pageblock.
                 */
-               end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
-               end_pfn = min(end_pfn, z_end_pfn);
+               end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn);
                isolated = isolate_freepages_block(cc, pfn, end_pfn,
                                                   freelist, false);
                nr_freepages += isolated;
@@ -738,7 +766,14 @@ static void isolate_freepages(struct zone *zone,
        /* split_free_page does not map the pages */
        map_pages(freelist);
 
-       cc->free_pfn = high_pfn;
+       /*
+        * If we crossed the migrate scanner, we want to keep it that way
+        * so that compact_finished() may detect this
+        */
+       if (pfn < low_pfn)
+               cc->free_pfn = max(pfn, zone->zone_start_pfn);
+       else
+               cc->free_pfn = high_pfn;
        cc->nr_freepages = nr_freepages;
 }
 
@@ -837,6 +872,10 @@ static int compact_finished(struct zone *zone,
 
        /* Compaction run completes if the migrate and free scanner meet */
        if (cc->free_pfn <= cc->migrate_pfn) {
+               /* Let the next compaction start anew. */
+               zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
+               zone->compact_cached_free_pfn = zone_end_pfn(zone);
+
                /*
                 * Mark that the PG_migrate_skip information should be cleared
                 * by kswapd when it goes to sleep. kswapd does not set the
@@ -947,6 +986,14 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
        }
 
        /*
+        * Clear pageblock skip if there were failures recently and compaction
+        * is about to be retried after being deferred. kswapd does not do
+        * this reset as it'll reset the cached information when going to sleep.
+        */
+       if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+               __reset_isolation_suitable(zone);
+
+       /*
         * Setup to move all movable pages to the end of the zone. Used cached
         * information on where the scanners should start but check that it
         * is initialised by ensuring the values are within zone boundaries.
@@ -962,13 +1009,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                zone->compact_cached_migrate_pfn = cc->migrate_pfn;
        }
 
-       /*
-        * Clear pageblock skip if there were failures recently and compaction
-        * is about to be retried after being deferred. kswapd does not do
-        * this reset as it'll reset the cached information when going to sleep.
-        */
-       if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
-               __reset_isolation_suitable(zone);
+       trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
 
        migrate_prep_local();
 
@@ -1003,7 +1044,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                if (err) {
                        putback_movable_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
-                       if (err == -ENOMEM) {
+                       /*
+                        * migrate_pages() may return -ENOMEM when scanners meet
+                        * and we want compact_finished() to detect it
+                        */
+                       if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
                                ret = COMPACT_PARTIAL;
                                goto out;
                        }
@@ -1015,6 +1060,8 @@ out:
        cc->nr_freepages -= release_freepages(&cc->freepages);
        VM_BUG_ON(cc->nr_freepages != 0);
 
+       trace_mm_compaction_end(ret);
+
        return ret;
 }
 
@@ -1120,12 +1167,11 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
                        compact_zone(zone, cc);
 
                if (cc->order > 0) {
-                       int ok = zone_watermark_ok(zone, cc->order,
-                                               low_wmark_pages(zone), 0, 0);
-                       if (ok && cc->order >= zone->compact_order_failed)
-                               zone->compact_order_failed = cc->order + 1;
+                       if (zone_watermark_ok(zone, cc->order,
+                                               low_wmark_pages(zone), 0, 0))
+                               compaction_defer_reset(zone, cc->order, false);
                        /* Currently async compaction is never deferred. */
-                       else if (!ok && cc->sync)
+                       else if (cc->sync)
                                defer_compaction(zone, cc->order);
                }