diff --git a/mm/compaction.c b/mm/compaction.c
index b48c525..4229fc2 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -89,7 +89,8 @@ static void __reset_isolation_suitable(struct zone *zone)
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long pfn;
 
-       zone->compact_cached_migrate_pfn = start_pfn;
+       zone->compact_cached_migrate_pfn[0] = start_pfn;
+       zone->compact_cached_migrate_pfn[1] = start_pfn;
        zone->compact_cached_free_pfn = end_pfn;
        zone->compact_blockskip_flush = false;
 
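
The cached migrate-scanner position becomes a two-element array so async and
sync compaction can each resume where they last stopped; later in this patch,
compact_zone() selects the entry with a bool derived from cc->mode
(compact_cached_migrate_pfn[sync]). A minimal user-space sketch of that
indexing, with toy names and values rather than kernel code:

#include <stdbool.h>
#include <stdio.h>

enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

struct toy_zone {
	/* [0]: async scanner position, [1]: sync scanner position */
	unsigned long compact_cached_migrate_pfn[2];
};

static unsigned long cached_migrate_pfn(const struct toy_zone *z,
					enum migrate_mode mode)
{
	const bool sync = mode != MIGRATE_ASYNC;	/* false -> 0, true -> 1 */
	return z->compact_cached_migrate_pfn[sync];
}

int main(void)
{
	struct toy_zone z = { { 1024, 4096 } };

	printf("async resumes at pfn %lu\n", cached_migrate_pfn(&z, MIGRATE_ASYNC));
	printf("sync resumes at pfn %lu\n", cached_migrate_pfn(&z, MIGRATE_SYNC));
	return 0;
}
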
@@ -131,9 +132,10 @@ void reset_isolation_suitable(pg_data_t *pgdat)
  */
 static void update_pageblock_skip(struct compact_control *cc,
                        struct page *page, unsigned long nr_isolated,
-                       bool migrate_scanner)
+                       bool set_unsuitable, bool migrate_scanner)
 {
        struct zone *zone = cc->zone;
+       unsigned long pfn;
 
        if (cc->ignore_skip_hint)
                return;
@@ -141,20 +143,32 @@ static void update_pageblock_skip(struct compact_control *cc,
        if (!page)
                return;
 
-       if (!nr_isolated) {
-               unsigned long pfn = page_to_pfn(page);
+       if (nr_isolated)
+               return;
+
+       /*
+        * Only skip pageblocks when it is known that all forms of
+        * compaction will fail in the near future.
+        */
+       if (set_unsuitable)
                set_pageblock_skip(page);
 
-               /* Update where compaction should restart */
-               if (migrate_scanner) {
-                       if (!cc->finished_update_migrate &&
-                           pfn > zone->compact_cached_migrate_pfn)
-                               zone->compact_cached_migrate_pfn = pfn;
-               } else {
-                       if (!cc->finished_update_free &&
-                           pfn < zone->compact_cached_free_pfn)
-                               zone->compact_cached_free_pfn = pfn;
-               }
+       pfn = page_to_pfn(page);
+
+       /* Update where async and sync compaction should restart */
+       if (migrate_scanner) {
+               if (cc->finished_update_migrate)
+                       return;
+               if (pfn > zone->compact_cached_migrate_pfn[0])
+                       zone->compact_cached_migrate_pfn[0] = pfn;
+               if (cc->mode != MIGRATE_ASYNC &&
+                   pfn > zone->compact_cached_migrate_pfn[1])
+                       zone->compact_cached_migrate_pfn[1] = pfn;
+       } else {
+               if (cc->finished_update_free)
+                       return;
+               if (pfn < zone->compact_cached_free_pfn)
+                       zone->compact_cached_free_pfn = pfn;
        }
 }
 #else
@@ -166,7 +180,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
 
 static void update_pageblock_skip(struct compact_control *cc,
                        struct page *page, unsigned long nr_isolated,
-                       bool migrate_scanner)
+                       bool set_unsuitable, bool migrate_scanner)
 {
 }
 #endif /* CONFIG_COMPACTION */
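
The new set_unsuitable argument distinguishes two reasons for coming away from
a pageblock empty-handed: an outright failure, where the skip bit should be
set so every compaction mode avoids the block, and an async-only decline of a
non-MOVABLE block, where the bit must stay clear so sync compaction can still
try it. A condensed sketch of that decision, using illustrative names only:

#include <stdbool.h>
#include <stdio.h>

/* Set the skip bit only when the failure applies to every compaction
 * mode, not when the async scanner merely declined the block. */
static bool should_set_skip_bit(unsigned long nr_isolated, bool set_unsuitable)
{
	return nr_isolated == 0 && set_unsuitable;
}

int main(void)
{
	printf("failed for all modes -> skip bit: %d\n", should_set_skip_bit(0, true));
	printf("async-only decline   -> skip bit: %d\n", should_set_skip_bit(0, false));
	printf("isolated some pages  -> skip bit: %d\n", should_set_skip_bit(32, true));
	return 0;
}
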
@@ -195,7 +209,7 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
                }
 
                /* async aborts if taking too long or contended */
-               if (!cc->sync) {
+               if (cc->mode == MIGRATE_ASYNC) {
                        cc->contended = true;
                        return false;
                }
@@ -208,30 +222,39 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
        return true;
 }
 
-static inline bool compact_trylock_irqsave(spinlock_t *lock,
-                       unsigned long *flags, struct compact_control *cc)
+/*
+ * Aside from avoiding lock contention, compaction also periodically checks
+ * need_resched() and either schedules in sync compaction or aborts async
+ * compaction. This is similar to what compact_checklock_irqsave() does, but
+ * is used where no lock is concerned.
+ *
+ * Returns false when no scheduling was needed, or sync compaction scheduled.
+ * Returns true when async compaction should abort.
+ */
+static inline bool compact_should_abort(struct compact_control *cc)
 {
-       return compact_checklock_irqsave(lock, flags, false, cc);
+       /* async compaction aborts if contended */
+       if (need_resched()) {
+               if (cc->mode == MIGRATE_ASYNC) {
+                       cc->contended = true;
+                       return true;
+               }
+
+               cond_resched();
+       }
+
+       return false;
 }
 
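
compact_should_abort() gives lock-free scanning paths the same back-off rule
as compact_checklock_irqsave(): when a reschedule is pending, async compaction
aborts and reports contention, while sync compaction simply yields and keeps
going. A user-space analogue of the pattern, where should_yield() stands in
for need_resched() and sched_yield() for cond_resched():

#include <stdbool.h>
#include <sched.h>

enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

struct toy_cc {
	enum migrate_mode mode;
	bool contended;		/* reported back to the allocator on abort */
};

/* Stand-in for need_resched(); always false in this toy. */
static bool should_yield(void) { return false; }

static bool toy_should_abort(struct toy_cc *cc)
{
	if (should_yield()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;	/* abort, tell the caller why */
			return true;
		}
		sched_yield();			/* sync modes yield, then continue */
	}
	return false;
}

int main(void)
{
	struct toy_cc cc = { MIGRATE_ASYNC, false };
	return toy_should_abort(&cc) ? 1 : 0;
}
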
 /* Returns true if the page is within a block suitable for migration to */
 static bool suitable_migration_target(struct page *page)
 {
-       int migratetype = get_pageblock_migratetype(page);
-
-       /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
-       if (migratetype == MIGRATE_RESERVE)
-               return false;
-
-       if (is_migrate_isolate(migratetype))
-               return false;
-
-       /* If the page is a large free page, then allow migration */
+       /* If the page is a large free page, then disallow migration */
        if (PageBuddy(page) && page_order(page) >= pageblock_order)
-               return true;
+               return false;
 
        /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
-       if (migrate_async_suitable(migratetype))
+       if (migrate_async_suitable(get_pageblock_migratetype(page)))
                return true;
 
        /* Otherwise skip the block */
@@ -251,9 +274,9 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 {
        int nr_scanned = 0, total_isolated = 0;
        struct page *cursor, *valid_page = NULL;
-       unsigned long nr_strict_required = end_pfn - blockpfn;
        unsigned long flags;
        bool locked = false;
+       bool checked_pageblock = false;
 
        cursor = pfn_to_page(blockpfn);
 
@@ -264,11 +287,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
                nr_scanned++;
                if (!pfn_valid_within(blockpfn))
-                       continue;
+                       goto isolate_fail;
+
                if (!valid_page)
                        valid_page = page;
                if (!PageBuddy(page))
-                       continue;
+                       goto isolate_fail;
 
                /*
                 * The zone lock must be held to isolate freepages.
@@ -284,17 +308,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                        break;
 
                /* Recheck this is a suitable migration target under lock */
-               if (!strict && !suitable_migration_target(page))
-                       break;
+               if (!strict && !checked_pageblock) {
+                       /*
+                        * We only need to check the pageblock's suitability
+                        * once: isolate_freepages_block() is called on a
+                        * single pageblock range, so one check suffices.
+                        */
+                       checked_pageblock = true;
+                       if (!suitable_migration_target(page))
+                               break;
+               }
 
                /* Recheck this is a buddy page under lock */
                if (!PageBuddy(page))
-                       continue;
+                       goto isolate_fail;
 
                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
-               if (!isolated && strict)
-                       break;
                total_isolated += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
@@ -305,7 +335,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                if (isolated) {
                        blockpfn += isolated - 1;
                        cursor += isolated - 1;
+                       continue;
                }
+
+isolate_fail:
+               if (strict)
+                       break;
+               else
+                       continue;
+
        }
 
        trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
@@ -315,7 +353,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
         * pages requested were isolated. If there were any failures, 0 is
         * returned and CMA will fail.
         */
-       if (strict && nr_strict_required > total_isolated)
+       if (strict && blockpfn < end_pfn)
                total_isolated = 0;
 
        if (locked)
@@ -323,7 +361,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
        /* Update the pageblock-skip if the whole pageblock was scanned */
        if (blockpfn == end_pfn)
-               update_pageblock_skip(cc, valid_page, total_isolated, false);
+               update_pageblock_skip(cc, valid_page, total_isolated, true,
+                                     false);
 
        count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
        if (total_isolated)
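
With the isolate_fail path above, a strict caller stops at the first page it
cannot isolate, so "did blockpfn reach end_pfn" now carries the information
the removed nr_strict_required counter used to: any shortfall means the whole
range is reported as a failure. A sketch of that contract, with illustrative
names (strict mode is what the CMA path relies on):

#include <stdbool.h>
#include <stdio.h>

/* Strict mode needs the whole [start, end) range isolated; a shortfall
 * is reported as 0 so the caller can back out cleanly. */
static unsigned long strict_result(unsigned long stopped_at_pfn,
				   unsigned long end_pfn,
				   unsigned long total_isolated, bool strict)
{
	if (strict && stopped_at_pfn < end_pfn)
		return 0;
	return total_isolated;
}

int main(void)
{
	/* stopped short of end_pfn: the strict caller sees total failure */
	printf("strict:     %lu\n", strict_result(4990, 5000, 300, true));
	/* a non-strict caller keeps whatever was isolated */
	printf("non-strict: %lu\n", strict_result(4990, 5000, 300, false));
	return 0;
}
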
@@ -454,12 +493,14 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
        unsigned long last_pageblock_nr = 0, pageblock_nr;
        unsigned long nr_scanned = 0, nr_isolated = 0;
        struct list_head *migratelist = &cc->migratepages;
-       isolate_mode_t mode = 0;
        struct lruvec *lruvec;
        unsigned long flags;
        bool locked = false;
        struct page *page = NULL, *valid_page = NULL;
-       bool skipped_async_unsuitable = false;
+       bool set_unsuitable = true;
+       const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
+                                       ISOLATE_ASYNC_MIGRATE : 0) |
+                                   (unevictable ? ISOLATE_UNEVICTABLE : 0);
 
        /*
         * Ensure that there are not too many pages isolated from the LRU
@@ -468,7 +509,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
         */
        while (unlikely(too_many_isolated(zone))) {
                /* async migration should just abort */
-               if (!cc->sync)
+               if (cc->mode == MIGRATE_ASYNC)
                        return 0;
 
                congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -477,11 +518,13 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        return 0;
        }
 
+       if (compact_should_abort(cc))
+               return 0;
+
        /* Time to isolate some pages for migration */
-       cond_resched();
        for (; low_pfn < end_pfn; low_pfn++) {
                /* give a chance to irqs before checking need_resched() */
-               if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
+               if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
                        if (should_release_lock(&zone->lru_lock)) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                locked = false;
@@ -520,8 +563,25 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 
                /* If isolation recently failed, do not retry */
                pageblock_nr = low_pfn >> pageblock_order;
-               if (!isolation_suitable(cc, page))
-                       goto next_pageblock;
+               if (last_pageblock_nr != pageblock_nr) {
+                       int mt;
+
+                       last_pageblock_nr = pageblock_nr;
+                       if (!isolation_suitable(cc, page))
+                               goto next_pageblock;
+
+                       /*
+                        * For async migration, also only scan in MOVABLE
+                        * blocks. Async migration is optimistic to see if
+                        * the minimum amount of work satisfies the allocation
+                        */
+                       mt = get_pageblock_migratetype(page);
+                       if (cc->mode == MIGRATE_ASYNC &&
+                           !migrate_async_suitable(mt)) {
+                               set_unsuitable = false;
+                               goto next_pageblock;
+                       }
+               }
 
                /*
                 * Skip if free. page_order cannot be used without zone->lock
@@ -531,18 +591,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        continue;
 
                /*
-                * For async migration, also only scan in MOVABLE blocks. Async
-                * migration is optimistic to see if the minimum amount of work
-                * satisfies the allocation
-                */
-               if (!cc->sync && last_pageblock_nr != pageblock_nr &&
-                   !migrate_async_suitable(get_pageblock_migratetype(page))) {
-                       cc->finished_update_migrate = true;
-                       skipped_async_unsuitable = true;
-                       goto next_pageblock;
-               }
-
-               /*
                 * Check may be lockless but that's ok as we recheck later.
                 * It's possible to migrate LRU pages and balloon pages
                 * Skip any other type of page
@@ -551,11 +599,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        if (unlikely(balloon_page_movable(page))) {
                                if (locked && balloon_page_isolate(page)) {
                                        /* Successfully isolated */
-                                       cc->finished_update_migrate = true;
-                                       list_add(&page->lru, migratelist);
-                                       cc->nr_migratepages++;
-                                       nr_isolated++;
-                                       goto check_compact_cluster;
+                                       goto isolate_success;
                                }
                        }
                        continue;
@@ -578,6 +622,15 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        continue;
                }
 
+               /*
+                * Migration will fail if an anonymous page is pinned in memory,
+                * so avoid taking lru_lock and isolating it unnecessarily in an
+                * admittedly racy check.
+                */
+               if (!page_mapping(page) &&
+                   page_count(page) > page_mapcount(page))
+                       continue;
+
                /* Check if it is ok to still hold the lock */
                locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
                                                                locked, cc);
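
The pinned-page pre-check above is deliberately racy: it runs before the LRU
lock is taken, and migration rechecks the counts under lock. The idea is that
an anonymous page (no mapping) whose reference count exceeds its map count has
extra pins, for example from get_user_pages(), so migration would fail anyway
and isolating it would be wasted work. A sketch of the predicate, parameters
illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Extra references beyond the mappings mean someone holds a pin; such
 * a page cannot be migrated, so don't bother isolating it. */
static bool probably_pinned(bool has_mapping, int refcount, int mapcount)
{
	return !has_mapping && refcount > mapcount;
}

int main(void)
{
	printf("pinned anon page: %d\n", probably_pinned(false, 3, 1));
	printf("plain anon page:  %d\n", probably_pinned(false, 1, 1));
	return 0;
}
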
@@ -592,12 +645,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        continue;
                }
 
-               if (!cc->sync)
-                       mode |= ISOLATE_ASYNC_MIGRATE;
-
-               if (unevictable)
-                       mode |= ISOLATE_UNEVICTABLE;
-
                lruvec = mem_cgroup_page_lruvec(page, zone);
 
                /* Try isolate the page */
@@ -607,13 +654,14 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                VM_BUG_ON_PAGE(PageTransCompound(page), page);
 
                /* Successfully isolated */
-               cc->finished_update_migrate = true;
                del_page_from_lru_list(page, lruvec, page_lru(page));
+
+isolate_success:
+               cc->finished_update_migrate = true;
                list_add(&page->lru, migratelist);
                cc->nr_migratepages++;
                nr_isolated++;
 
-check_compact_cluster:
                /* Avoid isolating too much */
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
                        ++low_pfn;
@@ -624,7 +672,6 @@ check_compact_cluster:
 
 next_pageblock:
                low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
-               last_pageblock_nr = pageblock_nr;
        }
 
        acct_isolated(zone, locked, cc);
@@ -635,11 +682,10 @@ next_pageblock:
        /*
         * Update the pageblock-skip information and cached scanner pfn,
         * if the whole pageblock was scanned without isolating any page.
-        * This is not done when pageblock was skipped due to being unsuitable
-        * for async compaction, so that eventual sync compaction can try.
         */
-       if (low_pfn == end_pfn && !skipped_async_unsuitable)
-               update_pageblock_skip(cc, valid_page, nr_isolated, true);
+       if (low_pfn == end_pfn)
+               update_pageblock_skip(cc, valid_page, nr_isolated,
+                                     set_unsuitable, true);
 
        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
@@ -660,44 +706,48 @@ static void isolate_freepages(struct zone *zone,
                                struct compact_control *cc)
 {
        struct page *page;
-       unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
+       unsigned long block_start_pfn;  /* start of current pageblock */
+       unsigned long block_end_pfn;    /* end of current pageblock */
+       unsigned long low_pfn;       /* lowest pfn scanner is able to scan */
        int nr_freepages = cc->nr_freepages;
        struct list_head *freelist = &cc->freepages;
 
        /*
         * Initialise the free scanner. The starting point is where we last
-        * scanned from (or the end of the zone if starting). The low point
-        * is the end of the pageblock the migration scanner is using.
+        * successfully isolated from, zone-cached value, or the end of the
+        * zone when isolating for the first time. We need this aligned to
+        * the pageblock boundary, because we do
+        * block_start_pfn -= pageblock_nr_pages in the for loop.
+        * For ending point, take care when isolating in the last pageblock of
+        * a zone which ends in the middle of a pageblock.
+        * The low boundary is the end of the pageblock the migration scanner
+        * is using.
         */
-       pfn = cc->free_pfn;
+       block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
+       block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
+                                               zone_end_pfn(zone));
        low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
 
        /*
-        * Take care that if the migration scanner is at the end of the zone
-        * that the free scanner does not accidentally move to the next zone
-        * in the next isolation cycle.
-        */
-       high_pfn = min(low_pfn, pfn);
-
-       z_end_pfn = zone_end_pfn(zone);
-
-       /*
         * Isolate free pages until enough are available to migrate the
         * pages on cc->migratepages. We stop searching if the migrate
         * and free page scanners meet or enough free pages are isolated.
         */
-       for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
-                                       pfn -= pageblock_nr_pages) {
+       for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
+                               block_end_pfn = block_start_pfn,
+                               block_start_pfn -= pageblock_nr_pages) {
                unsigned long isolated;
 
                /*
                 * This can iterate a massively long zone without finding any
                 * suitable migration targets, so periodically check if we need
-                * to schedule.
+                * to schedule, or even abort async compaction.
                 */
-               cond_resched();
+               if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
+                                               && compact_should_abort(cc))
+                       break;
 
-               if (!pfn_valid(pfn))
+               if (!pfn_valid(block_start_pfn))
                        continue;
 
                /*
@@ -707,7 +757,7 @@ static void isolate_freepages(struct zone *zone,
                 * i.e. it's possible that all pages within a zones range of
                 * pages do not belong to a single zone.
                 */
-               page = pfn_to_page(pfn);
+               page = pfn_to_page(block_start_pfn);
                if (page_zone(page) != zone)
                        continue;
 
@@ -720,29 +770,26 @@ static void isolate_freepages(struct zone *zone,
                        continue;
 
                /* Found a block suitable for isolating free pages from */
-               isolated = 0;
+               cc->free_pfn = block_start_pfn;
+               isolated = isolate_freepages_block(cc, block_start_pfn,
+                                       block_end_pfn, freelist, false);
+               nr_freepages += isolated;
 
                /*
-                * As pfn may not start aligned, pfn+pageblock_nr_page
-                * may cross a MAX_ORDER_NR_PAGES boundary and miss
-                * a pfn_valid check. Ensure isolate_freepages_block()
-                * only scans within a pageblock
+                * Set a flag that we successfully isolated in this pageblock.
+                * In the next loop iteration, zone->compact_cached_free_pfn
+                * will not be updated and thus it will effectively contain the
+                * highest pageblock we isolated pages from.
                 */
-               end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
-               end_pfn = min(end_pfn, z_end_pfn);
-               isolated = isolate_freepages_block(cc, pfn, end_pfn,
-                                                  freelist, false);
-               nr_freepages += isolated;
+               if (isolated)
+                       cc->finished_update_free = true;
 
                /*
-                * Record the highest PFN we isolated pages from. When next
-                * looking for free pages, the search will restart here as
-                * page migration may have returned some pages to the allocator
+                * isolate_freepages_block() might have aborted due to async
+                * compaction being contended
                 */
-               if (isolated) {
-                       cc->finished_update_free = true;
-                       high_pfn = max(high_pfn, pfn);
-               }
+               if (cc->contended)
+                       break;
        }
 
        /* split_free_page does not map the pages */
@@ -752,10 +799,9 @@ static void isolate_freepages(struct zone *zone,
         * If we crossed the migrate scanner, we want to keep it that way
         * so that compact_finished() may detect this
         */
-       if (pfn < low_pfn)
-               cc->free_pfn = max(pfn, zone->zone_start_pfn);
-       else
-               cc->free_pfn = high_pfn;
+       if (block_start_pfn < low_pfn)
+               cc->free_pfn = cc->migrate_pfn;
+
        cc->nr_freepages = nr_freepages;
 }
 
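
The reworked isolate_freepages() walks whole pageblocks backwards:
block_end_pfn trails block_start_pfn by one pageblock per iteration, the
starting block is rounded down to a pageblock boundary, and the first end is
clamped for zones that end in the middle of a pageblock. A runnable sketch of
the walk with toy constants (the kernel bounds the loop with the migrate
scanner's position, low_pfn):

#include <stdio.h>

#define TOY_PAGEBLOCK_NR_PAGES 512UL

static unsigned long toy_min(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long zone_end = 5000;	/* zone ends mid-pageblock */
	unsigned long free_pfn = 4999;	/* cached free-scanner position */
	unsigned long low_pfn = 1024;	/* end of migrate scanner's pageblock */
	unsigned long block_start_pfn, block_end_pfn;

	block_start_pfn = free_pfn & ~(TOY_PAGEBLOCK_NR_PAGES - 1);
	block_end_pfn = toy_min(block_start_pfn + TOY_PAGEBLOCK_NR_PAGES, zone_end);

	for (; block_start_pfn >= low_pfn;
	     block_end_pfn = block_start_pfn,
	     block_start_pfn -= TOY_PAGEBLOCK_NR_PAGES)
		printf("scan [%lu, %lu)\n", block_start_pfn, block_end_pfn);

	return 0;
}
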
@@ -770,9 +816,13 @@ static struct page *compaction_alloc(struct page *migratepage,
        struct compact_control *cc = (struct compact_control *)data;
        struct page *freepage;
 
-       /* Isolate free pages if necessary */
+       /*
+        * Isolate free pages if necessary, and if we are not aborting due to
+        * contention.
+        */
        if (list_empty(&cc->freepages)) {
-               isolate_freepages(cc->zone, cc);
+               if (!cc->contended)
+                       isolate_freepages(cc->zone, cc);
 
                if (list_empty(&cc->freepages))
                        return NULL;
@@ -786,23 +836,16 @@ static struct page *compaction_alloc(struct page *migratepage,
 }
 
 /*
- * We cannot control nr_migratepages and nr_freepages fully when migration is
- * running as migrate_pages() has no knowledge of compact_control. When
- * migration is complete, we count the number of pages on the lists by hand.
+ * This is a migrate-callback that "frees" freepages back to the isolated
+ * freelist.  All pages on the freelist are from the same zone, so there is no
+ * special handling needed for NUMA.
  */
-static void update_nr_listpages(struct compact_control *cc)
+static void compaction_free(struct page *page, unsigned long data)
 {
-       int nr_migratepages = 0;
-       int nr_freepages = 0;
-       struct page *page;
-
-       list_for_each_entry(page, &cc->migratepages, lru)
-               nr_migratepages++;
-       list_for_each_entry(page, &cc->freepages, lru)
-               nr_freepages++;
+       struct compact_control *cc = (struct compact_control *)data;
 
-       cc->nr_migratepages = nr_migratepages;
-       cc->nr_freepages = nr_freepages;
+       list_add(&page->lru, &cc->freepages);
+       cc->nr_freepages++;
 }
 
 /* possible outcome of isolate_migratepages */
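
compaction_free() is the put_new_page counterpart to compaction_alloc(): when
migrate_pages() cannot use a target page it hands the page straight back to
cc->freepages, keeping nr_freepages exact at all times, which is what allows
this patch to delete update_nr_listpages(). A user-space analogue of the
callback pair using a singly linked free list, all names illustrative:

#include <stddef.h>

struct toy_page { struct toy_page *next; };

struct toy_cc {
	struct toy_page *freepages;
	unsigned long nr_freepages;
};

/* Allocation callback: pop a prepared target page off the free list. */
static struct toy_page *toy_alloc(struct toy_cc *cc)
{
	struct toy_page *page = cc->freepages;

	if (page) {
		cc->freepages = page->next;
		cc->nr_freepages--;
	}
	return page;
}

/* Free callback: an unused target goes straight back, so the counter
 * never has to be recomputed after migration finishes. */
static void toy_free(struct toy_cc *cc, struct toy_page *page)
{
	page->next = cc->freepages;
	cc->freepages = page;
	cc->nr_freepages++;
}

int main(void)
{
	struct toy_page pages[2] = { { &pages[1] }, { NULL } };
	struct toy_cc cc = { &pages[0], 2 };

	toy_free(&cc, toy_alloc(&cc));	/* unused target returned intact */
	return cc.nr_freepages == 2 ? 0 : 1;
}
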
@@ -849,13 +892,14 @@ static int compact_finished(struct zone *zone,
        unsigned int order;
        unsigned long watermark;
 
-       if (fatal_signal_pending(current))
+       if (cc->contended || fatal_signal_pending(current))
                return COMPACT_PARTIAL;
 
        /* Compaction run completes if the migrate and free scanner meet */
        if (cc->free_pfn <= cc->migrate_pfn) {
                /* Let the next compaction start anew. */
-               zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
+               zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
+               zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
                zone->compact_cached_free_pfn = zone_end_pfn(zone);
 
                /*
@@ -955,6 +999,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
        int ret;
        unsigned long start_pfn = zone->zone_start_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
+       const bool sync = cc->mode != MIGRATE_ASYNC;
 
        ret = compaction_suitable(zone, cc->order);
        switch (ret) {
@@ -980,7 +1025,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
         * information on where the scanners should start but check that it
         * is initialised by ensuring the values are within zone boundaries.
         */
-       cc->migrate_pfn = zone->compact_cached_migrate_pfn;
+       cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
        cc->free_pfn = zone->compact_cached_free_pfn;
        if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
                cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
@@ -988,7 +1033,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
        }
        if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
                cc->migrate_pfn = start_pfn;
-               zone->compact_cached_migrate_pfn = cc->migrate_pfn;
+               zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
+               zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
        }
 
        trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
@@ -996,7 +1042,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
        migrate_prep_local();
 
        while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
-               unsigned long nr_migrate, nr_remaining;
                int err;
 
                switch (isolate_migratepages(zone, cc)) {
@@ -1011,21 +1056,20 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                        ;
                }
 
-               nr_migrate = cc->nr_migratepages;
+               if (!cc->nr_migratepages)
+                       continue;
+
                err = migrate_pages(&cc->migratepages, compaction_alloc,
-                               (unsigned long)cc,
-                               cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
+                               compaction_free, (unsigned long)cc, cc->mode,
                                MR_COMPACTION);
-               update_nr_listpages(cc);
-               nr_remaining = cc->nr_migratepages;
 
-               trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
-                                               nr_remaining);
+               trace_mm_compaction_migratepages(cc->nr_migratepages, err,
+                                                       &cc->migratepages);
 
-               /* Release isolated pages not migrated */
+               /* All pages were either migrated or will be released */
+               cc->nr_migratepages = 0;
                if (err) {
                        putback_movable_pages(&cc->migratepages);
-                       cc->nr_migratepages = 0;
                        /*
                         * migrate_pages() may return -ENOMEM when scanners meet
                         * and we want compact_finished() to detect it
@@ -1047,9 +1091,8 @@ out:
        return ret;
 }
 
-static unsigned long compact_zone_order(struct zone *zone,
-                                int order, gfp_t gfp_mask,
-                                bool sync, bool *contended)
+static unsigned long compact_zone_order(struct zone *zone, int order,
+               gfp_t gfp_mask, enum migrate_mode mode, bool *contended)
 {
        unsigned long ret;
        struct compact_control cc = {
@@ -1058,7 +1101,7 @@ static unsigned long compact_zone_order(struct zone *zone,
                .order = order,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
-               .sync = sync,
+               .mode = mode,
        };
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);
@@ -1080,7 +1123,7 @@ int sysctl_extfrag_threshold = 500;
  * @order: The order of the current allocation
  * @gfp_mask: The GFP mask of the current allocation
  * @nodemask: The allowed nodes to allocate from
- * @sync: Whether migration is synchronous or not
+ * @mode: The migration mode for async, sync light, or sync migration
  * @contended: Return value that is true if compaction was aborted due to lock contention
  * @page: Optionally capture a free page of the requested order during compaction
  *
@@ -1088,7 +1131,7 @@ int sysctl_extfrag_threshold = 500;
  */
 unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        int order, gfp_t gfp_mask, nodemask_t *nodemask,
-                       bool sync, bool *contended)
+                       enum migrate_mode mode, bool *contended)
 {
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        int may_enter_fs = gfp_mask & __GFP_FS;
@@ -1113,7 +1156,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
                                                                nodemask) {
                int status;
 
-               status = compact_zone_order(zone, order, gfp_mask, sync,
+               status = compact_zone_order(zone, order, gfp_mask, mode,
                                                contended);
                rc = max(status, rc);
 
@@ -1152,9 +1195,6 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
                        if (zone_watermark_ok(zone, cc->order,
                                                low_wmark_pages(zone), 0, 0))
                                compaction_defer_reset(zone, cc->order, false);
-                       /* Currently async compaction is never deferred. */
-                       else if (cc->sync)
-                               defer_compaction(zone, cc->order);
                }
 
                VM_BUG_ON(!list_empty(&cc->freepages));
@@ -1166,7 +1206,7 @@ void compact_pgdat(pg_data_t *pgdat, int order)
 {
        struct compact_control cc = {
                .order = order,
-               .sync = false,
+               .mode = MIGRATE_ASYNC,
        };
 
        if (!order)
@@ -1179,7 +1219,8 @@ static void compact_node(int nid)
 {
        struct compact_control cc = {
                .order = -1,
-               .sync = true,
+               .mode = MIGRATE_SYNC,
+               .ignore_skip_hint = true,
        };
 
        __compact_pgdat(NODE_DATA(nid), &cc);