Merge tag 'backport/v3.14.24-ltsi-rc1/phy-rcar-gen2-usb-to-v3.15' into backport/v3...
[platform/adaptation/renesas_rcar/renesas_kernel.git] / mm / compaction.c
index 3c39e5b..4229fc2 100644 (file)
@@ -222,6 +222,30 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
        return true;
 }
 
+/*
+ * Aside from avoiding lock contention, compaction also periodically checks
+ * need_resched() and either schedules in sync compaction or aborts async
+ * compaction. This is similar to what compact_checklock_irqsave() does, but
+ * is used where no lock is concerned.
+ *
+ * Returns false when no scheduling was needed, or sync compaction scheduled.
+ * Returns true when async compaction should abort.
+ */
+static inline bool compact_should_abort(struct compact_control *cc)
+{
+       /* async compaction aborts if a reschedule is pending (contended) */
+       if (need_resched()) {
+               if (cc->mode == MIGRATE_ASYNC) {
+                       cc->contended = true;
+                       return true;
+               }
+
+               cond_resched();
+       }
+
+       return false;
+}
+
 /* Returns true if the page is within a block suitable for migration to */
 static bool suitable_migration_target(struct page *page)
 {
@@ -494,8 +518,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        return 0;
        }
 
+       if (compact_should_abort(cc))
+               return 0;
+
        /* Time to isolate some pages for migration */
-       cond_resched();
        for (; low_pfn < end_pfn; low_pfn++) {
                /* give a chance to irqs before checking need_resched() */
                if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
@@ -683,7 +709,6 @@ static void isolate_freepages(struct zone *zone,
        unsigned long block_start_pfn;  /* start of current pageblock */
        unsigned long block_end_pfn;    /* end of current pageblock */
        unsigned long low_pfn;       /* lowest pfn scanner is able to scan */
-       unsigned long next_free_pfn; /* start pfn for scaning at next round */
        int nr_freepages = cc->nr_freepages;
        struct list_head *freelist = &cc->freepages;
 
@@ -704,12 +729,6 @@ static void isolate_freepages(struct zone *zone,
        low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
 
        /*
-        * If no pages are isolated, the block_start_pfn < low_pfn check
-        * will kick in.
-        */
-       next_free_pfn = 0;
-
-       /*
         * Isolate free pages until enough are available to migrate the
         * pages on cc->migratepages. We stop searching if the migrate
         * and free page scanners meet or enough free pages are isolated.
@@ -722,9 +741,11 @@ static void isolate_freepages(struct zone *zone,
                /*
                 * This can iterate a massively long zone without finding any
                 * suitable migration targets, so periodically check if we need
-                * to schedule.
+                * to schedule, or even abort async compaction.
                 */
-               cond_resched();
+               if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
+                                               && compact_should_abort(cc))
+                       break;
 
                if (!pfn_valid(block_start_pfn))
                        continue;
@@ -749,19 +770,26 @@ static void isolate_freepages(struct zone *zone,
                        continue;
 
                /* Found a block suitable for isolating free pages from */
+               cc->free_pfn = block_start_pfn;
                isolated = isolate_freepages_block(cc, block_start_pfn,
                                        block_end_pfn, freelist, false);
                nr_freepages += isolated;
 
                /*
-                * Record the highest PFN we isolated pages from. When next
-                * looking for free pages, the search will restart here as
-                * page migration may have returned some pages to the allocator
+                * Set a flag that we successfully isolated in this pageblock.
+                * In the next loop iteration, zone->compact_cached_free_pfn
+                * will not be updated and thus it will effectively contain the
+                * highest pageblock we isolated pages from.
                 */
-               if (isolated && next_free_pfn == 0) {
+               if (isolated)
                        cc->finished_update_free = true;
-                       next_free_pfn = block_start_pfn;
-               }
+
+               /*
+                * isolate_freepages_block() might have aborted due to async
+                * compaction being contended
+                */
+               if (cc->contended)
+                       break;
        }
 
        /* split_free_page does not map the pages */
@@ -772,9 +800,8 @@ static void isolate_freepages(struct zone *zone,
         * so that compact_finished() may detect this
         */
        if (block_start_pfn < low_pfn)
-               next_free_pfn = cc->migrate_pfn;
+               cc->free_pfn = cc->migrate_pfn;
 
-       cc->free_pfn = next_free_pfn;
        cc->nr_freepages = nr_freepages;
 }
 
@@ -789,9 +816,13 @@ static struct page *compaction_alloc(struct page *migratepage,
        struct compact_control *cc = (struct compact_control *)data;
        struct page *freepage;
 
-       /* Isolate free pages if necessary */
+       /*
+        * Isolate free pages if necessary, and if we are not aborting due to
+        * contention.
+        */
        if (list_empty(&cc->freepages)) {
-               isolate_freepages(cc->zone, cc);
+               if (!cc->contended)
+                       isolate_freepages(cc->zone, cc);
 
                if (list_empty(&cc->freepages))
                        return NULL;
@@ -817,22 +848,6 @@ static void compaction_free(struct page *page, unsigned long data)
        cc->nr_freepages++;
 }
 
-/*
- * We cannot control nr_migratepages fully when migration is running as
- * migrate_pages() has no knowledge of of compact_control.  When migration is
- * complete, we count the number of pages on the list by hand.
- */
-static void update_nr_listpages(struct compact_control *cc)
-{
-       int nr_migratepages = 0;
-       struct page *page;
-
-       list_for_each_entry(page, &cc->migratepages, lru)
-               nr_migratepages++;
-
-       cc->nr_migratepages = nr_migratepages;
-}
-
 /* possible outcome of isolate_migratepages */
 typedef enum {
        ISOLATE_ABORT,          /* Abort compaction now */
@@ -877,7 +892,7 @@ static int compact_finished(struct zone *zone,
        unsigned int order;
        unsigned long watermark;
 
-       if (fatal_signal_pending(current))
+       if (cc->contended || fatal_signal_pending(current))
                return COMPACT_PARTIAL;
 
        /* Compaction run completes if the migrate and free scanner meet */
@@ -1027,7 +1042,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
        migrate_prep_local();
 
        while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
-               unsigned long nr_migrate, nr_remaining;
                int err;
 
                switch (isolate_migratepages(zone, cc)) {
@@ -1042,20 +1056,20 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                        ;
                }
 
-               nr_migrate = cc->nr_migratepages;
+               if (!cc->nr_migratepages)
+                       continue;
+
                err = migrate_pages(&cc->migratepages, compaction_alloc,
                                compaction_free, (unsigned long)cc, cc->mode,
                                MR_COMPACTION);
-               update_nr_listpages(cc);
-               nr_remaining = cc->nr_migratepages;
 
-               trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
-                                               nr_remaining);
+               trace_mm_compaction_migratepages(cc->nr_migratepages, err,
+                                                       &cc->migratepages);
 
-               /* Release isolated pages not migrated */
+               /* All pages were either migrated or will be released */
+               cc->nr_migratepages = 0;
                if (err) {
                        putback_movable_pages(&cc->migratepages);
-                       cc->nr_migratepages = 0;
                        /*
                         * migrate_pages() may return -ENOMEM when scanners meet
                         * and we want compact_finished() to detect it