mm: numa: defer TLB flush for THP migration as long as possible
author     Mel Gorman <mgorman@suse.de>
           Tue, 7 Jan 2014 14:00:48 +0000 (14:00 +0000)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 9 Jan 2014 20:25:14 +0000 (12:25 -0800)
commit b0943d61b8fa420180f92f64ef67662b4f6cc493 upstream.

THP migration can fail for a variety of reasons.  Avoid flushing the TLB
to deal with THP migration races until the copy is ready to start.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Alex Thorlton <athorlton@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
mm/huge_memory.c
mm/migrate.c

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 07b7eb3..4796245 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1360,13 +1360,6 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        /*
-        * The page_table_lock above provides a memory barrier
-        * with change_protection_range.
-        */
-       if (mm_tlb_flush_pending(mm))
-               flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-
-       /*
         * Migrate the THP to the requested node, returns with page unlocked
         * and pmd_numa cleared.
         */
diff --git a/mm/migrate.c b/mm/migrate.c
index 6609413..d455cab 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1705,6 +1705,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                goto out_fail;
        }
 
+       if (mm_tlb_flush_pending(mm))
+               flush_tlb_range(vma, mmun_start, mmun_end);
+
        /* Prepare a page as a migration target */
        __set_page_locked(new_page);
        SetPageSwapBacked(new_page);
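
For illustration only (not part of the commit): a minimal userspace C sketch of the pattern the patch adopts, where a pending TLB flush is deferred until the migration path is actually committed to copying, so early failure paths skip the flush entirely. All names here (fake_mm, tlb_flush_pending, flush_tlb_range_stub, migrate_thp, early_failure) are made-up stand-ins for the kernel's mm_tlb_flush_pending()/flush_tlb_range() usage shown in the diff above, not kernel API.

	/* Userspace model of "defer the TLB flush until the copy is ready". */
	#include <stdbool.h>
	#include <stdio.h>

	struct fake_mm {
		bool tlb_flush_pending;		/* models mm_tlb_flush_pending(mm) */
	};

	static void flush_tlb_range_stub(unsigned long start, unsigned long end)
	{
		printf("flush TLB for [%#lx, %#lx)\n", start, end);
	}

	/*
	 * Models migrate_misplaced_transhuge_page(): the cheap failure checks
	 * run first; the flush happens only once we are committed to copying.
	 */
	static int migrate_thp(struct fake_mm *mm, unsigned long start,
			       unsigned long end, bool early_failure)
	{
		if (early_failure)
			return -1;		/* migration aborted: no flush needed */

		/* Committed to the copy: resolve any pending flush now. */
		if (mm->tlb_flush_pending) {
			flush_tlb_range_stub(start, end);
			mm->tlb_flush_pending = false;
		}

		printf("copying THP...\n");
		return 0;
	}

	int main(void)
	{
		struct fake_mm mm = { .tlb_flush_pending = true };

		migrate_thp(&mm, 0x200000, 0x400000, true);	/* fails early: no flush */
		migrate_thp(&mm, 0x200000, 0x400000, false);	/* flushes, then copies */
		return 0;
	}

The ordering mirrors the diff: the mm_tlb_flush_pending() check moves out of the fault handler do_huge_pmd_numa_page() and into migrate_misplaced_transhuge_page(), immediately before the new page is prepared as a migration target.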