mm: migrate: Introduce migrate_misplaced_page()
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Thu, 25 Oct 2012 12:16:34 +0000 (14:16 +0200)
committer Mel Gorman <mgorman@suse.de>
Tue, 11 Dec 2012 14:42:41 +0000 (14:42 +0000)
Note: This was originally based on Peter's patch "mm/migrate: Introduce
migrate_misplaced_page()" but borrows extremely heavily from Andrea's
"autonuma: memory follows CPU algorithm and task/mm_autonuma stats
collection". The end result is barely recognisable, so the signed-offs
had to be dropped. If the original authors are ok with it, I'll
re-add the signed-off-bys.

Add migrate_misplaced_page(), which deals with migrating misplaced
pages at fault time.

Based-on-work-by: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Based-on-work-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Based-on-work-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
include/linux/migrate.h
mm/migrate.c

index 9d1c159..f0d0313 100644
@@ -13,6 +13,7 @@ enum migrate_reason {
        MR_MEMORY_HOTPLUG,
        MR_SYSCALL,             /* also applies to cpusets */
        MR_MEMPOLICY_MBIND,
+       MR_NUMA_MISPLACED,
        MR_CMA
 };
 
@@ -73,4 +74,14 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 #define fail_migrate_page NULL
 
 #endif /* CONFIG_MIGRATION */
+
+#ifdef CONFIG_NUMA_BALANCING
+extern int migrate_misplaced_page(struct page *page, int node);
+#else
+static inline int migrate_misplaced_page(struct page *page, int node)
+{
+       return -EAGAIN; /* can't migrate now */
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
 #endif /* _LINUX_MIGRATE_H */
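
For context, a minimal sketch of the intended caller of the API above
(hypothetical and not part of this patch; the hook into the NUMA hinting
fault path is wired up later in the series). The caller hands over a
referenced page plus the preferred node, and the reference is consumed
by migrate_misplaced_page() on every path:

	/*
	 * Hypothetical caller sketch; fixup_misplaced_page() is a made-up
	 * name. @target_nid is the node the faulting CPU prefers.
	 */
	static int fixup_misplaced_page(struct page *page, int target_nid)
	{
		get_page(page);		/* reference consumed by callee */
		if (migrate_misplaced_page(page, target_nid) > 0)
			return target_nid;	/* page moved to target_nid */
		return NUMA_NO_NODE;	/* refused, failed, or stub (-EAGAIN) */
	}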
index 27be9c9..d168aec 100644
@@ -282,7 +282,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page,
                struct buffer_head *head, enum migrate_mode mode)
 {
-       int expected_count;
+       int expected_count = 0;
        void **pslot;
 
        if (!mapping) {
@@ -1415,4 +1415,108 @@ int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
        }
        return err;
 }
-#endif
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * Returns true if this is a safe migration target node for misplaced NUMA
+ * pages. Currently it only checks the watermarks, which is crude.
+ */
+static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
+                                  int nr_migrate_pages)
+{
+       int z;
+       for (z = pgdat->nr_zones - 1; z >= 0; z--) {
+               struct zone *zone = pgdat->node_zones + z;
+
+               if (!populated_zone(zone))
+                       continue;
+
+               if (zone->all_unreclaimable)
+                       continue;
+
+       /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
+               if (!zone_watermark_ok(zone, 0,
+                                      high_wmark_pages(zone) +
+                                      nr_migrate_pages,
+                                      0, 0))
+                       continue;
+               return true;
+       }
+       return false;
+}
+
+static struct page *alloc_misplaced_dst_page(struct page *page,
+                                          unsigned long data,
+                                          int **result)
+{
+       int nid = (int) data;
+       struct page *newpage;
+
+       newpage = alloc_pages_exact_node(nid,
+                                        (GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
+                                         __GFP_NOMEMALLOC | __GFP_NORETRY |
+                                         __GFP_NOWARN) &
+                                        ~GFP_IOFS, 0);
+       return newpage;
+}
+
+/*
+ * Attempt to migrate a misplaced page to the specified destination
+ * node. The caller is expected to hold an elevated reference count on
+ * the page, which this function drops before returning.
+ */
+int migrate_misplaced_page(struct page *page, int node)
+{
+       int isolated = 0;
+       LIST_HEAD(migratepages);
+
+       /*
+        * Don't migrate pages that are mapped in multiple processes.
+        * TODO: Handle false sharing detection instead of this hammer
+        */
+       if (page_mapcount(page) != 1) {
+               put_page(page);
+               goto out;
+       }
+
+       /* Avoid migrating to a node that is nearly full */
+       if (migrate_balanced_pgdat(NODE_DATA(node), 1)) {
+               int page_lru;
+
+               if (isolate_lru_page(page)) {
+                       put_page(page);
+                       goto out;
+               }
+               isolated = 1;
+
+               /*
+                * Page is isolated which takes a reference count so now the
+        * caller's reference can be safely dropped without the page
+                * disappearing underneath us during migration
+                */
+               put_page(page);
+
+               page_lru = page_is_file_cache(page);
+               inc_zone_page_state(page, NR_ISOLATED_ANON + page_lru);
+               list_add(&page->lru, &migratepages);
+       }
+
+       if (isolated) {
+               int nr_remaining;
+
+               nr_remaining = migrate_pages(&migratepages,
+                               alloc_misplaced_dst_page,
+                               node, false, MIGRATE_ASYNC,
+                               MR_NUMA_MISPLACED);
+               if (nr_remaining) {
+                       putback_lru_pages(&migratepages);
+                       isolated = 0;
+               }
+       }
+       BUG_ON(!list_empty(&migratepages));
+out:
+       return isolated;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#endif /* CONFIG_NUMA */
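
A side note on migrate_balanced_pgdat() above: calling zone_watermark_ok()
with a mark of high_wmark_pages(zone) + nr_migrate_pages asks whether the
zone would still sit above its high watermark after the migrated pages
were allocated from it, i.e. whether the node can absorb the pages
without waking kswapd. A toy model of that predicate with made-up
numbers (ignoring the off-by-one, lowmem-reserve, and per-order details
of the real zone_watermark_ok()):

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified check: a zone may receive nr_migrate_pages only if
	 * its free pages stay at or above the high watermark afterwards. */
	static bool can_absorb(long free, long high_wmark, long nr_migrate_pages)
	{
		return free >= high_wmark + nr_migrate_pages;
	}

	int main(void)
	{
		/* Hypothetical zone: 1000 pages free, high watermark 900. */
		printf("%d\n", can_absorb(1000, 900, 100)); /* 1: safe target */
		printf("%d\n", can_absorb(1000, 900, 101)); /* 0: nearly full */
		return 0;
	}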