diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 0cee10f..d1473b2 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -6,6 +6,7 @@
 #include <linux/page-isolation.h>
 #include <linux/pageblock-flags.h>
 #include <linux/memory.h>
+#include <linux/hugetlb.h>
 #include "internal.h"
 
 int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
@@ -252,6 +253,19 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
 {
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
 
+	/*
+	 * TODO: allocate a destination hugepage from a nearest neighbor node,
+	 * accordance with memory policy of the user process if possible. For
+	 * now as a simple work-around, we use the next node for destination.
+	 */
+	if (PageHuge(page)) {
+		nodemask_t src = nodemask_of_node(page_to_nid(page));
+		nodemask_t dst;
+		nodes_complement(dst, src);
+		return alloc_huge_page_node(page_hstate(compound_head(page)),
+					    next_node(page_to_nid(page), dst));
+	}
+
	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;
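
For readers less familiar with the kernel's nodemask API, the sketch below is a minimal userspace illustration (not kernel code) of the destination-node selection performed in the PageHuge() branch above: build the source node's singleton mask, complement it, and take the next node after the source. MAX_NODES, next_node_after() and the plain unsigned bitmasks are simplified stand-ins for the kernel's MAX_NUMNODES, next_node(), nodemask_of_node() and nodes_complement(); the node numbers are made up for the example.

/*
 * Userspace sketch of the destination-node selection used for hugepage
 * migration targets in the patch above.
 */
#include <stdio.h>

#define MAX_NODES 8	/* stand-in for MAX_NUMNODES */

/* Next set bit strictly after 'n' in 'mask', or MAX_NODES if none
 * (mirrors the behaviour of the kernel's next_node()). */
static int next_node_after(int n, unsigned int mask)
{
	for (int i = n + 1; i < MAX_NODES; i++)
		if (mask & (1u << i))
			return i;
	return MAX_NODES;
}

int main(void)
{
	int src_nid = 2;			/* node the hugepage currently lives on */
	unsigned int src = 1u << src_nid;	/* like nodemask_of_node(src_nid) */
	unsigned int dst = ~src & ((1u << MAX_NODES) - 1);	/* like nodes_complement() */

	int target = next_node_after(src_nid, dst);
	printf("migrate hugepage from node %d to node %d\n", src_nid, target);
	return 0;
}

As in the patch, the selection simply moves to the next higher-numbered node; picking a destination according to the process's memory policy is deferred, as the TODO comment notes.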