mm/mempolicy: check hugepage migration is supported by arch in vma_migratable()
author	Li Xinhai <lixinhai.lxh@gmail.com>	Thu, 2 Apr 2020 04:10:52 +0000 (21:10 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>	Thu, 2 Apr 2020 16:35:31 +0000 (09:35 -0700)
vma_migratable() is called to check whether the pages in a vma can be
migrated before going ahead with further actions.  It is currently used
in the following code paths (a hypothetical call-site sketch follows
the list):

- task_numa_work
- mbind
- move_pages
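
As a hedged illustration (not verbatim from any of the above call
sites; the helper name here is hypothetical), a caller typically probes
the vma before doing any per-page work:

	/*
	 * Hypothetical call-site sketch: bail out early for vmas whose
	 * pages cannot be migrated, before walking their page tables.
	 */
	static bool should_queue_for_migration(struct vm_area_struct *vma)
	{
		if (!vma_migratable(vma))
			return false;	/* e.g. VM_IO, VM_PFNMAP, DAX */
		/* ... further checks specific to the caller ... */
		return true;
	}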

For a hugetlb mapping, whether the vma is migratable is determined by
two factors:
- CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
- arch_hugetlb_migration_supported
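
These two factors are combined by hugepage_migration_supported().  A
minimal sketch of the generic fallback in include/linux/hugetlb.h as of
this kernel (architectures may override
arch_hugetlb_migration_supported(); details may differ by tree):

	#ifndef arch_hugetlb_migration_supported
	static inline bool arch_hugetlb_migration_supported(struct hstate *h)
	{
	#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
		/* Only page-table-mapped hugepage sizes are migratable. */
		if ((huge_page_shift(h) == PMD_SHIFT) ||
		    (huge_page_shift(h) == PUD_SHIFT) ||
		    (huge_page_shift(h) == PGDIR_SHIFT))
			return true;
		else
			return false;
	#else
		return false;
	#endif
	}
	#endif

	static inline bool hugepage_migration_supported(struct hstate *h)
	{
		return arch_hugetlb_migration_supported(h);
	}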

Issue: the current code checks CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
alone, even though no code should use that config option directly; the
check should go through hugepage_migration_supported() instead.  (Note
that the current code in vma_migratable() does not cause a failure or
bug, because unmap_and_move_huge_page() will catch an unsupported
hugepage and handle it properly.)
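
For reference, a sketch of that late check as it appears (abridged) in
unmap_and_move_huge_page() in mm/migrate.c:

	if (!hugepage_migration_supported(page_hstate(hpage))) {
		/* Unsupported hugepage: put it back, report unmovable. */
		putback_active_hugepage(hpage);
		return -ENOSYS;
	}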

This patch checks both factors via hugepage_migration_supported(),
improving the code's logic and robustness.  It enables an early
bail-out from the hugepage migration procedure, but because every
architecture that currently supports hugepage migration can migrate all
of its page sizes, we would not see a performance gain with this patch
applied.

vma_migratable() is moved to mm/mempolicy.c, because the circular
dependency between mempolicy.h and hugetlb.h makes defining it as an
inline function infeasible.

Signed-off-by: Li Xinhai <lixinhai.lxh@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Link: http://lkml.kernel.org/r/1579786179-30633-1-git-send-email-lixinhai.lxh@gmail.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mempolicy.h
mm/mempolicy.c

diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5228c62af41659bb7d5ae0e7db00969b9f16ef73..8165278c348a5ab01924264c8867badb0102625e 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -173,34 +173,7 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol);
 extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
 
 /* Check if a vma is migratable */
-static inline bool vma_migratable(struct vm_area_struct *vma)
-{
-       if (vma->vm_flags & (VM_IO | VM_PFNMAP))
-               return false;
-
-       /*
-        * DAX device mappings require predictable access latency, so avoid
-        * incurring periodic faults.
-        */
-       if (vma_is_dax(vma))
-               return false;
-
-#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
-       if (vma->vm_flags & VM_HUGETLB)
-               return false;
-#endif
-
-       /*
-        * Migration allocates pages in the highest zone. If we cannot
-        * do so then migration (at least from node to node) is not
-        * possible.
-        */
-       if (vma->vm_file &&
-               gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
-                                                               < policy_zone)
-                       return false;
-       return true;
-}
+extern bool vma_migratable(struct vm_area_struct *vma);
 
 extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
 extern void mpol_put_task_policy(struct task_struct *);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b190cd456acecab3c42a67c231d0369f66d1f076..58e3dfa2f83a30795740cc194960215c1f013d24 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1743,6 +1743,34 @@ COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
 
 #endif /* CONFIG_COMPAT */
 
+bool vma_migratable(struct vm_area_struct *vma)
+{
+       if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+               return false;
+
+       /*
+        * DAX device mappings require predictable access latency, so avoid
+        * incurring periodic faults.
+        */
+       if (vma_is_dax(vma))
+               return false;
+
+       if (is_vm_hugetlb_page(vma) &&
+               !hugepage_migration_supported(hstate_vma(vma)))
+               return false;
+
+       /*
+        * Migration allocates pages in the highest zone. If we cannot
+        * do so then migration (at least from node to node) is not
+        * possible.
+        */
+       if (vma->vm_file &&
+               gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
+                       < policy_zone)
+               return false;
+       return true;
+}
+
 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
                                                unsigned long addr)
 {