mm: soft-dirty: keep soft-dirty bits over thp migration
Author:     Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
AuthorDate: Fri, 8 Sep 2017 23:11:04 +0000 (16:11 -0700)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Sat, 9 Sep 2017 01:26:45 +0000 (18:26 -0700)
The soft-dirty bit is designed to be preserved across page migration.  This
patch makes it work in the same manner for THP migration too.
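
For context (not part of this patch's diff): userspace consumes soft-dirty
state through /proc/<pid>/clear_refs and /proc/<pid>/pagemap.  Below is a
minimal sketch of such a consumer; the file names and bit layout are the
documented soft-dirty ABI (writing "4" to clear_refs clears the bits,
pagemap bit 55 reports them), while the helper names are illustrative only.
This is exactly the tracking that must not be lost when a THP-mapped range
is migrated.

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	/* Illustrative helper: clear all soft-dirty bits for this process. */
	static int clear_soft_dirty(void)
	{
		int fd = open("/proc/self/clear_refs", O_WRONLY);
		ssize_t ret;

		if (fd < 0)
			return -1;
		ret = write(fd, "4", 1);	/* "4" selects soft-dirty clearing */
		close(fd);
		return ret == 1 ? 0 : -1;
	}

	/* Illustrative helper: was the page at vaddr written since the clear? */
	static int page_soft_dirty(uintptr_t vaddr)
	{
		uint64_t entry;
		long psize = sysconf(_SC_PAGESIZE);
		int fd = open("/proc/self/pagemap", O_RDONLY);
		int ret = -1;

		if (fd < 0)
			return -1;
		if (pread(fd, &entry, sizeof(entry),
			  (vaddr / psize) * sizeof(entry)) == sizeof(entry))
			ret = (entry >> 55) & 1;	/* bit 55: soft-dirty */
		close(fd);
		return ret;
	}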

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Zi Yan <zi.yan@cs.rutgers.edu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Nellans <dnellans@nvidia.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/include/asm/pgtable.h
fs/proc/task_mmu.c
include/asm-generic/pgtable.h
include/linux/swapops.h
mm/huge_memory.c

diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index bbeae4a..5b4c44d 100644
@@ -1172,6 +1172,23 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 {
        return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
 }
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+       return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+       return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+       return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
+}
+#endif
 #endif
 
 #define PKRU_AD_BIT 0x1
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 8eec35a..4b21c4e 100644
@@ -978,17 +978,22 @@ static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 {
        pmd_t pmd = *pmdp;
 
-       /* See comment in change_huge_pmd() */
-       pmdp_invalidate(vma, addr, pmdp);
-       if (pmd_dirty(*pmdp))
-               pmd = pmd_mkdirty(pmd);
-       if (pmd_young(*pmdp))
-               pmd = pmd_mkyoung(pmd);
-
-       pmd = pmd_wrprotect(pmd);
-       pmd = pmd_clear_soft_dirty(pmd);
-
-       set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+       if (pmd_present(pmd)) {
+               /* See comment in change_huge_pmd() */
+               pmdp_invalidate(vma, addr, pmdp);
+               if (pmd_dirty(*pmdp))
+                       pmd = pmd_mkdirty(pmd);
+               if (pmd_young(*pmdp))
+                       pmd = pmd_mkyoung(pmd);
+
+               pmd = pmd_wrprotect(pmd);
+               pmd = pmd_clear_soft_dirty(pmd);
+
+               set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+       } else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
+               pmd = pmd_swp_clear_soft_dirty(pmd);
+               set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+       }
 }
 #else
 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 4f93a6d..8e02430 100644
@@ -630,7 +630,24 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 #define arch_start_context_switch(prev)        do {} while (0)
 #endif
 
-#ifndef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+       return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+       return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+       return pmd;
+}
+#endif
+#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
 static inline int pte_soft_dirty(pte_t pte)
 {
        return 0;
@@ -675,6 +692,21 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 {
        return pte;
 }
+
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+       return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+       return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+       return pmd;
+}
 #endif
 
 #ifndef __HAVE_PFNMAP_TRACKING
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 82089fd..45b092a 100644
@@ -183,6 +183,8 @@ static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
 {
        swp_entry_t arch_entry;
 
+       if (pmd_swp_soft_dirty(pmd))
+               pmd = pmd_swp_clear_soft_dirty(pmd);
        arch_entry = __pmd_to_swp_entry(pmd);
        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b82585e..269b5df 100644
@@ -937,6 +937,8 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                if (is_write_migration_entry(entry)) {
                        make_migration_entry_read(&entry);
                        pmd = swp_entry_to_pmd(entry);
+                       if (pmd_swp_soft_dirty(*src_pmd))
+                               pmd = pmd_swp_mksoft_dirty(pmd);
                        set_pmd_at(src_mm, addr, src_pmd, pmd);
                }
                set_pmd_at(dst_mm, addr, dst_pmd, pmd);
@@ -1756,6 +1758,17 @@ static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
 }
 #endif
 
+static pmd_t move_soft_dirty_pmd(pmd_t pmd)
+{
+#ifdef CONFIG_MEM_SOFT_DIRTY
+       if (unlikely(is_pmd_migration_entry(pmd)))
+               pmd = pmd_swp_mksoft_dirty(pmd);
+       else if (pmd_present(pmd))
+               pmd = pmd_mksoft_dirty(pmd);
+#endif
+       return pmd;
+}
+
 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                  unsigned long new_addr, unsigned long old_end,
                  pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
@@ -1798,7 +1811,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                        pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
                        pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
                }
-               set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
+               pmd = move_soft_dirty_pmd(pmd);
+               set_pmd_at(mm, new_addr, new_pmd, pmd);
                if (new_ptl != old_ptl)
                        spin_unlock(new_ptl);
                if (force_flush)
@@ -1846,6 +1860,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                         */
                        make_migration_entry_read(&entry);
                        newpmd = swp_entry_to_pmd(entry);
+                       if (pmd_swp_soft_dirty(*pmd))
+                               newpmd = pmd_swp_mksoft_dirty(newpmd);
                        set_pmd_at(mm, addr, pmd, newpmd);
                }
                goto unlock;
@@ -2824,6 +2840,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
        unsigned long address = pvmw->address;
        pmd_t pmdval;
        swp_entry_t entry;
+       pmd_t pmdswp;
 
        if (!(pvmw->pmd && !pvmw->pte))
                return;
@@ -2837,8 +2854,10 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
        if (pmd_dirty(pmdval))
                set_page_dirty(page);
        entry = make_migration_entry(page, pmd_write(pmdval));
-       pmdval = swp_entry_to_pmd(entry);
-       set_pmd_at(mm, address, pvmw->pmd, pmdval);
+       pmdswp = swp_entry_to_pmd(entry);
+       if (pmd_soft_dirty(pmdval))
+               pmdswp = pmd_swp_mksoft_dirty(pmdswp);
+       set_pmd_at(mm, address, pvmw->pmd, pmdswp);
        page_remove_rmap(page, true);
        put_page(page);
 
@@ -2861,6 +2880,8 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
        entry = pmd_to_swp_entry(*pvmw->pmd);
        get_page(new);
        pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
+       if (pmd_swp_soft_dirty(*pvmw->pmd))
+               pmde = pmd_mksoft_dirty(pmde);
        if (is_write_migration_entry(entry))
                pmde = maybe_pmd_mkwrite(pmde, vma);