__split_huge_pte(vma, pmd, pte, haddr, freeze, page);
}
+
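+/*
+ * Unmap a contiguous-PTE mapped THP for migration: every PTE covering
+ * the huge page is invalidated and replaced with a migration swap entry.
+ */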
+void set_huge_pte_migration_entry(
+ struct page_vma_mapped_walk *pvmw,
+ struct page *page)
+{
+ int i;
+ struct vm_area_struct *vma = pvmw->vma;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long address = pvmw->address;
+ pte_t pteval, *pte;
+ swp_entry_t entry;
+ pte_t pteswp;
+ struct page *_page = page;
+
+ if (!(pvmw->pmd && pvmw->pte))
+ return;
+
+ flush_cache_range(vma, address, address + HPAGE_CONT_PTE_SIZE);
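+
+ /* Invalidate each PTE in the range and install a migration entry in its place */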
+ for (i = 0, pte = pvmw->pte; i < HPAGE_CONT_PTE_NR; i++, pte++) {
+ pteval = ptep_invalidate(vma, address, pte);
+ if (pte_dirty(pteval))
+ set_page_dirty(_page);
+ entry = make_migration_entry(page, pte_write(pteval));
+ pteswp = swp_entry_to_pte(entry);
+ if (pte_soft_dirty(pteval))
+ pteswp = pte_swp_mksoft_dirty(pteswp);
+ set_pte_at(mm, address, pte, pteswp);
+ _page++;
+ address += PAGE_SIZE;
+ }
+
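+ /* Advance the walk cursor past the contiguous range just converted */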
+ pvmw->pte = pte;
+ pvmw->address = address;
+
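+ /* All PTEs now hold migration entries; drop the rmap and the mapping reference */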
+ page_remove_rmap(page, true);
+ put_page(page);
+}
+
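+/*
+ * Remap a migrated huge page: replace the migration entries with a
+ * contiguous huge PTE pointing at @new and restore its reverse mapping.
+ */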
+void remove_migration_huge_pte(
+ struct page_vma_mapped_walk *pvmw, struct page *new)
+{
+ struct vm_area_struct *vma = pvmw->vma;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long address = pvmw->address;
+ unsigned long mmun_start = address & HPAGE_CONT_PTE_MASK;
+ pte_t ptee;
+ swp_entry_t entry;
+
+ if (!(pvmw->pmd && pvmw->pte))
+ return;
+
+ entry = pte_to_swp_entry(*pvmw->pte);
+ get_page(new);
+ ptee = pte_mkold(arch_make_huge_pte(new, vma));
+ if (pte_swp_soft_dirty(*pvmw->pte))
+ ptee = pte_mksoft_dirty(ptee);
+ if (is_write_migration_entry(entry))
+ ptee = maybe_mkwrite(ptee, vma);
+
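+ /* Re-establish the rmap and write the contiguous huge PTE for @new */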
+ flush_cache_range(vma, mmun_start, mmun_start + HPAGE_CONT_PTE_SIZE);
+ if (PageAnon(new))
+ page_add_anon_rmap(new, vma, mmun_start, true);
+ else
+ page_add_file_rmap(new, true);
+
+ arch_set_huge_pte_at(mm, mmun_start, pvmw->pte, ptee, 0);
+ if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
+ mlock_vma_page(new);
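+ /* Step the walk past the range that was just remapped */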
+ pvmw->address = address + HPAGE_CONT_PTE_SIZE;
+ pvmw->pte = pvmw->pte + HPAGE_CONT_PTE_NR;
+ update_mmu_cache_pmd(vma, address, pvmw->pmd);
+}
#endif /* CONFIG_FINEGRAINED_THP */
extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
struct page *new);
+#ifdef CONFIG_FINEGRAINED_THP
+extern void set_huge_pte_migration_entry(struct page_vma_mapped_walk *pvmw,
+ struct page *page);
+
+extern void remove_migration_huge_pte(struct page_vma_mapped_walk *pvmw,
+ struct page *new);
+#endif
+
extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
BUILD_BUG();
}
+#ifdef CONFIG_FINEGRAINED_THP
+static inline void set_huge_pte_migration_entry(struct page_vma_mapped_walk *pvmw,
+ struct page *page)
+{
+ BUILD_BUG();
+}
+
+static inline void remove_migration_huge_pte(struct page_vma_mapped_walk *pvmw,
+ struct page *new)
+{
+ BUILD_BUG();
+}
+#endif
static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }
remove_migration_pmd(&pvmw, new);
continue;
}
+#ifdef CONFIG_FINEGRAINED_THP
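+ /* Contiguous-PTE mapped THP migration entry */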
+ if (PageTransHuge(page) && pte_cont(*pvmw.pte)) {
+ VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
+ remove_migration_huge_pte(&pvmw, new);
+ continue;
+ }
+#endif /* CONFIG_FINEGRAINED_THP */
#endif
get_page(new);
page_dup_rmap(new, true);
} else
#endif
-#ifdef CONFIG_FINEGRAINED_THP
- if (PageTransHuge(new)) {
- pte = pte_mkcont(pte_mkhuge(pte));
- arch_set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte, 0);
- if (PageAnon(new))
- page_add_anon_rmap(new, vma, pvmw.address, true);
- else
- page_dup_rmap(new, true);
- } else
-#endif /* CONFIG_FINEGRAINED_THP */
{
set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);