From 5cbcf2258b71cbae358961796c3a879e1b06a9ff Mon Sep 17 00:00:00 2001
From: Muchun Song
Date: Tue, 22 Mar 2022 14:41:53 -0700
Subject: [PATCH] mm: thp: fix wrong cache flush in remove_migration_pmd()

Patch series "Fix some cache flush bugs", v5.

This series focuses on fixing cache maintenance.

This patch (of 7):

flush_cache_range() is only justified when the page is already present
in the process page table, but here the page is installed into the page
table right after the call, so using this interface is wrong.  There is
also no need to invalidate the cache, since the range was non-present
before remove_migration_pmd() ran.  So just remove the call.

Link: https://lkml.kernel.org/r/20220210123058.79206-1-songmuchun@bytedance.com
Link: https://lkml.kernel.org/r/20220210123058.79206-2-songmuchun@bytedance.com
Signed-off-by: Muchun Song
Reviewed-by: Zi Yan
Cc: Kirill A. Shutemov
Cc: David Rientjes
Cc: Lars Persson
Cc: Mike Kravetz
Cc: Zi Yan
Cc: Xiongchun Duan
Cc: Fam Zheng
Cc: Muchun Song
Cc: Axel Rasmussen
Cc: Peter Xu
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/huge_memory.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 406a3c2..88fe13a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3197,7 +3197,6 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
 
 	if (pmd_swp_uffd_wp(*pvmw->pmd))
 		pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
-	flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
 	if (PageAnon(new))
 		page_add_anon_rmap(new, vma, mmun_start, true);
 	else
@@ -3205,6 +3204,8 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
 	set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
 	if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
 		mlock_vma_page(new);
+
+	/* No need to invalidate - it was non-present before */
 	update_mmu_cache_pmd(vma, address, pvmw->pmd);
 }
 #endif
-- 
2.7.4
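
For readers without the full file at hand, here is a sketch of how the
tail of remove_migration_pmd() reads once this patch is applied. It is
reconstructed purely from the hunks above, not from the full source;
the else branch and the lines between the two hunks are elided:

	if (pmd_swp_uffd_wp(*pvmw->pmd))
		pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));

	/*
	 * The flush_cache_range() call that used to sit here is gone:
	 * the PMD is still non-present at this point, so there is no
	 * user mapping whose cache lines could need flushing.
	 */
	if (PageAnon(new))
		page_add_anon_rmap(new, vma, mmun_start, true);

	/* [else branch and lines between the two hunks elided] */

	set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
	if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
		mlock_vma_page(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache_pmd(vma, address, pvmw->pmd);

The ordering is the point: cache maintenance for a newly installed
mapping belongs after set_pmd_at(), which is exactly what the existing
update_mmu_cache_pmd() call provides, while flush_cache_range() is only
meaningful for a mapping that is already present.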