mm/huge_memory: use helper touch_pmd in huge_pmd_set_accessed
author: Miaohe Lin <linmiaohe@huawei.com>
Mon, 4 Jul 2022 13:21:50 +0000 (21:21 +0800)
committer: akpm <akpm@linux-foundation.org>
Mon, 18 Jul 2022 00:14:45 +0000 (17:14 -0700)
Use helper touch_pmd to set pmd accessed to simplify the code and improve
the readability. No functional change intended.

Link: https://lkml.kernel.org/r/20220704132201.14611-6-linmiaohe@huawei.com
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zach O'Keefe <zokeefe@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index cda4180..ebf2a71 100644 (file)
@@ -1020,15 +1020,15 @@ EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot);
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-               pmd_t *pmd, int flags)
+                     pmd_t *pmd, bool write)
 {
        pmd_t _pmd;
 
        _pmd = pmd_mkyoung(*pmd);
-       if (flags & FOLL_WRITE)
+       if (write)
                _pmd = pmd_mkdirty(_pmd);
        if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
-                               pmd, _pmd, flags & FOLL_WRITE))
+                                 pmd, _pmd, write))
                update_mmu_cache_pmd(vma, addr, pmd);
 }
 
@@ -1061,7 +1061,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                return NULL;
 
        if (flags & FOLL_TOUCH)
-               touch_pmd(vma, addr, pmd, flags);
+               touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
 
        /*
         * device mapped pages can only be returned if the
@@ -1298,21 +1298,13 @@ unlock:
 
 void huge_pmd_set_accessed(struct vm_fault *vmf)
 {
-       pmd_t entry;
-       unsigned long haddr;
        bool write = vmf->flags & FAULT_FLAG_WRITE;
-       pmd_t orig_pmd = vmf->orig_pmd;
 
        vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
-       if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
+       if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
                goto unlock;
 
-       entry = pmd_mkyoung(orig_pmd);
-       if (write)
-               entry = pmd_mkdirty(entry);
-       haddr = vmf->address & HPAGE_PMD_MASK;
-       if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
-               update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
+       touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
 
 unlock:
        spin_unlock(vmf->ptl);
@@ -1448,7 +1440,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                return ERR_PTR(-ENOMEM);
 
        if (flags & FOLL_TOUCH)
-               touch_pmd(vma, addr, pmd, flags);
+               touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
 
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
        VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);