damon: use pmdp_get instead of directly dereferencing pmd
authorLevi Yun <ppbuk5246@gmail.com>
Thu, 27 Jul 2023 21:21:57 +0000 (06:21 +0900)
committerAndrew Morton <akpm@linux-foundation.org>
Mon, 21 Aug 2023 20:37:30 +0000 (13:37 -0700)
As with ptep_get, use the pmdp_get wrapper when accessing the pmd value instead of
directly dereferencing the pmd pointer.

Link: https://lkml.kernel.org/r/20230727212157.2985025-1-ppbuk5246@gmail.com
Signed-off-by: Levi Yun <ppbuk5246@gmail.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/damon/ops-common.c
mm/damon/paddr.c
mm/damon/vaddr.c

index e940802..ac1c3fa 100644 (file)
@@ -54,7 +54,7 @@ void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr
 void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       struct folio *folio = damon_get_folio(pmd_pfn(*pmd));
+       struct folio *folio = damon_get_folio(pmd_pfn(pmdp_get(pmd)));
 
        if (!folio)
                return;
index 40801e3..909db25 100644 (file)
@@ -94,7 +94,7 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
                                mmu_notifier_test_young(vma->vm_mm, addr);
                } else {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                       *accessed = pmd_young(*pvmw.pmd) ||
+                       *accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
                                !folio_test_idle(folio) ||
                                mmu_notifier_test_young(vma->vm_mm, addr);
 #else
index 2fcc973..d01cc46 100644 (file)
@@ -301,16 +301,19 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
                unsigned long next, struct mm_walk *walk)
 {
        pte_t *pte;
+       pmd_t pmde;
        spinlock_t *ptl;
 
-       if (pmd_trans_huge(*pmd)) {
+       if (pmd_trans_huge(pmdp_get(pmd))) {
                ptl = pmd_lock(walk->mm, pmd);
-               if (!pmd_present(*pmd)) {
+               pmde = pmdp_get(pmd);
+
+               if (!pmd_present(pmde)) {
                        spin_unlock(ptl);
                        return 0;
                }
 
-               if (pmd_trans_huge(*pmd)) {
+               if (pmd_trans_huge(pmde)) {
                        damon_pmdp_mkold(pmd, walk->vma, addr);
                        spin_unlock(ptl);
                        return 0;
@@ -439,21 +442,25 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
        struct damon_young_walk_private *priv = walk->private;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       if (pmd_trans_huge(*pmd)) {
+       if (pmd_trans_huge(pmdp_get(pmd))) {
+               pmd_t pmde;
+
                ptl = pmd_lock(walk->mm, pmd);
-               if (!pmd_present(*pmd)) {
+               pmde = pmdp_get(pmd);
+
+               if (!pmd_present(pmde)) {
                        spin_unlock(ptl);
                        return 0;
                }
 
-               if (!pmd_trans_huge(*pmd)) {
+               if (!pmd_trans_huge(pmde)) {
                        spin_unlock(ptl);
                        goto regular_page;
                }
-               folio = damon_get_folio(pmd_pfn(*pmd));
+               folio = damon_get_folio(pmd_pfn(pmde));
                if (!folio)
                        goto huge_out;
-               if (pmd_young(*pmd) || !folio_test_idle(folio) ||
+               if (pmd_young(pmde) || !folio_test_idle(folio) ||
                                        mmu_notifier_test_young(walk->mm,
                                                addr))
                        priv->young = true;