mm/damon/vaddr: rename 'damon_young_walk_private->page_sz' to 'folio_sz'
author     SeongJae Park <sj@kernel.org>
           Mon, 9 Jan 2023 21:33:30 +0000 (21:33 +0000)
committer  Andrew Morton <akpm@linux-foundation.org>
           Thu, 19 Jan 2023 01:12:58 +0000 (17:12 -0800)
Patch series "mm/damon/{v,p}addr: misc fixups for folio usage".

DAMON's monitoring operations sets for the virtual and the physical address
spaces use folios now, but some of the code does not reflect the fact.
Further clean up the code for folio usage.

This patch (of 6):

DAMON's virtual address space monitoring operations set uses folios now.
Rename 'damon_young_walk_private->page_sz' to 'folio_sz' to reflect the
fact.

Link: https://lkml.kernel.org/r/20230109213335.62525-1-sj@kernel.org
Link: https://lkml.kernel.org/r/20230109213335.62525-2-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
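
For reference, the sketch below is a minimal user-space model (not kernel
code) of the out-parameter flow this diff touches: the page table walk
fills 'folio_sz' through 'struct damon_young_walk_private', and the caller
caches that size so a later sampling address falling in the same folio can
reuse the access check result.  'fake_va_young()', main(), and the
hardcoded 2MiB folio size are simplified stand-ins rather than DAMON code,
and PAGE_SIZE/ALIGN_DOWN are redefined locally for the sketch.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

/* same shape as the renamed struct in mm/damon/vaddr.c */
struct damon_young_walk_private {
	/* size of the folio for the access checked virtual memory address */
	unsigned long *folio_sz;
	bool young;
};

/*
 * Stand-in for damon_va_young(): pretend every address sits in a young
 * 2MiB folio, and report the folio size through the walk private struct.
 */
static bool fake_va_young(unsigned long addr, unsigned long *folio_sz)
{
	struct damon_young_walk_private arg = {
		.folio_sz = folio_sz,
		.young = false,
	};

	(void)addr;
	*arg.folio_sz = 2UL << 20;	/* e.g. HPAGE_PMD_SIZE on x86-64 */
	arg.young = true;
	return arg.young;
}

int main(void)
{
	unsigned long last_folio_sz = PAGE_SIZE;
	unsigned long a = 0x200123UL, b = 0x200ffcUL;

	fake_va_young(a, &last_folio_sz);
	/* b falls in the same 2MiB folio, so the result can be reused */
	printf("same folio: %d\n",
	       ALIGN_DOWN(a, last_folio_sz) ==
	       ALIGN_DOWN(b, last_folio_sz));
	return 0;
}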
mm/damon/vaddr.c

index 9d92c5e..d6cb1fc 100644
@@ -422,7 +422,8 @@ static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
 }
 
 struct damon_young_walk_private {
-       unsigned long *page_sz;
+       /* size of the folio for the access checked virtual memory address */
+       unsigned long *folio_sz;
        bool young;
 };
 
@@ -452,7 +453,7 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
                if (pmd_young(*pmd) || !folio_test_idle(folio) ||
                                        mmu_notifier_test_young(walk->mm,
                                                addr)) {
-                       *priv->page_sz = HPAGE_PMD_SIZE;
+                       *priv->folio_sz = HPAGE_PMD_SIZE;
                        priv->young = true;
                }
                folio_put(folio);
@@ -474,7 +475,7 @@ regular_page:
                goto out;
        if (pte_young(*pte) || !folio_test_idle(folio) ||
                        mmu_notifier_test_young(walk->mm, addr)) {
-               *priv->page_sz = PAGE_SIZE;
+               *priv->folio_sz = PAGE_SIZE;
                priv->young = true;
        }
        folio_put(folio);
@@ -504,7 +505,7 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
 
        if (pte_young(entry) || !folio_test_idle(folio) ||
            mmu_notifier_test_young(walk->mm, addr)) {
-               *priv->page_sz = huge_page_size(h);
+               *priv->folio_sz = huge_page_size(h);
                priv->young = true;
        }
 
@@ -524,10 +525,10 @@ static const struct mm_walk_ops damon_young_ops = {
 };
 
 static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
-               unsigned long *page_sz)
+               unsigned long *folio_sz)
 {
        struct damon_young_walk_private arg = {
-               .page_sz = page_sz,
+               .folio_sz = folio_sz,
                .young = false,
        };
 
@@ -547,18 +548,18 @@ static void __damon_va_check_access(struct mm_struct *mm,
                                struct damon_region *r, bool same_target)
 {
        static unsigned long last_addr;
-       static unsigned long last_page_sz = PAGE_SIZE;
+       static unsigned long last_folio_sz = PAGE_SIZE;
        static bool last_accessed;
 
        /* If the region is in the last checked page, reuse the result */
-       if (same_target && (ALIGN_DOWN(last_addr, last_page_sz) ==
-                               ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
+       if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
+                               ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
                if (last_accessed)
                        r->nr_accesses++;
                return;
        }
 
-       last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
+       last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
        if (last_accessed)
                r->nr_accesses++;