mm, thp: modify coverage of CONFIG_FINEGRAINED_THP macro
author     Sung-hun Kim <sfoon.kim@samsung.com>
           Thu, 30 Sep 2021 03:28:56 +0000 (12:28 +0900)
committer  Hoegeun Kwon <hoegeun.kwon@samsung.com>
           Mon, 7 Feb 2022 08:01:41 +0000 (17:01 +0900)
Some code should be built only into fTHP-enabled
kernels. This patch rearranges the coverage of the
CONFIG_FINEGRAINED_THP macro accordingly.
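
For example, in __split_huge_pmd() only the special (pfn-mapped) huge
page split path depends on fTHP-only helpers such as
thp_pte_alloc_locked() and thp_pmd_pgprot(), so only that branch is
wrapped; the normal-page path remains common code. Roughly, the
resulting control flow looks like this (an illustrative sketch, not
the verbatim file contents):

  #ifdef CONFIG_FINEGRAINED_THP
          if (pmd_trans_huge(*pmd) && !vm_normal_page_pmd(vma, address, *pmd)) {
                  /* special (pfn) huge mapping: remap it as PTE mappings */
                  ...
                  goto out;
          } else
  #endif /* CONFIG_FINEGRAINED_THP */
          if (pmd_trans_huge(*pmd) && vm_normal_page_pmd(vma, address, *pmd)) {
                  /* normal THP-backed mapping: existing split path */
                  ...
          }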

Change-Id: I0541c36369f8bd7a8fe4b8868c51dc0e6879f100
Signed-off-by: Sung-hun Kim <sfoon.kim@samsung.com>
mm/huge_memory.c
mm/memory.c

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9ae0287..aedff5b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2215,6 +2215,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        }
 }
 
+#ifdef CONFIG_FINEGRAINED_THP
 static int thp_pte_alloc_locked(struct mm_struct *mm, pmd_t *pmd)
 {
        pgtable_t new = pte_alloc_one(mm);
@@ -2269,6 +2270,7 @@ static inline pgprot_t thp_pmd_pgprot(pmd_t pmd)
 
        return __pgprot(pmd_val(pfn_pmd(pfn, __pgprot(0))) ^ pmd_val(pmd));
 }
+#endif
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page)
@@ -2296,6 +2298,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        }
 
 repeat:
+#ifdef CONFIG_FINEGRAINED_THP
        if (pmd_trans_huge(*pmd) && !vm_normal_page_pmd(vma, address, *pmd)) {
                struct mm_struct *mm = vma->vm_mm;
                unsigned long haddr = address & HPAGE_PMD_MASK;
@@ -2308,7 +2311,9 @@ repeat:
                                           pmd_pfn(orig_pmd),
                                           thp_pmd_pgprot(orig_pmd));
                goto out;
-       } else if (pmd_trans_huge(*pmd) && vm_normal_page_pmd(vma, address, *pmd)) {
+       } else
+#endif /* CONFIG_FINEGRAINED_THP */
+       if (pmd_trans_huge(*pmd) && vm_normal_page_pmd(vma, address, *pmd)) {
                if (!page) {
                        page = pmd_page(*pmd);
                        /*
diff --git a/mm/memory.c b/mm/memory.c
index 1d640c1..e1924c1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2243,37 +2243,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
 {
        return arch_remap_pte_range(mm, pmd, addr, end, pfn, prot);
 }
-#else /* CONFIG_FINEGRAINED_THP */
-static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
-                       unsigned long addr, unsigned long end,
-                       unsigned long pfn, pgprot_t prot)
-{
-       pte_t *pte, *mapped_pte;
-       spinlock_t *ptl;
-       int err = 0;
 
-       mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
-       if (!pte)
-               return -ENOMEM;
-       arch_enter_lazy_mmu_mode();
-       do {
-               BUG_ON(!pte_none(*pte));
-               if (!pfn_modify_allowed(pfn, prot)) {
-                       err = -EACCES;
-                       break;
-               }
-
-               set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
-               pfn++;
-               pte++;
-               addr += PAGE_SIZE;
-       } while (addr != end);
-       arch_leave_lazy_mmu_mode();
-       pte_unmap_unlock(mapped_pte, ptl);
-       return err;
-}
-#endif /* CONFIG_FINEGRAINED_THP */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static int remap_try_huge_pmd(struct mm_struct *mm, pmd_t *pmd, unsigned long addr,
                                unsigned long end, unsigned long pfn,
                                pgprot_t prot)
@@ -2305,7 +2275,36 @@ static int remap_try_huge_pmd(struct mm_struct *mm, pmd_t *pmd, unsigned long ad
 
        return ret;
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#else /* CONFIG_FINEGRAINED_THP */
+static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
+                       unsigned long addr, unsigned long end,
+                       unsigned long pfn, pgprot_t prot)
+{
+       pte_t *pte, *mapped_pte;
+       spinlock_t *ptl;
+       int err = 0;
+
+       mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+       if (!pte)
+               return -ENOMEM;
+       arch_enter_lazy_mmu_mode();
+       do {
+               BUG_ON(!pte_none(*pte));
+               if (!pfn_modify_allowed(pfn, prot)) {
+                       err = -EACCES;
+                       break;
+               }
+
+               set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
+               pfn++;
+               pte++;
+               addr += PAGE_SIZE;
+       } while (addr != end);
+       arch_leave_lazy_mmu_mode();
+       pte_unmap_unlock(mapped_pte, ptl);
+       return err;
+}
+#endif /* CONFIG_FINEGRAINED_THP */
 
 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
                        unsigned long addr, unsigned long end,
@@ -2322,12 +2321,11 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
        VM_BUG_ON(pmd_trans_huge(*pmd));
        do {
                next = pmd_addr_end(addr, end);
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifdef CONFIG_FINEGRAINED_THP
                if (remap_try_huge_pmd(mm, pmd, addr, next,
                                       pfn + (addr >> PAGE_SHIFT), prot))
                        continue;
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* CONFIG_FINEGRAINED_THP */
                err = remap_pte_range(mm, pmd, addr, next,
                                pfn + (addr >> PAGE_SHIFT), prot);
                if (err)
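
After this patch, the remap helpers in mm/memory.c are laid out roughly
as follows (a structural sketch reconstructed from the hunks above, not
verbatim code): remap_try_huge_pmd() is built, and called, only when
CONFIG_FINEGRAINED_THP is set, while the generic per-PTE
remap_pte_range() loop lives in the #else branch.

  #ifdef CONFIG_FINEGRAINED_THP
  static int remap_pte_range(...);      /* wraps arch_remap_pte_range() */
  static int remap_try_huge_pmd(...);   /* try a huge-PMD mapping first */
  #else /* CONFIG_FINEGRAINED_THP */
  static int remap_pte_range(...);      /* generic per-PTE mapping loop */
  #endif /* CONFIG_FINEGRAINED_THP */

  static inline int remap_pmd_range(...)
  {
          ...
          do {
                  next = pmd_addr_end(addr, end);
  #ifdef CONFIG_FINEGRAINED_THP
                  if (remap_try_huge_pmd(mm, pmd, addr, next,
                                         pfn + (addr >> PAGE_SHIFT), prot))
                          continue;
  #endif /* CONFIG_FINEGRAINED_THP */
                  err = remap_pte_range(mm, pmd, addr, next,
                                  pfn + (addr >> PAGE_SHIFT), prot);
                  ...
          } while (...);
  }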