powerpc/mm: Move vma_mmu_pagesize()
author: Christophe Leroy <christophe.leroy@csgroup.eu>
Sat, 9 Apr 2022 17:17:29 +0000 (19:17 +0200)
committer: Michael Ellerman <mpe@ellerman.id.au>
Thu, 5 May 2022 12:11:57 +0000 (22:11 +1000)
vma_mmu_pagesize() is only required for slices,
otherwise there is a generic weak version doing the
exact same thing.

Move it to slice.c

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/1302e000d529c93d07208f1fae90f938e7a551b4.1649523076.git.christophe.leroy@csgroup.eu
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/slice.c

index b642a5a..7b89f07 100644 (file)
@@ -565,17 +565,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 }
 #endif
 
-unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
-{
-       /* With radix we don't use slice, so derive it from vma*/
-       if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
-               unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
-
-               return 1UL << mmu_psize_to_shift(psize);
-       }
-       return vma_kernel_pagesize(vma);
-}
-
 bool __init arch_hugetlb_valid_size(unsigned long size)
 {
        int shift = __ffs(size);
index f42711f..8a3ac06 100644 (file)
@@ -759,4 +759,13 @@ int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 
        return !slice_check_range_fits(mm, maskp, addr, len);
 }
+
+unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+       /* With radix we don't use slices, so derive the page size from the vma */
+       if (radix_enabled())
+               return vma_kernel_pagesize(vma);
+
+       return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start));
+}
 #endif