riscv: Fix hugetlb_mask_last_page() when NAPOT is enabled
author: Alexandre Ghiti <alexghiti@rivosinc.com>
Wed, 17 Jan 2024 19:57:41 +0000 (20:57 +0100)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 16 Feb 2024 18:10:53 +0000 (19:10 +0100)
[ Upstream commit a179a4bfb694f80f2709a1d0398469e787acb974 ]

When NAPOT is enabled, an additional hugepage size becomes available, so
hugetlb_mask_last_page() must be made aware of it.
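
For context (not part of this patch): the mask returned by
hugetlb_mask_last_page() is ORed into the walk address by the generic
hugetlb code (e.g. in copy_hugetlb_page_range()) when huge_pte_offset()
finds no entry, so the walker can jump to the last hugepage of the
enclosing region instead of probing every slot. The sketch below is a
minimal user-space illustration of that arithmetic for the 64 KiB NAPOT
case; the constants (4 KiB base pages, 2 MiB PMD, 1 GiB PUD) and the
helper mask_last_page() are illustrative stand-ins, not kernel code.

    /*
     * Minimal user-space sketch (not kernel code) of the mask logic added
     * by this patch, assuming the riscv defaults: 64 KiB NAPOT hugepages,
     * 2 MiB PMD regions, 1 GiB PUD regions.
     */
    #include <stdio.h>

    #define NAPOT_64K_SIZE   0x10000UL      /* 64 KiB */
    #define PMD_SIZE         0x200000UL     /* 2 MiB  */
    #define PUD_SIZE         0x40000000UL   /* 1 GiB  */

    /* Mirrors hugetlb_mask_last_page() for the PMD and 64 KiB NAPOT cases. */
    static unsigned long mask_last_page(unsigned long hp_size)
    {
            if (hp_size == PMD_SIZE)
                    return PUD_SIZE - PMD_SIZE;
            if (hp_size == NAPOT_64K_SIZE)
                    return PMD_SIZE - NAPOT_64K_SIZE;
            return 0UL;
    }

    int main(void)
    {
            unsigned long sz = NAPOT_64K_SIZE;
            unsigned long mask = mask_last_page(sz);
            unsigned long addr = 0x200000UL;     /* start of a 2 MiB region */

            /*
             * A walker that finds no page table entry at 'addr' ORs in the
             * mask to land on the last hugepage of the enclosing 2 MiB
             * region, so the next 'addr += sz' step leaves the region in
             * one go instead of probing every 64 KiB slot.
             */
            printf("mask          = %#lx\n", mask);                /* 0x1f0000 */
            printf("skip-ahead to = %#lx\n", addr | mask);         /* 0x3f0000 */
            printf("next addr     = %#lx\n", (addr | mask) + sz);  /* 0x400000 */
            return 0;
    }

Without this change, riscv falls back to the generic weak implementation
of hugetlb_mask_last_page(), which does not know about the NAPOT size and
returns 0 for it, so the walkers cannot skip ahead and step one 64 KiB
hugepage at a time.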

Fixes: 82a1a1f3bfb6 ("riscv: mm: support Svnapot in hugetlb page")
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Link: https://lore.kernel.org/r/20240117195741.1926459-3-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/riscv/mm/hugetlbpage.c

index 24c0179..87af75e 100644
@@ -125,6 +125,26 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
        return pte;
 }
 
+unsigned long hugetlb_mask_last_page(struct hstate *h)
+{
+       unsigned long hp_size = huge_page_size(h);
+
+       switch (hp_size) {
+#ifndef __PAGETABLE_PMD_FOLDED
+       case PUD_SIZE:
+               return P4D_SIZE - PUD_SIZE;
+#endif
+       case PMD_SIZE:
+               return PUD_SIZE - PMD_SIZE;
+       case napot_cont_size(NAPOT_CONT64KB_ORDER):
+               return PMD_SIZE - napot_cont_size(NAPOT_CONT64KB_ORDER);
+       default:
+               break;
+       }
+
+       return 0UL;
+}
+
 static pte_t get_clear_contig(struct mm_struct *mm,
                              unsigned long addr,
                              pte_t *ptep,