arm64/hugetlb: Reserve CMA areas for gigantic pages on 16K and 64K configs
author    Anshuman Khandual <anshuman.khandual@arm.com>
          Wed, 1 Jul 2020 04:42:01 +0000 (10:12 +0530)
committer Catalin Marinas <catalin.marinas@arm.com>
          Wed, 15 Jul 2020 12:38:03 +0000 (13:38 +0100)
Currently the 'hugetlb_cma=' command line argument does not create a CMA area
on ARM64_16K_PAGES and ARM64_64K_PAGES based platforms. Instead, it just ends
up with the following warning message, because hugetlb_cma_reserve() never
gets called for these huge page sizes.

[   64.255669] hugetlb_cma: the option isn't supported by current arch

This enables CMA area reservation on ARM64_16K_PAGES and ARM64_64K_PAGES
configs by defining a unified arm64_hugetlb_cma_reserve() that is wrapped in
CONFIG_CMA. The call site for arm64_hugetlb_cma_reserve() is also protected,
as <asm/hugetlb.h> is conditionally included and hence cannot contain a stub
for the inverse config, i.e. !(CONFIG_HUGETLB_PAGE && CONFIG_CMA).
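
For reference, the order values this change ends up passing to
hugetlb_cma_reserve() can be worked out from the page table SHIFT macros.
Below is a standalone sketch of that arithmetic; the SHIFT values are
hard-coded assumptions mirroring the arm64 definitions at the time of this
commit (where CONT_PMD_SHIFT was only the extra bits for contiguous PMDs,
not yet folded into PMD_SHIFT), not taken from kernel headers:

  /* sketch: gigantic huge page order per arm64 page size config */
  #include <stdio.h>

  int main(void)
  {
          struct {
                  const char *cfg;
                  int page_shift;     /* PAGE_SHIFT */
                  int level_shift;    /* PUD_SHIFT (4K) or PMD_SHIFT (16K/64K) */
                  int cont_pmd_shift; /* extra bits for 32 contiguous PMDs */
          } c[] = {
                  { "4K  (PUD)",      12, 30, 0 },
                  { "16K (CONT PMD)", 14, 25, 5 },
                  { "64K (CONT PMD)", 16, 29, 5 },
          };

          for (int i = 0; i < 3; i++) {
                  /* order = [CONT_PMD_SHIFT +] {PUD,PMD}_SHIFT - PAGE_SHIFT */
                  int order = c[i].cont_pmd_shift + c[i].level_shift - c[i].page_shift;

                  printf("%s: order %2d -> %lluG\n", c[i].cfg, order,
                         (1ULL << (order + c[i].page_shift)) >> 30);
          }
          return 0;
  }

This works out to order 18 (1G) on 4K, order 16 (1G) on 16K and order 18
(16G) on 64K pages, i.e. the largest entry in each row of the support matrix
added below.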

Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Barry Song <song.bao.hua@hisilicon.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Link: https://lore.kernel.org/r/1593578521-24672-1-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/hugetlb.h
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/init.c

diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 94ba0c5..5abf91e 100644
@@ -49,6 +49,8 @@ extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                 pte_t *ptep, pte_t pte, unsigned long sz);
 #define set_huge_swap_pte_at set_huge_swap_pte_at
 
+void __init arm64_hugetlb_cma_reserve(void);
+
 #include <asm-generic/hugetlb.h>
 
 #endif /* __ASM_HUGETLB_H */
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index c790847..aa421bf 100644
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
 
+/*
+ * HugeTLB Support Matrix
+ *
+ * ---------------------------------------------------
+ * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
+ * ---------------------------------------------------
+ * |     4K    |   64K    |   2M  |    32M   |   1G  |
+ * |    16K    |    2M    |  32M  |     1G   |       |
+ * |    64K    |    2M    | 512M  |    16G   |       |
+ * ---------------------------------------------------
+ */
+
+/*
+ * Reserve CMA areas for the largest supported gigantic
+ * huge page when requested. Any other smaller gigantic
+ * huge pages could still be served from those areas.
+ */
+#ifdef CONFIG_CMA
+void __init arm64_hugetlb_cma_reserve(void)
+{
+       int order;
+
+#ifdef CONFIG_ARM64_4K_PAGES
+       order = PUD_SHIFT - PAGE_SHIFT;
+#else
+       order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
+#endif
+       /*
+        * HugeTLB CMA reservation is required for gigantic
+        * huge pages which could not be allocated via the
+        * page allocator. Just warn if there is any change
+        * breaking this assumption.
+        */
+       WARN_ON(order <= MAX_ORDER);
+       hugetlb_cma_reserve(order);
+}
+#endif /* CONFIG_CMA */
+
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 bool arch_hugetlb_migration_supported(struct hstate *h)
 {
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 6c3eb42..f8c19c6 100644
@@ -425,8 +425,8 @@ void __init bootmem_init(void)
         * initialize node_online_map that gets used in hugetlb_cma_reserve()
         * while allocating required CMA size across online nodes.
         */
-#ifdef CONFIG_ARM64_4K_PAGES
-       hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
+       arm64_hugetlb_cma_reserve();
 #endif
 
        /*
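
For context on the guard at this call site: a no-op stub for
arm64_hugetlb_cma_reserve() cannot simply live in <asm/hugetlb.h>, because
that header is only pulled in (via <linux/hugetlb.h>) when
CONFIG_HUGETLB_PAGE is enabled, so it would never be visible here on
!CONFIG_HUGETLB_PAGE builds. A simplified sketch of the include structure
this reasoning rests on (assumed shape, not the literal kernel headers):

  /* include/linux/hugetlb.h (simplified): arch header is conditional */
  #ifdef CONFIG_HUGETLB_PAGE
  #include <asm/hugetlb.h>  /* declares arm64_hugetlb_cma_reserve() */
  #endif

  /* arch/arm64/mm/init.c: with no stub visible in the inverse config,
   * the caller itself is compiled out instead.
   */
  #if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
          arm64_hugetlb_cma_reserve();
  #endif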