pte_t *ptep, pte_t pte, unsigned long sz);
#define set_huge_swap_pte_at set_huge_swap_pte_at
+void __init arm64_hugetlb_cma_reserve(void);
+
#include <asm-generic/hugetlb.h>
#endif /* __ASM_HUGETLB_H */
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
+/*
+ * HugeTLB Support Matrix
+ *
+ * ---------------------------------------------------
+ * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
+ * ---------------------------------------------------
+ * |     4K    |    64K   |   2M  |    32M   |   1G  |
+ * |    16K    |     2M   |  32M  |     1G   |       |
+ * |    64K    |     2M   |  512M |    16G   |       |
+ * ---------------------------------------------------
+ */
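+
+/*
+ * Each CONT PTE/PMD size above is just 2^CONT_PTE_SHIFT (resp.
+ * 2^CONT_PMD_SHIFT) contiguous base PTE/PMD mappings, e.g. with
+ * 4K pages a CONT PTE huge page spans 16 * 4K = 64K and a CONT
+ * PMD huge page spans 16 * 2M = 32M.
+ */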
+
+/*
+ * Reserve CMA areas for the largest supported gigantic
+ * huge page when requested. Any other smaller gigantic
+ * huge pages could still be served from those areas.
+ */
+#ifdef CONFIG_CMA
+void __init arm64_hugetlb_cma_reserve(void)
+{
+ int order;
+
+#ifdef CONFIG_ARM64_4K_PAGES
+ order = PUD_SHIFT - PAGE_SHIFT;
+#else
+ order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
+#endif
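+ /*
+ * Per the support matrix above, this is the order of a 1G huge
+ * page under 4K pages (PUD sized), and of a 1G / 16G huge page
+ * under 16K / 64K pages respectively (CONT PMD sized).
+ */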
+ /*
+ * HugeTLB CMA reservation is required for gigantic
+ * huge pages which cannot be allocated via the
+ * page allocator. Just warn if there is any change
+ * breaking this assumption.
+ */
+ WARN_ON(order <= MAX_ORDER);
+ hugetlb_cma_reserve(order);
+}
+#endif /* CONFIG_CMA */
+
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
* initialize node_online_map that gets used in hugetlb_cma_reserve()
* while allocating required CMA size across online nodes.
*/
-#ifdef CONFIG_ARM64_4K_PAGES
- hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
+ arm64_hugetlb_cma_reserve();
#endif
/*