This patch adds support for allocating gigantic hugepages using CMA by
specifying the hugetlb_cma= kernel parameter. This is only supported on
RV64.
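
For context (illustration only, not part of the patch): once the kernel
is booted with e.g. hugetlb_cma=1G and a gigantic page has been made
available, for example through
/sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages, user space
can map it with MAP_HUGETLB. A minimal sketch, assuming 4 KiB base
pages so that gigantic pages are 1 GiB:

/*
 * Illustration only: map one 1 GiB gigantic hugepage from the
 * CMA-backed pool. Assumes the kernel was booted with hugetlb_cma=1G
 * (or larger) and that one gigantic page has been reserved.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#ifndef MAP_HUGE_1GB
#define MAP_HUGE_1GB	(30 << 26)	/* log2(1 GiB) << MAP_HUGE_SHIFT */
#endif

int main(void)
{
	size_t len = 1UL << 30;		/* one 1 GiB gigantic page */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_1GB,
		       -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB | MAP_HUGE_1GB)");
		return EXIT_FAILURE;
	}

	/* Touch the mapping so the gigantic page is actually faulted in. */
	*(volatile char *)p = 1;

	munmap(p, len);
	return EXIT_SUCCESS;
}
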
Reviewed-by: Alexandre Ghiti <alex@ghiti.fr>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>
#include <linux/crash_dump.h>
+#include <linux/hugetlb.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
early_init_fdt_scan_reserved_mem();
dma_contiguous_reserve(dma32_phys_limit);
+ if (IS_ENABLED(CONFIG_64BIT))
+ hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
memblock_allow_resize();
}
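
For reference (not part of the patch), the order passed to
hugetlb_cma_reserve() can be worked out as below; the constants are
the usual RV64 values assuming 4 KiB base pages, not anything taken
from the patch itself:

/* Illustrative arithmetic only; assumes 4 KiB base pages. */
#define EXAMPLE_PAGE_SHIFT	12	/* 4 KiB base page */
#define EXAMPLE_PUD_SHIFT	30	/* a PUD maps 1 GiB on RV64 */

/*
 * Order handed to hugetlb_cma_reserve(): 30 - 12 = 18, i.e. the CMA
 * area backs order-18 (1 GiB) gigantic pages. RV32 has no PUD level,
 * hence the IS_ENABLED(CONFIG_64BIT) guard around the call.
 */
#define EXAMPLE_GIGANTIC_ORDER	(EXAMPLE_PUD_SHIFT - EXAMPLE_PAGE_SHIFT)
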