In commit b7176c261cdb ("dma-contiguous: provide the ability to
reserve per-numa CMA"), Barry added DMA_PERNUMA_CMA for ARM64 only.
But this feature is architecture independent, so support per-numa CMA
on all architectures and enable it by default when NUMA is enabled.
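
To illustrate the resulting structure, here is a minimal standalone
sketch (plain userspace C, not the kernel code itself; the bodies are
placeholders): the reservation helper becomes static with an empty
fallback stub, and the generic CMA setup path calls it unconditionally,
so no architecture-specific call site remains.

#include <stdio.h>

#define CONFIG_DMA_PERNUMA_CMA 1	/* stand-in for the Kconfig option */

#ifdef CONFIG_DMA_PERNUMA_CMA
static void dma_pernuma_cma_reserve(void)
{
	/* the real helper reserves one CMA area per online NUMA node */
	printf("reserving a CMA area on each online NUMA node\n");
}
#else
static inline void dma_pernuma_cma_reserve(void)
{
	/* no-op when the option is disabled */
}
#endif

static void dma_contiguous_reserve(void)
{
	/* generic CMA setup now triggers the per-numa reservation itself */
	dma_pernuma_cma_reserve();
	/* ... followed by the usual global CMA reservation */
}

int main(void)
{
	dma_contiguous_reserve();	/* no arm64-specific call needed */
	return 0;
}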
Signed-off-by: Yajun Deng <yajun.deng@linux.dev>
Tested-by: Yicong Yang <yangyicong@hisilicon.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Documentation/admin-guide/kernel-parameters.txt
cma_pernuma=nn[MG]
- [ARM64,KNL,CMA]
+ [KNL,CMA]
Sets the size of kernel per-numa memory area for
contiguous memory allocations. A value of 0 disables
per-numa CMA altogether. And If this option is not
arch/arm64/mm/init.c

arm64_hugetlb_cma_reserve();
#endif
- dma_pernuma_cma_reserve();
-
kvm_hyp_reserve();
/*
include/linux/dma-map-ops.h

}
#endif /* CONFIG_DMA_CMA*/
-#ifdef CONFIG_DMA_PERNUMA_CMA
-void dma_pernuma_cma_reserve(void);
-#else
-static inline void dma_pernuma_cma_reserve(void) { }
-#endif /* CONFIG_DMA_PERNUMA_CMA */
-
#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size);
kernel/dma/Kconfig

config DMA_PERNUMA_CMA
bool "Enable separate DMA Contiguous Memory Area for each NUMA Node"
- default NUMA && ARM64
+ default NUMA
help
- Enable this option to get pernuma CMA areas so that devices like
- ARM64 SMMU can get local memory by DMA coherent APIs.
+ Enable this option to get pernuma CMA areas so that NUMA devices
+ can get local memory by DMA coherent APIs.
You can set the size of pernuma CMA by specifying "cma_pernuma=size"
on the kernel's command line.
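
As a usage note (the size here is just an example), booting with
"cma_pernuma=16M" reserves a 16 MiB CMA area on each online NUMA node,
while "cma_pernuma=0" disables the per-numa areas, as described in the
kernel-parameters.txt hunk above.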
kernel/dma/contiguous.c

#endif
#ifdef CONFIG_DMA_PERNUMA_CMA
-void __init dma_pernuma_cma_reserve(void)
+static void __init dma_pernuma_cma_reserve(void)
{
int nid;
(unsigned long long)pernuma_size_bytes / SZ_1M, nid);
}
}
+#else
+static inline void __init dma_pernuma_cma_reserve(void)
+{
+}
#endif
/**
phys_addr_t selected_limit = limit;
bool fixed = false;
+ dma_pernuma_cma_reserve();
+
pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
if (size_cmdline != -1) {