 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG

#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
	unsigned long	base_pfn;	/* first PFN of the area */
	unsigned long	*bitmap;	/* one bit per page, set while allocated */
	unsigned long	cma_count;	/* pages handled via alloc_contig_range();
					   offsets past this come straight from the
					   reserved (non-buddy) tail of the area */
	unsigned long	threshold_count; /* requests of at least this many pages
					    are steered to the reserved tail first */
static DEFINE_MUTEX(cma_mutex);

struct cma *dma_contiguous_def_area;
phys_addr_t dma_contiguous_def_base;

static struct cma_area {
} cma_areas[MAX_CMA_AREAS] __initdata;
static unsigned cma_area_count __initdata;

static struct cma_map {
} cma_maps[MAX_CMA_AREAS] __initdata;
static unsigned cma_map_count __initdata;
static struct cma *cma_get_area(phys_addr_t base)
	for (i = 0; i < cma_area_count; i++)
		if (cma_areas[i].base == base)
			return cma_areas[i].cma;

static struct cma *cma_get_area_by_name(const char *name)
	for (i = 0; i < cma_area_count; i++)
		if (cma_areas[i].name && strcmp(cma_areas[i].name, name) == 0)
			return cma_areas[i].cma;
#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES 0
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;

static int __init early_cma(char *p)
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
early_param("cma", early_cma);
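
/*
 * Example: the size is parsed with memparse(), so the usual suffixes are
 * accepted; booting with "cma=64M" on the kernel command line asks for a
 * 64 MiB default area, which dma_contiguous_reserve() below places and
 * reserves.
 */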
#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
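
	/*
	 * Worked example (illustrative figures): with 1 GiB of memory
	 * (262144 pages of 4 KiB) and CONFIG_CMA_SIZE_PERCENTAGE=10, the
	 * expression above gives 262144 * 10 / 100 = 26214 pages, i.e.
	 * just over 102 MiB.
	 */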
static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
	unsigned long pfn = base_pfn;
	unsigned i = count >> pageblock_order;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			if (page_zone(pfn_to_page(pfn)) != zone)
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
static __init struct cma *cma_create_area(unsigned long base_pfn,
	int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);

	pr_debug("%s(base %08lx, count %lx)\n",
		 __func__, base_pfn, count);

	cma = kmalloc(sizeof *cma, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	cma->base_pfn = base_pfn;
	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	mutex_init(&cma->lock);

	ret = cma_activate_area(base_pfn, count);

	pr_debug("%s: returned %p\n", __func__, (void *)cma);
	mutex_destroy(&cma->lock);
int __init cma_fdt_scan(unsigned long node, const char *uname,
			int depth, void *data)
	phys_addr_t base, size;

	if (!of_get_flat_dt_prop(node, "linux,contiguous-region", NULL))

	prop = of_get_flat_dt_prop(node, "sprd,ion-heap-mem", &len);
	if (!prop || (len != 2 * sizeof(unsigned long)))

	base = be32_to_cpu(prop[0]);
	size = be32_to_cpu(prop[1]);

	name = of_get_flat_dt_prop(node, "reg-names", NULL);

	pr_info("Found %s, memory base %lx, size %ld MiB\n", uname,
		(unsigned long)base, (unsigned long)size / SZ_1M);

	dma_contiguous_reserve_area(size, &base, 0, name);
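
/*
 * Illustrative example of a reserved-region node that the scan above would
 * match (node name, label and addresses are made up; only the property names
 * come from cma_fdt_scan()):
 *
 *	ion_reserved: ion-heap@90000000 {
 *		linux,contiguous-region;
 *		sprd,ion-heap-mem = <0x90000000 0x01000000>;
 *		reg-names = "ion_heap";
 *	};
 */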
 * dma_contiguous_reserve() - reserve area for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It reserves contiguous areas for global, device independent
 * allocations and (optionally) all areas defined in device tree structures.
void __init dma_contiguous_reserve(phys_addr_t limit)
	phys_addr_t sel_size = 0;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		sel_size = size_cmdline;
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		sel_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		sel_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		sel_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		sel_size = max(size_bytes, cma_early_percent_memory());

		phys_addr_t base = 0;
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)sel_size / SZ_1M);

		if (dma_contiguous_reserve_area(sel_size, &base, limit, NULL)
			dma_contiguous_def_base = base;

	of_scan_flat_dt(cma_fdt_scan, NULL);
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes),
 * @base: Pointer to the base address of the reserved area, also used to return
 *	base address of the actually reserved area, optional, use pointer to
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creating custom reserved areas for specific
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
				       phys_addr_t limit, const char *name)
	phys_addr_t base = *res_base;
	phys_addr_t alignment;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");

	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma_areas[cma_area_count].base = base;
	cma_areas[cma_area_count].size = size;
	cma_areas[cma_area_count].name = name;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);

	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
 * dma_contiguous_add_device() - add device to custom contiguous reserved area
 * @dev: Pointer to device structure.
 * @base: Base address of the reserved area returned by the
 *	dma_contiguous_reserve_area() function.
 * This function assigns the given device to the contiguous memory area
 * reserved earlier by the dma_contiguous_reserve_area() function.
int __init dma_contiguous_add_device(struct device *dev, phys_addr_t base)
	if (cma_map_count == ARRAY_SIZE(cma_maps)) {
		pr_err("Not enough slots for CMA reserved regions!\n");

	cma_maps[cma_map_count].dev = dev;
	cma_maps[cma_map_count].base = base;
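
/*
 * Illustrative sketch (not taken from the original sources): board or arch
 * setup code would typically pair dma_contiguous_reserve_area() with
 * dma_contiguous_add_device() roughly like this. The "camera" name, the
 * 16 MiB size and the device argument are made-up placeholders.
 */
static int __init __maybe_unused board_reserve_camera_area(struct device *camera_dev)
{
	phys_addr_t camera_base = 0;	/* 0: let memblock choose the placement */
	int ret;

	ret = dma_contiguous_reserve_area(SZ_16M, &camera_base, 0, "camera");
	if (ret)
		return ret;

	/* Route the camera device's CMA allocations to the new area. */
	return dma_contiguous_add_device(camera_dev, camera_base);
}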
void cma_assign_device_from_dt(struct device *dev)
	struct device_node *node;

	ret = of_property_read_u32(dev->of_node,
				   "linux,contiguous-region", &value);

	if (of_property_read_string(dev->of_node, "reg-names", &name))

	cma = cma_get_area_by_name(name);

	dev_set_cma_area(dev, cma);
	pr_info("Assigned CMA region to %s device\n", dev_name(dev));
static int cma_device_init_notifier_call(struct notifier_block *nb,
					 unsigned long event, void *data)
	struct device *dev = data;

	if (event == BUS_NOTIFY_ADD_DEVICE && dev->of_node)
		cma_assign_device_from_dt(dev);

static struct notifier_block cma_dev_init_nb = {
	.notifier_call = cma_device_init_notifier_call,
static int __init cma_init_reserved_areas(void)
	for (i = 0; i < cma_area_count; i++) {
		phys_addr_t base = PFN_DOWN(cma_areas[i].base);
		unsigned int count = cma_areas[i].size >> PAGE_SHIFT;

		cma = cma_create_area(base, count);
		cma_areas[i].cma = cma;

	dma_contiguous_def_area = cma_get_area(dma_contiguous_def_base);

	for (i = 0; i < cma_map_count; i++) {
		cma = cma_get_area(cma_maps[i].base);
		dev_set_cma_area(cma_maps[i].dev, cma);

	bus_register_notifier(&platform_bus_type, &cma_dev_init_nb);

core_initcall(cma_init_reserved_areas);
static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	mutex_unlock(&cma->lock);
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device specific contiguous memory area if available, or the default
 * global one. Requires the architecture specific dev_get_cma_area() helper
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int reserve_drain = 0;

	if (!cma || !cma->count)

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,

	mask = (1 << align) - 1;

	if (cma->threshold_count > 0 &&
	    count >= cma->threshold_count &&
		start = cma->cma_count;

		mutex_lock(&cma->lock);
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
		if (pageno >= cma->count) {
			if (cma->threshold_count > 0 &&
			    count >= cma->threshold_count &&
			mutex_unlock(&cma->lock);
			mutex_unlock(&cma->lock);

		bitmap_set(cma->bitmap, pageno, count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + pageno;
		if (pageno < cma->cma_count && pageno + count > cma->cma_count) {
			start = cma->cma_count;

		if (pageno + count <= cma->cma_count) {
			mutex_lock(&cma_mutex);
			ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
			mutex_unlock(&cma_mutex);
			pr_debug("%s(): allocate from reserved memory\n",
			page = pfn_to_page(pfn);
		} else if (ret != -EBUSY) {
			clear_cma_bitmap(cma, pfn, count);
		clear_cma_bitmap(cma, pfn, count);
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;

	pr_debug("%s(): returned %p\n", __func__, page);
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to a contiguous area and

bool dma_release_from_contiguous(struct device *dev, struct page *pages,
	struct cma *cma = dev_get_cma_area(dev);

	pr_debug("%s(page %p, count %d)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	if (pfn + count <= cma->base_pfn + cma->cma_count)
		free_contig_range(pfn, count);
	clear_cma_bitmap(cma, pfn, count);
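
/*
 * Illustrative usage sketch (not part of the original file): a driver that
 * needs a physically contiguous buffer could use the two helpers above
 * roughly like this. The request size of 16 pages is a made-up example.
 */
static void __maybe_unused cma_usage_example(struct device *dev)
{
	/* Ask for 16 contiguous pages, aligned to a single page (order 0). */
	struct page *pages = dma_alloc_from_contiguous(dev, 16, 0);

	if (!pages)
		return;

	/* ... use the buffer, e.g. via page_address(pages) ... */

	/* Hand the pages back; returns false if they were not CMA pages. */
	dma_release_from_contiguous(dev, pages, 16);
}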