/*
 * Contiguous Memory Allocator framework
 * Copyright (c) 2010 by Samsung Electronics.
 * Written by Michal Nazarewicz (m.nazarewicz@samsung.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 *
 * See Documentation/contiguous-memory.txt for details.
 */
#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#  define DEBUG
#endif
#ifndef CONFIG_NO_BOOTMEM
#  include <linux/bootmem.h>   /* alloc_bootmem_pages_nopanic() */
#endif
#ifdef CONFIG_HAVE_MEMBLOCK
#  include <linux/memblock.h>  /* memblock*() */
#endif

#include <linux/device.h>      /* struct device, dev_name() */
#include <linux/errno.h>       /* Error numbers */
#include <linux/err.h>         /* IS_ERR, PTR_ERR, etc. */
#include <linux/mm.h>          /* PAGE_ALIGN() */
#include <linux/module.h>      /* EXPORT_SYMBOL_GPL() */
#include <linux/mutex.h>       /* mutex */
#include <linux/slab.h>        /* kmalloc() */
#include <linux/string.h>      /* str*() */

#include <linux/cma.h>
#include <linux/vmalloc.h>
/*
 * Protects cma_regions, cma_allocators, cma_map, cma_map_length,
 * cma_kobj, cma_sysfs_regions and cma_chunks_by_start.
 */
static DEFINE_MUTEX(cma_mutex);
/************************* Map attribute *************************/

static const char *cma_map;
static size_t cma_map_length;
/*
 * map-attr ::= [ rules [ ';' ] ]
 * rules    ::= rule [ ';' rules ]
 * rule     ::= patterns '=' regions
 * patterns ::= pattern [ ',' patterns ]
 * regions  ::= REG-NAME [ ',' regions ]
 * pattern  ::= dev-pattern [ '/' TYPE-NAME ] | '/' TYPE-NAME
 *
 * See Documentation/contiguous-memory.txt for details.
 */
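/*
 * For example (all names here are purely illustrative), a map such as
 * "video,camera=r1;video/fixed=r2" directs allocations from devices
 * named "video" and "camera" to region "r1", and allocations of type
 * "fixed" for device "video" to region "r2".
 */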
static ssize_t cma_map_validate(const char *param)
{
	const char *ch = param;

	if (*ch == '\0' || *ch == '\n')
		return 0;

	for (;;) {
		const char *start = ch;

		while (*ch && *ch != '\n' && *ch != ';' && *ch != '=')
			++ch;

		if (*ch != '=' || start == ch) {
			pr_err("map: expecting \"<patterns>=<regions>\" near %s\n",
			       start);
			return -EINVAL;
		}

		while (*++ch != ';')
			if (*ch == '\0' || *ch == '\n')
				return ch - param;

		if (ch[1] == '\0' || ch[1] == '\n')
			return ch - param;
		++ch;
	}
}
static int __init cma_map_param(char *param)
{
	ssize_t len;

	pr_debug("param: map: %s\n", param);

	len = cma_map_validate(param);
	if (unlikely(len < 0))
		return len;

	cma_map = param;
	cma_map_length = len;
	return 0;
}

#if defined CONFIG_CMA_CMDLINE

early_param("cma.map", cma_map_param);

#endif
/************************* Early regions *************************/

struct list_head cma_early_regions __initdata =
	LIST_HEAD_INIT(cma_early_regions);
#ifdef CONFIG_CMA_CMDLINE

/*
 * regions-attr ::= [ regions [ ';' ] ]
 * regions      ::= region [ ';' regions ]
 *
 * region       ::= [ '-' ] reg-name
 *                     '=' size
 *                   [ '@' start ]
 *                   [ '/' alignment ]
 *                   [ ':' alloc-name ]
 *
 * See Documentation/contiguous-memory.txt for details.
 *
 * Example:
 *	cma=reg1=64M:bf;reg2=32M@0x100000:bf;reg3=64M/1M:bf
 *
 * If the allocator is omitted, the first available allocator is used.
 */
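/*
 * The NUMPARSE() helper below consumes an optional one-character
 * prefix followed by a memparse()-style number.  For example, with
 * param pointing at "64M@0x100000", NUMPARSE('\0', size_t, true)
 * consumes "64M" (64 << 20) and a subsequent
 * NUMPARSE('@', dma_addr_t, true) consumes "@0x100000".
 */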
#define NUMPARSE(cond_ch, type, cond) ({				\
		unsigned long long v = 0;				\
		if (*param == (cond_ch)) {				\
			const char *const msg = param + 1;		\
			v = memparse(msg, &param);			\
			if (!v || v > ~(type)0 || !(cond)) {		\
				pr_err("param: invalid value near %s\n", msg); \
				/* ... */				\
			}						\
		}							\
		v;							\
	})
static int __init cma_param_parse(char *param)
{
	static struct cma_region regions[16];

	size_t left = ARRAY_SIZE(regions);
	struct cma_region *reg = regions;
	size_t size;

	pr_debug("param: %s\n", param);

	for (; *param; ++reg) {
		dma_addr_t start, alignment;

		if (unlikely(!--left)) {
			pr_err("param: too many early regions\n");
			return -ENOSPC;
		}

		/* Parse name */
		reg->name = param;
		param = strchr(param, '=');
		if (!param || param == reg->name) {
			pr_err("param: expected \"<name>=\" near %s\n",
			       reg->name);
			return -EINVAL;
		}
		*param++ = '\0';

		/* Parse numbers */
		size      = NUMPARSE('\0', size_t, true);
		start     = NUMPARSE('@', dma_addr_t, true);
		alignment = NUMPARSE('/', dma_addr_t, (v & (v - 1)) == 0);

		alignment = max(alignment, (dma_addr_t)PAGE_SIZE);
		start     = ALIGN(start, alignment);
		size      = PAGE_ALIGN(size);
		if (start + size < start) {
			pr_err("param: invalid start, size combination\n");
			return -EINVAL;
		}

		/* Parse allocator */
		if (*param == ':') {
			reg->alloc_name = ++param;
			while (*param && *param != ';')
				++param;
			if (param == reg->alloc_name)
				reg->alloc_name = NULL;
		}

		if (*param == ';') {
			*param++ = '\0';
		} else if (*param) {
			pr_err("param: expecting ';' or end of parameter near %s\n",
			       param);
			return -EINVAL;
		}

		reg->size      = size;
		reg->start     = start;
		reg->alignment = alignment;

		list_add_tail(&reg->list, &cma_early_regions);

		pr_debug("param: registering early region %s (%p@%p/%p)\n",
			 reg->name, (void *)reg->size, (void *)reg->start,
			 (void *)reg->alignment);
	}

	return 0;
}
early_param("cma", cma_param_parse);

#endif /* CONFIG_CMA_CMDLINE */
int __init __must_check cma_early_region_register(struct cma_region *reg)
{
	dma_addr_t start, alignment;
	size_t size;

	if (reg->alignment & (reg->alignment - 1))
		return -EINVAL;

	alignment = max(reg->alignment, (dma_addr_t)PAGE_SIZE);
	start     = ALIGN(reg->start, alignment);
	size      = PAGE_ALIGN(reg->size);

	if (start + size < start)
		return -EINVAL;

	reg->size      = size;
	reg->start     = start;
	reg->alignment = alignment;

	list_add_tail(&reg->list, &cma_early_regions);

	pr_debug("param: registering early region %s (%p@%p/%p)\n",
		 reg->name, (void *)reg->size, (void *)reg->start,
		 (void *)reg->alignment);

	return 0;
}
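/*
 * Illustrative use from board or platform code (the region name and
 * size below are hypothetical):
 *
 *	static struct cma_region board_region = {
 *		.name = "video",
 *		.size = 16 << 20,
 *	};
 *
 *	if (cma_early_region_register(&board_region))
 *		pr_warn("cma: video region not registered\n");
 */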
/************************* Regions & Allocators *************************/

static void __cma_sysfs_region_add(struct cma_region *reg);

static int __cma_region_attach_alloc(struct cma_region *reg);
static void __maybe_unused __cma_region_detach_alloc(struct cma_region *reg);

/* List of all regions.  Named regions are kept before unnamed. */
static LIST_HEAD(cma_regions);

#define cma_foreach_region(reg) \
	list_for_each_entry(reg, &cma_regions, list)
int __must_check cma_region_register(struct cma_region *reg)
{
	const char *name, *alloc_name;
	struct cma_region *r;
	char *ch = NULL;
	int ret = 0;

	if (!reg->size || reg->start + reg->size < reg->start)
		return -EINVAL;
	/* ... */
	reg->private_data = NULL;
	reg->registered = 0;
	reg->free_space = reg->size;

	/* Copy name and alloc_name */
	name = reg->name;
	alloc_name = reg->alloc_name;
	if (reg->copy_name && (reg->name || reg->alloc_name)) {
		size_t name_size, alloc_size;

		name_size  = reg->name ? strlen(reg->name) + 1 : 0;
		alloc_size = reg->alloc_name ? strlen(reg->alloc_name) + 1 : 0;

		ch = kmalloc(name_size + alloc_size, GFP_KERNEL);
		if (!ch) {
			pr_err("%s: not enough memory to allocate name\n",
			       reg->name ?: "(private)");
			return -ENOMEM;
		}

		if (name_size) {
			memcpy(ch, reg->name, name_size);
			name = ch;
		}
		if (alloc_size) {
			memcpy(ch + name_size, reg->alloc_name, alloc_size);
			alloc_name = ch + name_size;
		}
	}

	mutex_lock(&cma_mutex);

	/* Don't let regions overlap */
	cma_foreach_region(r)
		if (r->start + r->size > reg->start &&
		    r->start < reg->start + reg->size) {
			ret = -EADDRINUSE;
			goto done;
		}

	ret = __cma_region_attach_alloc(reg);
	if (unlikely(ret < 0))
		goto done;

	reg->name = name;
	reg->alloc_name = alloc_name;
	reg->registered = 1;

	/*
	 * Keep named regions at the beginning and unnamed (private)
	 * ones at the end.  This helps in traversal when a named
	 * region is looked up.
	 */
	if (name)
		list_add(&reg->list, &cma_regions);
	else
		list_add_tail(&reg->list, &cma_regions);

	__cma_sysfs_region_add(reg);

done:
	mutex_unlock(&cma_mutex);

	if (ret)
		kfree(ch);

	pr_debug("%s: region %sregistered\n",
		 reg->name ?: "(private)", ret ? "not " : "");
	return ret;
}
EXPORT_SYMBOL_GPL(cma_region_register);
static struct cma_region *__must_check
__cma_region_find(const char **namep)
{
	struct cma_region *reg;
	const char *ch, *name;
	size_t n;

	name = *namep;
	ch = name;
	while (*ch && *ch != ',' && *ch != ';')
		++ch;
	n = ch - name;
	*namep = *ch == ',' ? ch + 1 : ch;

	/*
	 * Named regions are kept in front of unnamed ones, so if we
	 * encounter an unnamed region we can stop.
	 */
	cma_foreach_region(reg)
		if (!reg->name)
			break;
		else if (!strncmp(name, reg->name, n) && !reg->name[n])
			return reg;

	return NULL;
}
/* List of all allocators. */
static LIST_HEAD(cma_allocators);

#define cma_foreach_allocator(alloc) \
	list_for_each_entry(alloc, &cma_allocators, list)
int cma_allocator_register(struct cma_allocator *alloc)
{
	struct cma_region *reg;
	int first;

	if (!alloc->alloc || !alloc->free)
		return -EINVAL;

	mutex_lock(&cma_mutex);

	first = list_empty(&cma_allocators);

	list_add_tail(&alloc->list, &cma_allocators);

	/*
	 * Attach this allocator to all allocator-less regions that
	 * request this particular allocator (reg->alloc_name equals
	 * alloc->name) or if the region wants the first available
	 * allocator and we are the first.
	 */
	cma_foreach_region(reg) {
		if (reg->alloc)
			continue;

		if (reg->alloc_name
		    ? alloc->name && !strcmp(alloc->name, reg->alloc_name)
		    : (!reg->used && first))
			__cma_region_attach_alloc(reg);
	}

	mutex_unlock(&cma_mutex);

	pr_debug("%s: allocator registered\n", alloc->name ?: "(unnamed)");
	return 0;
}
EXPORT_SYMBOL_GPL(cma_allocator_register);
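/*
 * A minimal registration sketch (the allocator name and callbacks are
 * hypothetical; the callbacks must honour the struct cma_allocator
 * contract used above, i.e. both alloc and free must be set):
 *
 *	static struct cma_allocator my_allocator = {
 *		.name  = "bf",
 *		.alloc = my_alloc_chunk,
 *		.free  = my_free_chunk,
 *	};
 *
 *	err = cma_allocator_register(&my_allocator);
 */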
static struct cma_allocator *__must_check
__cma_allocator_find(const char *name)
{
	struct cma_allocator *alloc;

	if (!name || !*name)
		return list_empty(&cma_allocators)
			? NULL
			: list_entry(cma_allocators.next,
				     struct cma_allocator, list);

	cma_foreach_allocator(alloc)
		if (alloc->name && !strcmp(name, alloc->name))
			return alloc;

	return NULL;
}
/************************* Initialise CMA *************************/

int __init cma_set_defaults(struct cma_region *regions, const char *map)
{
	if (map) {
		int ret = cma_map_param((char *)map);
		if (unlikely(ret < 0))
			return ret;
	}

	if (regions)
		for (; regions->size; ++regions) {
			int ret = cma_early_region_register(regions);
			if (unlikely(ret < 0))
				return ret;
		}

	return 0;
}
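/*
 * Typical platform setup (region and map names are hypothetical):
 *
 *	static struct cma_region regions[] = {
 *		{ .name = "common", .size = 16 << 20 },
 *		{ }
 *	};
 *
 *	cma_set_defaults(regions, "*=common");
 *
 * The empty sentinel entry terminates the array, matching the
 * regions->size loop condition above.
 */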
int __init cma_early_region_reserve(struct cma_region *reg)
{
	int tried = 0;

	if (!reg->size || (reg->alignment & (reg->alignment - 1)) ||
	    reg->reserved)
		return -EINVAL;

#ifndef CONFIG_NO_BOOTMEM
	tried = 1;
	{
		void *ptr = __alloc_bootmem_nopanic(reg->size, reg->alignment,
						    reg->start);
		if (ptr) {
			reg->start = virt_to_phys(ptr);
			reg->reserved = 1;
			return 0;
		}
	}
#endif

#ifdef CONFIG_HAVE_MEMBLOCK
	tried = 1;
	if (reg->start) {
		if (!memblock_is_region_reserved(reg->start, reg->size) &&
		    memblock_reserve(reg->start, reg->size) >= 0) {
			reg->reserved = 1;
			return 0;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		u64 ret = __memblock_alloc_base(reg->size, reg->alignment, 0);
		if (ret &&
		    ret < ~(dma_addr_t)0 &&
		    ret + reg->size < ~(dma_addr_t)0 &&
		    ret + reg->size > ret) {
			reg->start = ret;
			reg->reserved = 1;
			return 0;
		}

		if (ret)
			memblock_free(ret, reg->size);
	}
#endif

	return tried ? -ENOMEM : -EOPNOTSUPP;
}
void __init cma_early_regions_reserve(int (*reserve)(struct cma_region *reg))
{
	struct cma_region *reg;

	pr_debug("init: reserving early regions\n");

	if (!reserve)
		reserve = cma_early_region_reserve;

	list_for_each_entry(reg, &cma_early_regions, list) {
		if (reg->reserved) {
			/* nothing to do */
		} else if (reserve(reg) >= 0) {
			pr_debug("init: %s: reserved %p@%p\n",
				 reg->name ?: "(private)",
				 (void *)reg->size, (void *)reg->start);
		} else {
			pr_warn("init: %s: unable to reserve %p@%p/%p\n",
				reg->name ?: "(private)",
				(void *)reg->size, (void *)reg->start,
				(void *)reg->alignment);
		}
	}
}
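/*
 * Architecture code typically calls cma_early_regions_reserve(NULL)
 * from its boot-time memory-reservation hook so that every early
 * region is reserved with the default cma_early_region_reserve()
 * shown above; a custom reserve() callback may be passed instead.
 */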
static int __init cma_init(void)
{
	struct cma_region *reg, *n;

	pr_debug("init: initialising\n");

	if (cma_map) {
		char *val = kmemdup(cma_map, cma_map_length + 1, GFP_KERNEL);
		if (!val)
			return -ENOMEM;
		val[cma_map_length] = '\0';
		cma_map = val;
	}

	list_for_each_entry_safe(reg, n, &cma_early_regions, list) {
		INIT_LIST_HEAD(&reg->list);
		/*
		 * We don't care if there was an error.  It's a pity
		 * but there's not much we can do about it anyway.
		 * If the error is on a region that was parsed from
		 * the command line then it will stay and waste a bit
		 * of space; if it was registered using
		 * cma_early_region_register() it's the caller's
		 * responsibility to do something about it.
		 */
		if (reg->reserved && cma_region_register(reg) < 0)
			/* ignore error */;
	}

	INIT_LIST_HEAD(&cma_early_regions);

	return 0;
}
/*
 * We want to be initialised earlier than module_init/__initcall so
 * that drivers that want to grab memory at boot time will get CMA
 * ready.  subsys_initcall() seems early enough and not too early at
 * the same time.
 */
subsys_initcall(cma_init);
/************************* SysFS *************************/

#if defined CONFIG_CMA_SYSFS

static struct kobject cma_sysfs_regions;
static int cma_sysfs_regions_ready;
#define CMA_ATTR_INLINE(_type, _name)					\
	(&((struct cma_ ## _type ## _attribute){			\
		.attr = {						\
			.name = __stringify(_name),			\
			.mode = 0644,					\
		},							\
		.show  = cma_sysfs_ ## _type ## _ ## _name ## _show,	\
		.store = cma_sysfs_ ## _type ## _ ## _name ## _store,	\
	}).attr)

#define CMA_ATTR_RO_INLINE(_type, _name)				\
	(&((struct cma_ ## _type ## _attribute){			\
		.attr = {						\
			.name = __stringify(_name),			\
			.mode = 0444,					\
		},							\
		.show  = cma_sysfs_ ## _type ## _ ## _name ## _show,	\
	}).attr)
struct cma_root_attribute {
	struct attribute attr;
	ssize_t (*show)(char *buf);
	int (*store)(const char *buf);
};

static ssize_t cma_sysfs_root_map_show(char *page)
{
	ssize_t len;

	len = cma_map_length;
	if (!len) {
		*page = 0;
		return 0;
	}

	if (len > (size_t)PAGE_SIZE - 1)
		len = (size_t)PAGE_SIZE - 1;

	memcpy(page, cma_map, len);
	page[len++] = '\n';
	return len;
}
static int cma_sysfs_root_map_store(const char *page)
{
	ssize_t len = cma_map_validate(page);
	char *val = NULL;

	if (len < 0)
		return len;

	if (len) {
		val = kmemdup(page, len + 1, GFP_KERNEL);
		if (!val)
			return -ENOMEM;
		val[len] = '\0';
	}
	/* ... */
	cma_map = val;
	cma_map_length = len;
	return 0;
}
static ssize_t cma_sysfs_root_allocators_show(char *page)
{
	struct cma_allocator *alloc;
	size_t left = PAGE_SIZE;
	char *ch = page;

	cma_foreach_allocator(alloc) {
		ssize_t l = snprintf(ch, left, "%s ", alloc->name ?: "-");
		ch   += l;
		left -= l;
	}

	if (ch != page)
		ch[-1] = '\n';
	return ch - page;
}
static ssize_t
cma_sysfs_root_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cma_root_attribute *rattr =
		container_of(attr, struct cma_root_attribute, attr);
	ssize_t ret;

	mutex_lock(&cma_mutex);
	ret = rattr->show(buf);
	mutex_unlock(&cma_mutex);

	return ret;
}

static ssize_t
cma_sysfs_root_store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cma_root_attribute *rattr =
		container_of(attr, struct cma_root_attribute, attr);
	int ret;

	mutex_lock(&cma_mutex);
	ret = rattr->store(buf);
	mutex_unlock(&cma_mutex);

	return ret < 0 ? ret : count;
}
static struct kobj_type cma_sysfs_root_type = {
	.sysfs_ops = &(const struct sysfs_ops){
		.show  = cma_sysfs_root_show,
		.store = cma_sysfs_root_store,
	},
	.default_attrs = (struct attribute * []) {
		CMA_ATTR_INLINE(root, map),
		CMA_ATTR_RO_INLINE(root, allocators),
		NULL
	},
};
static int __init cma_sysfs_init(void)
{
	static struct kobject root;
	static struct kobj_type fake_type;

	struct cma_region *reg;
	int ret;

	/* Root */
	ret = kobject_init_and_add(&root, &cma_sysfs_root_type,
				   mm_kobj, "contiguous");
	if (unlikely(ret < 0)) {
		pr_err("init: unable to add root kobject: %d\n", ret);
		return ret;
	}

	/* Regions */
	ret = kobject_init_and_add(&cma_sysfs_regions, &fake_type,
				   &root, "regions");
	if (unlikely(ret < 0)) {
		pr_err("init: unable to add regions kobject: %d\n", ret);
		return ret;
	}

	mutex_lock(&cma_mutex);
	cma_sysfs_regions_ready = 1;
	cma_foreach_region(reg)
		__cma_sysfs_region_add(reg);
	mutex_unlock(&cma_mutex);

	return 0;
}
device_initcall(cma_sysfs_init);
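/*
 * The resulting layout under sysfs (assuming the usual /sys mount and
 * the "regions" kobject name used above):
 *
 *	/sys/kernel/mm/contiguous/map
 *	/sys/kernel/mm/contiguous/allocators
 *	/sys/kernel/mm/contiguous/regions/<start>/{name,start,size,free,users,alloc}
 */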
struct cma_region_attribute {
	struct attribute attr;
	ssize_t (*show)(struct cma_region *reg, char *buf);
	int (*store)(struct cma_region *reg, const char *buf);
};
static ssize_t cma_sysfs_region_name_show(struct cma_region *reg, char *page)
{
	return reg->name ? snprintf(page, PAGE_SIZE, "%s\n", reg->name) : 0;
}

static ssize_t cma_sysfs_region_start_show(struct cma_region *reg, char *page)
{
	return snprintf(page, PAGE_SIZE, "%p\n", (void *)reg->start);
}

static ssize_t cma_sysfs_region_size_show(struct cma_region *reg, char *page)
{
	return snprintf(page, PAGE_SIZE, "%zu\n", reg->size);
}

static ssize_t cma_sysfs_region_free_show(struct cma_region *reg, char *page)
{
	return snprintf(page, PAGE_SIZE, "%zu\n", reg->free_space);
}

static ssize_t cma_sysfs_region_users_show(struct cma_region *reg, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", reg->users);
}

static ssize_t cma_sysfs_region_alloc_show(struct cma_region *reg, char *page)
{
	if (reg->alloc)
		return snprintf(page, PAGE_SIZE, "%s\n",
				reg->alloc->name ?: "-");
	else if (reg->alloc_name)
		return snprintf(page, PAGE_SIZE, "[%s]\n", reg->alloc_name);
	else
		return 0;
}
static int
cma_sysfs_region_alloc_store(struct cma_region *reg, const char *page)
{
	char *s;

	if (reg->alloc && reg->users)
		return -EBUSY;

	if (!*page || *page == '\n') {
		s = NULL;
	} else {
		size_t len;

		for (s = (char *)page; *++s && *s != '\n'; )
			/* nothing */;

		len = s - page;
		s = kmemdup(page, len + 1, GFP_KERNEL);
		if (!s)
			return -ENOMEM;
		s[len] = '\0';
	}

	if (reg->alloc)
		__cma_region_detach_alloc(reg);

	if (reg->free_alloc_name)
		kfree(reg->alloc_name);

	reg->alloc_name = s;
	reg->free_alloc_name = !!s;
	/* ... */
	return 0;
}
static ssize_t
cma_sysfs_region_show(struct kobject *kobj, struct attribute *attr,
		      char *buf)
{
	struct cma_region *reg = container_of(kobj, struct cma_region, kobj);
	struct cma_region_attribute *rattr =
		container_of(attr, struct cma_region_attribute, attr);
	ssize_t ret;

	mutex_lock(&cma_mutex);
	ret = rattr->show(reg, buf);
	mutex_unlock(&cma_mutex);

	return ret;
}

static ssize_t
cma_sysfs_region_store(struct kobject *kobj, struct attribute *attr,
		       const char *buf, size_t count)
{
	struct cma_region *reg = container_of(kobj, struct cma_region, kobj);
	struct cma_region_attribute *rattr =
		container_of(attr, struct cma_region_attribute, attr);
	int ret;

	mutex_lock(&cma_mutex);
	ret = rattr->store(reg, buf);
	mutex_unlock(&cma_mutex);

	return ret < 0 ? ret : count;
}
static struct kobj_type cma_sysfs_region_type = {
	.sysfs_ops = &(const struct sysfs_ops){
		.show  = cma_sysfs_region_show,
		.store = cma_sysfs_region_store,
	},
	.default_attrs = (struct attribute * []) {
		CMA_ATTR_RO_INLINE(region, name),
		CMA_ATTR_RO_INLINE(region, start),
		CMA_ATTR_RO_INLINE(region, size),
		CMA_ATTR_RO_INLINE(region, free),
		CMA_ATTR_RO_INLINE(region, users),
		CMA_ATTR_INLINE(region, alloc),
		NULL
	},
};
static void __cma_sysfs_region_add(struct cma_region *reg)
{
	int ret;

	if (!cma_sysfs_regions_ready)
		return;

	memset(&reg->kobj, 0, sizeof reg->kobj);

	ret = kobject_init_and_add(&reg->kobj, &cma_sysfs_region_type,
				   &cma_sysfs_regions,
				   "%p", (void *)reg->start);

	if (ret >= 0 && reg->name &&
	    sysfs_create_link(&cma_sysfs_regions, &reg->kobj, reg->name) < 0)
		/* Ignore any errors. */;
}
#else

static void __cma_sysfs_region_add(struct cma_region *reg)
{
	/* no-op without CONFIG_CMA_SYSFS */
}

#endif
/************************* Chunks *************************/

/* All chunks sorted by start address. */
static struct rb_root cma_chunks_by_start;
static struct cma_chunk *__must_check __cma_chunk_find(dma_addr_t addr)
{
	struct cma_chunk *chunk;
	struct rb_node *n;

	for (n = cma_chunks_by_start.rb_node; n; ) {
		chunk = rb_entry(n, struct cma_chunk, by_start);
		if (addr < chunk->start)
			n = n->rb_left;
		else if (addr > chunk->start)
			n = n->rb_right;
		else
			return chunk;
	}

	WARN(1, KERN_WARNING "no chunk starting at %p\n", (void *)addr);
	return NULL;
}
static int __must_check __cma_chunk_insert(struct cma_chunk *chunk)
{
	struct rb_node **new, *parent = NULL;
	typeof(chunk->start) addr = chunk->start;

	for (new = &cma_chunks_by_start.rb_node; *new; ) {
		struct cma_chunk *c =
			container_of(*new, struct cma_chunk, by_start);
		parent = *new;

		if (addr < c->start) {
			new = &(*new)->rb_left;
		} else if (addr > c->start) {
			new = &(*new)->rb_right;
		} else {
			/*
			 * We should never be here.  If we are, it
			 * means the allocator gave us an invalid chunk
			 * (one that has already been allocated) so we
			 * refuse to accept it.  Our caller will
			 * recover by freeing the chunk.
			 */
			return -EADDRINUSE;
		}
	}

	rb_link_node(&chunk->by_start, parent, new);
	rb_insert_color(&chunk->by_start, &cma_chunks_by_start);

	return 0;
}
static void __cma_chunk_free(struct cma_chunk *chunk)
{
	rb_erase(&chunk->by_start, &cma_chunks_by_start);

	chunk->reg->alloc->free(chunk);
	--chunk->reg->users;
	chunk->reg->free_space += chunk->size;
}
/************************* The Device API *************************/

static const char *__must_check
__cma_where_from(const struct device *dev, const char *type);
static dma_addr_t __must_check
__cma_alloc_from_region(struct cma_region *reg,
			size_t size, dma_addr_t alignment)
{
	struct cma_chunk *chunk;

	pr_debug("allocate %p/%p from %s\n",
		 (void *)size, (void *)alignment,
		 reg ? reg->name ?: "(private)" : "(null)");

	if (!reg || reg->free_space < size)
		return -ENOMEM;

	if (!reg->alloc) {
		if (!reg->used)
			__cma_region_attach_alloc(reg);
		if (!reg->alloc)
			return -ENOMEM;
	}

	chunk = reg->alloc->alloc(reg, size, alignment);
	if (!chunk)
		return -ENOMEM;

	chunk->reg = reg;

	if (unlikely(__cma_chunk_insert(chunk) < 0)) {
		/* We should *never* be here. */
		chunk->reg->alloc->free(chunk);
		/* ... */
		return -EADDRINUSE;
	}

	++reg->users;
	reg->free_space -= chunk->size;
	pr_debug("allocated at %p\n", (void *)chunk->start);
	return chunk->start;
}
dma_addr_t __must_check
cma_alloc_from_region(struct cma_region *reg,
		      size_t size, dma_addr_t alignment)
{
	dma_addr_t addr;

	pr_debug("allocate %p/%p from %s\n",
		 (void *)size, (void *)alignment,
		 reg ? reg->name ?: "(private)" : "(null)");

	if (!size || alignment & (alignment - 1) || !reg)
		return -EINVAL;

	mutex_lock(&cma_mutex);

	addr = reg->registered ?
		__cma_alloc_from_region(reg, PAGE_ALIGN(size),
					max(alignment, (dma_addr_t)PAGE_SIZE)) :
		-EINVAL;

	mutex_unlock(&cma_mutex);

	return addr;
}
EXPORT_SYMBOL_GPL(cma_alloc_from_region);
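/*
 * Illustrative driver-side use (the region pointer and size are
 * hypothetical; note the IS_ERR_VALUE() convention used for errors):
 *
 *	dma_addr_t addr = cma_alloc_from_region(reg, 1 << 20, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (int)addr;
 *	...
 *	cma_free(addr);
 */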
dma_addr_t __must_check
__cma_alloc(const struct device *dev, const char *type,
	    dma_addr_t size, dma_addr_t alignment)
{
	struct cma_region *reg;
	const char *from;
	dma_addr_t addr;

	if (dev)
		pr_debug("allocate %p/%p for %s/%s\n",
			 (void *)size, (void *)alignment,
			 dev_name(dev), type ?: "");

	if (!size || alignment & (alignment - 1))
		return -EINVAL;

	size = PAGE_ALIGN(size);
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	mutex_lock(&cma_mutex);

	from = __cma_where_from(dev, type);
	if (unlikely(IS_ERR(from))) {
		addr = PTR_ERR(from);
		goto done;
	}

	pr_debug("allocate %p/%p from one of %s\n",
		 (void *)size, (void *)alignment, from);

	while (*from && *from != ';') {
		reg = __cma_region_find(&from);
		addr = __cma_alloc_from_region(reg, size, alignment);
		if (!IS_ERR_VALUE(addr))
			goto done;
	}

	pr_debug("not enough memory\n");
	addr = -ENOMEM;

done:
	mutex_unlock(&cma_mutex);

	return addr;
}
EXPORT_SYMBOL_GPL(__cma_alloc);
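/*
 * For instance, a driver asking for one page-aligned megabyte of the
 * hypothetical type "fixed" for its struct device:
 *
 *	dma_addr_t addr = __cma_alloc(dev, "fixed", 1 << 20, 0);
 *
 * With dev == NULL the call degenerates to the alternative form in
 * which type itself is treated as the list of regions to try (see
 * __cma_where_from() below).
 */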
void *cma_get_virt(dma_addr_t phys, dma_addr_t size, int noncached)
{
	unsigned long num_pages, i;
	struct page **pages;
	void *virt;

	if (noncached) {
		num_pages = size >> PAGE_SHIFT;
		pages = kmalloc(num_pages * sizeof(struct page *), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);

		for (i = 0; i < num_pages; i++)
			pages[i] = pfn_to_page((phys >> PAGE_SHIFT) + i);

		virt = vmap(pages, num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
		if (!virt) {
			kfree(pages);
			return ERR_PTR(-ENOMEM);
		}
		kfree(pages);
	} else {
		virt = phys_to_virt((unsigned long)phys);
	}

	return virt;
}
EXPORT_SYMBOL_GPL(cma_get_virt);
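/*
 * Usage sketch (addr obtained from a successful cma_alloc_*() call):
 *
 *	void *virt = cma_get_virt(addr, PAGE_SIZE, 1);
 *	if (!IS_ERR(virt))
 *		...
 *
 * The noncached variant maps the pages write-combined with vmap(), so
 * the mapping should eventually be released with vunmap().
 */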
/* Query information about regions. */
static void __cma_info_add(struct cma_info *infop, struct cma_region *reg)
{
	infop->total_size += reg->size;
	infop->free_size += reg->free_space;
	if (infop->lower_bound > reg->start)
		infop->lower_bound = reg->start;
	if (infop->upper_bound < reg->start + reg->size)
		infop->upper_bound = reg->start + reg->size;
	++infop->count;
}
int
__cma_info(struct cma_info *infop, const struct device *dev, const char *type)
{
	struct cma_info info = { ~(dma_addr_t)0, 0, 0, 0, 0 };
	struct cma_region *reg;
	const char *from;
	int ret;

	if (unlikely(!infop))
		return -EINVAL;

	mutex_lock(&cma_mutex);

	from = __cma_where_from(dev, type);
	if (IS_ERR(from)) {
		ret = PTR_ERR(from);
		info.lower_bound = 0;
		goto done;
	}

	while (*from && *from != ';') {
		reg = __cma_region_find(&from);
		if (reg)
			__cma_info_add(&info, reg);
	}

	ret = 0;
done:
	mutex_unlock(&cma_mutex);

	memcpy(infop, &info, sizeof info);
	return ret;
}
EXPORT_SYMBOL_GPL(__cma_info);
int cma_free(dma_addr_t addr)
{
	struct cma_chunk *c;
	int ret;

	mutex_lock(&cma_mutex);

	c = __cma_chunk_find(addr);
	if (c) {
		__cma_chunk_free(c);
		ret = 0;
	} else {
		ret = -ENOENT;
	}

	mutex_unlock(&cma_mutex);

	if (!ret)
		pr_debug("free(%p): freed\n", (void *)addr);
	else
		pr_err("free(%p): not found\n", (void *)addr);
	return ret;
}
EXPORT_SYMBOL_GPL(cma_free);
/************************* Miscellaneous *************************/

static int __cma_region_attach_alloc(struct cma_region *reg)
{
	struct cma_allocator *alloc;
	int ret;

	/*
	 * If reg->alloc is set then the caller wants us to use this
	 * allocator.  Otherwise we need to find one by name.
	 */
	if (reg->alloc) {
		alloc = reg->alloc;
	} else {
		alloc = __cma_allocator_find(reg->alloc_name);
		if (!alloc) {
			pr_warn("init: %s: %s: no such allocator\n",
				reg->name ?: "(private)",
				reg->alloc_name ?: "(default)");
			return -ENOENT;
		}
	}

	/* Try to initialise the allocator. */
	reg->private_data = NULL;
	ret = alloc->init ? alloc->init(reg) : 0;
	if (unlikely(ret < 0)) {
		pr_err("init: %s: %s: unable to initialise allocator\n",
		       reg->name ?: "(private)", alloc->name ?: "(unnamed)");
		/* ... */
		return ret;
	}

	reg->alloc = alloc;
	pr_debug("init: %s: %s: initialised allocator\n",
		 reg->name ?: "(private)", alloc->name ?: "(unnamed)");
	return 0;
}
static void __cma_region_detach_alloc(struct cma_region *reg)
{
	if (!reg->alloc)
		return;

	if (reg->alloc->cleanup)
		reg->alloc->cleanup(reg);

	reg->alloc = NULL;
	/* ... */
}
/*
 * map-attr ::= [ rules [ ';' ] ]
 * rules    ::= rule [ ';' rules ]
 * rule     ::= patterns '=' regions
 * patterns ::= pattern [ ',' patterns ]
 * regions  ::= REG-NAME [ ',' regions ]
 * pattern  ::= dev-pattern [ '/' TYPE-NAME ] | '/' TYPE-NAME
 */
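/*
 * Worked example (names illustrative): with cma_map set to
 * "video,camera=r1;video/fixed=r2", an allocation for a device named
 * "camera" with a NULL type resolves to the region list "r1", while
 * an allocation for device "video" of type "fixed" resolves to "r2".
 */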
static const char *__must_check
__cma_where_from(const struct device *dev, const char *type)
{
	/*
	 * This function matches the pattern from the map attribute
	 * against the given device name and type.  Type may of course
	 * be NULL or an empty string.
	 */

	const char *s, *name;
	int name_matched = 0;

	/*
	 * If dev is NULL we were called in the alternative form where
	 * type is the "from" string.  All we have to do is return it.
	 */
	if (!dev)
		return type ?: ERR_PTR(-EINVAL);

	if (!cma_map)
		return ERR_PTR(-ENOENT);

	name = dev_name(dev);
	if (WARN_ON(!name || !*name))
		return ERR_PTR(-EINVAL);
	/* Now we go through the cma_map attribute. */
	for (s = cma_map; *s; ++s) {
		const char *c;

		/*
		 * If the pattern starts with a slash, the device part of
		 * the pattern matches if it matched previously.
		 */
		/* ... */

		/*
		 * We are now trying to match the device name.  This also
		 * updates the name_matched variable.  If, while reading
		 * the spec, we encounter a comma it means that the
		 * pattern does not match and we need to start over with
		 * another pattern (the one after the comma).  If we
		 * encounter an equal sign we need to start over with
		 * another rule.  If there is a character that does not
		 * match, we need to look for a comma (to get another
		 * pattern) or a semicolon (to get another rule) and try
		 * again if there is one somewhere.
		 */
		for (c = name; *s != '*' && *c; ++c, ++s)
			if (/* ... */)
				/* ... */;
			else if (*s != '?' && *c != *s)
				/* ... */;
		/* ... */

		/*
		 * Now we need to match the type part of the pattern.  If
		 * the pattern is missing it, we match only if type points
		 * to an empty string.  Otherwise we try to match it just
		 * like the name.
		 */
match_type:	/* s points to '/' */
		/* ... */
		for (c = type; *s && *c; ++c, ++s)
			/* ... */;

		/* Return the string behind the '=' sign of the rule. */
		/* ... */
		return strchr(s, '=') + 1;

		/* Pattern did not match */
		do {
			/* ... */
		} while (*s != ',' && *s != '=');
		/* ... */

next_rule:	/* s points to '=' */
		/* ... */
	}

	return ERR_PTR(-ENOENT);
}