3 * drivers/gpu/ion/ion.c
5 * Copyright (C) 2011 Google, Inc.
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/time.h>
42 #include "compat_ion.h"
45 * struct ion_device - the metadata of the ion device node
46 * @dev: the actual misc device
47 * @buffers: an rb tree of all the existing buffers
48 * @buffer_lock: lock protecting the tree of buffers
49 * @lock: rwsem protecting the tree of heaps and clients
50 * @heaps: list of all the heaps in the system
51 * @clients:		rb tree of all the clients in the system
54 struct miscdevice dev;
55 struct rb_root buffers;
56 struct mutex buffer_lock;
57 struct rw_semaphore lock;
58 struct plist_head heaps;
59 long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
61 struct rb_root clients;
62 struct dentry *debug_root;
63 struct dentry *heaps_debug_root;
64 struct dentry *clients_debug_root;
68 * struct ion_client - a process/hw block local address space
69 * @node: node in the tree of all clients
70 * @dev: backpointer to ion device
71 * @handles: an rb tree of all the handles in this client
72 * @idr: an idr space for allocating handle ids
73 * @lock: lock protecting the tree of handles
74 * @name: used for debugging
75 * @display_name: used for debugging (unique version of @name)
76 * @display_serial: used for debugging (to make display_name unique)
77 * @task: used for debugging
79 * A client represents a list of buffers this client may access.
80 * The mutex stored here is used to protect both the tree of handles
81 * and the handles themselves, and should be held while modifying either.
85 struct ion_device *dev;
86 struct rb_root handles;
92 struct task_struct *task;
95 struct dentry *debug_root;
99 * ion_handle - a client local reference to a buffer
100 * @ref: reference count
101 * @client: back pointer to the client the buffer resides in
102 * @buffer: pointer to the buffer
103 * @node: node in the client's handle rbtree
104 * @kmap_cnt: count of times this client has mapped to kernel
105 * @id: client-unique id allocated by client->idr
107 * Modifications to node, kmap_cnt or mapping should be protected by the
108 * lock in the client. Other fields are never changed after initialization.
112 struct ion_client *client;
113 struct ion_buffer *buffer;
115 unsigned int kmap_cnt;
119 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
121 return (buffer->flags & ION_FLAG_CACHED) &&
122 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
125 bool ion_buffer_cached(struct ion_buffer *buffer)
127 return !!(buffer->flags & ION_FLAG_CACHED);
130 static inline struct page *ion_buffer_page(struct page *page)
132 return (struct page *)((unsigned long)page & ~(1UL));
135 static inline bool ion_buffer_page_is_dirty(struct page *page)
137 return !!((unsigned long)page & 1UL);
140 static inline void ion_buffer_page_dirty(struct page **page)
142 *page = (struct page *)((unsigned long)(*page) | 1UL);
145 static inline void ion_buffer_page_clean(struct page **page)
147 *page = (struct page *)((unsigned long)(*page) & ~(1UL));
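/*
 * Illustrative sketch (not part of the driver): the four helpers above keep a
 * per-page "dirty" flag in bit 0 of each buffer->pages[] entry, so the array
 * holds tagged pointers rather than plain struct page pointers.  The tag must
 * be stripped with ion_buffer_page() before the pointer is used as a page.
 */
static inline void ion_buffer_page_tag_sketch(struct page **slot)
{
	ion_buffer_page_dirty(slot);		/* set bit 0: page needs a sync */
	if (ion_buffer_page_is_dirty(*slot)) {
		struct page *page = ion_buffer_page(*slot); /* untagged pointer */

		(void)page;	/* ...sync this page for the device here... */
	}
	ion_buffer_page_clean(slot);		/* clear bit 0 once synced */
}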
150 /* this function should only be called while dev->lock is held */
151 static void ion_buffer_add(struct ion_device *dev,
152 struct ion_buffer *buffer)
154 struct rb_node **p = &dev->buffers.rb_node;
155 struct rb_node *parent = NULL;
156 struct ion_buffer *entry;
160 entry = rb_entry(parent, struct ion_buffer, node);
162 if (buffer < entry) {
164 } else if (buffer > entry) {
167 pr_err("%s: buffer already found.\n", __func__);
172 rb_link_node(&buffer->node, parent, p);
173 rb_insert_color(&buffer->node, &dev->buffers);
176 /* this function should only be called while dev->lock is held */
177 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
178 struct ion_device *dev,
183 struct ion_buffer *buffer;
184 struct sg_table *table;
185 struct scatterlist *sg;
189 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
191 return ERR_PTR(-ENOMEM);
194 buffer->flags = flags;
195 kref_init(&buffer->ref);
197 ret = heap->ops->allocate(heap, buffer, len, align, flags);
200 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
203 ion_heap_freelist_drain(heap, 0);
204 ret = heap->ops->allocate(heap, buffer, len, align,
213 table = heap->ops->map_dma(heap, buffer);
214 if (WARN_ONCE(table == NULL,
215 "heap->ops->map_dma should return ERR_PTR on error"))
216 table = ERR_PTR(-EINVAL);
218 heap->ops->free(buffer);
220 return ERR_PTR(PTR_ERR(table));
222 buffer->sg_table = table;
223 if (ion_buffer_fault_user_mappings(buffer)) {
224 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
225 struct scatterlist *sg;
228 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
229 if (!buffer->pages) {
234 for_each_sg(table->sgl, sg, table->nents, i) {
235 struct page *page = sg_page(sg);
237 for (j = 0; j < sg->length / PAGE_SIZE; j++)
238 buffer->pages[k++] = page++;
247 INIT_LIST_HEAD(&buffer->vmas);
248 mutex_init(&buffer->lock);
249 /* this will set up dma addresses for the sglist -- it is not
250 technically correct as per the dma api -- a specific
251 device isn't really taking ownership here. However, in practice on
252 our systems the only dma_address space is physical addresses.
253 Additionally, we can't afford the overhead of invalidating every
254 allocation via dma_map_sg. The implicit contract here is that
255 memory coming from the heaps is ready for dma, i.e. if it has a
256 cached mapping that mapping has been invalidated */
257 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
258 sg_dma_address(sg) = sg_phys(sg);
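/*
 * Illustrative sketch (not part of the driver): because of the contract
 * described above, a consumer of this buffer may walk the table and use
 * sg_dma_address() directly, without another dma_map_sg() pass, e.g.:
 *
 *	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
 *		program_hw_descriptor(sg_dma_address(sg), sg->length);
 *
 * where program_hw_descriptor() stands in for a device-specific helper.
 */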
259 mutex_lock(&dev->buffer_lock);
260 ion_buffer_add(dev, buffer);
261 mutex_unlock(&dev->buffer_lock);
263 do_gettimeofday(&time);
264 buffer->alloc_time = time;
268 heap->ops->unmap_dma(heap, buffer);
269 heap->ops->free(buffer);
272 vfree(buffer->pages);
278 void ion_buffer_destroy(struct ion_buffer *buffer)
280 if (WARN_ON(buffer->kmap_cnt > 0))
281 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
282 buffer->heap->ops->unmap_dma(buffer->heap, buffer);
283 buffer->heap->ops->free(buffer);
285 vfree(buffer->pages);
289 static void _ion_buffer_destroy(struct kref *kref)
291 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
292 struct ion_heap *heap = buffer->heap;
293 struct ion_device *dev = buffer->dev;
295 #if defined(CONFIG_SPRD_IOMMU)
298 for (i = IOMMU_GSP; i < IOMMU_MAX; i++) {
299 if (buffer->iomap_cnt[i] > 0)
301 buffer->iomap_cnt[i] = 0;
302 sprd_iova_unmap(i, buffer->iova[i], buffer->size);
303 sprd_iova_free(i, buffer->iova[i], buffer->size);
308 mutex_lock(&dev->buffer_lock);
309 rb_erase(&buffer->node, &dev->buffers);
310 mutex_unlock(&dev->buffer_lock);
312 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
313 ion_heap_freelist_add(heap, buffer);
315 ion_buffer_destroy(buffer);
318 static void ion_buffer_get(struct ion_buffer *buffer)
320 kref_get(&buffer->ref);
323 static int ion_buffer_put(struct ion_buffer *buffer)
325 return kref_put(&buffer->ref, _ion_buffer_destroy);
328 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
330 mutex_lock(&buffer->lock);
331 buffer->handle_count++;
332 mutex_unlock(&buffer->lock);
335 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
338 * when a buffer is removed from a handle, if it is not in
339 * any other handles, copy the taskcomm and the pid of the
340 * process it's being removed from into the buffer. At this
341 * point there will be no way to track what processes this buffer is
342 * being used by, it only exists as a dma_buf file descriptor.
343 * The taskcomm and pid can provide a debug hint as to where this fd
346 mutex_lock(&buffer->lock);
347 buffer->handle_count--;
348 BUG_ON(buffer->handle_count < 0);
349 if (!buffer->handle_count) {
350 struct task_struct *task;
352 task = current->group_leader;
353 get_task_comm(buffer->task_comm, task);
354 buffer->pid = task_pid_nr(task);
355 buffer->tid = task_pid_nr(current);
357 mutex_unlock(&buffer->lock);
360 static struct ion_handle *ion_handle_create(struct ion_client *client,
361 struct ion_buffer *buffer)
363 struct ion_handle *handle;
365 handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
367 return ERR_PTR(-ENOMEM);
368 kref_init(&handle->ref);
369 RB_CLEAR_NODE(&handle->node);
370 handle->client = client;
371 ion_buffer_get(buffer);
372 ion_buffer_add_to_handle(buffer);
373 handle->buffer = buffer;
378 static void ion_handle_kmap_put(struct ion_handle *);
380 static void ion_handle_destroy(struct kref *kref)
382 struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
383 struct ion_client *client = handle->client;
384 struct ion_buffer *buffer = handle->buffer;
386 mutex_lock(&buffer->lock);
387 while (handle->kmap_cnt)
388 ion_handle_kmap_put(handle);
389 mutex_unlock(&buffer->lock);
391 idr_remove(&client->idr, handle->id);
392 if (!RB_EMPTY_NODE(&handle->node))
393 rb_erase(&handle->node, &client->handles);
395 ion_buffer_remove_from_handle(buffer);
396 ion_buffer_put(buffer);
401 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
403 return handle->buffer;
406 static void ion_handle_get(struct ion_handle *handle)
408 kref_get(&handle->ref);
411 static int ion_handle_put(struct ion_handle *handle)
413 struct ion_client *client = handle->client;
416 mutex_lock(&client->lock);
417 ret = kref_put(&handle->ref, ion_handle_destroy);
418 mutex_unlock(&client->lock);
423 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
424 struct ion_buffer *buffer)
426 struct rb_node *n = client->handles.rb_node;
429 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
430 if (buffer < entry->buffer)
432 else if (buffer > entry->buffer)
437 return ERR_PTR(-EINVAL);
440 static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
443 struct ion_handle *handle;
445 mutex_lock(&client->lock);
446 handle = idr_find(&client->idr, id);
448 ion_handle_get(handle);
449 mutex_unlock(&client->lock);
451 return handle ? handle : ERR_PTR(-EINVAL);
454 static bool ion_handle_validate(struct ion_client *client,
455 struct ion_handle *handle)
457 WARN_ON(!mutex_is_locked(&client->lock));
458 return (idr_find(&client->idr, handle->id) == handle);
461 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
464 struct rb_node **p = &client->handles.rb_node;
465 struct rb_node *parent = NULL;
466 struct ion_handle *entry;
468 id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
476 entry = rb_entry(parent, struct ion_handle, node);
478 if (handle->buffer < entry->buffer)
480 else if (handle->buffer > entry->buffer)
483 WARN(1, "%s: buffer already found.", __func__);
486 rb_link_node(&handle->node, parent, p);
487 rb_insert_color(&handle->node, &client->handles);
492 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
493 size_t align, unsigned int heap_id_mask,
496 struct ion_handle *handle;
497 struct ion_device *dev = client->dev;
498 struct ion_buffer *buffer = NULL;
499 struct ion_heap *heap;
502 pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
503 len, align, heap_id_mask, flags);
505 * traverse the list of heaps available in this system in priority
506 * order. If the heap type is supported by the client, and matches the
507 * request of the caller, allocate from it. Repeat until allocate has
508 * succeeded or all heaps have been tried.
510 len = PAGE_ALIGN(len);
513 return ERR_PTR(-EINVAL);
515 down_read(&dev->lock);
516 plist_for_each_entry(heap, &dev->heaps, node) {
517 /* if the caller didn't specify this heap id */
518 if (!((1 << heap->id) & heap_id_mask))
520 buffer = ion_buffer_create(heap, dev, len, align, flags);
526 if (buffer == NULL) {
527 pr_err("%s: buffer is NULL!\n", __func__);
528 return ERR_PTR(-ENODEV);
531 if (IS_ERR(buffer)) {
532 pr_err("%s: buffer allocation failed (%ld)\n", __func__, PTR_ERR(buffer));
533 return ERR_PTR(PTR_ERR(buffer));
536 handle = ion_handle_create(client, buffer);
539 * ion_buffer_create will create a buffer with a ref_cnt of 1,
540 * and ion_handle_create will take a second reference, drop one here
542 ion_buffer_put(buffer);
544 if (IS_ERR(handle)) {
545 pr_err("%s: handle creation failed (%ld)\n", __func__, PTR_ERR(handle));
549 mutex_lock(&client->lock);
550 ret = ion_handle_add(client, handle);
551 mutex_unlock(&client->lock);
553 ion_handle_put(handle);
554 handle = ERR_PTR(ret);
559 EXPORT_SYMBOL(ion_alloc);
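/*
 * Illustrative sketch (not part of the driver): a minimal in-kernel user of
 * ion_alloc().  The client is assumed to come from ion_client_create(), the
 * heap mask assumes the platform registered a system heap whose id equals
 * ION_HEAP_TYPE_SYSTEM, and the prototypes used here come from ion.h.
 */
static inline int ion_alloc_usage_sketch(struct ion_client *client)
{
	struct ion_handle *handle;
	void *vaddr;

	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE,
			   1 << ION_HEAP_TYPE_SYSTEM, ION_FLAG_CACHED);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	vaddr = ion_map_kernel(client, handle);
	if (!IS_ERR(vaddr)) {
		/* CPU access through vaddr goes here */
		ion_unmap_kernel(client, handle);
	}

	ion_free(client, handle);
	return 0;
}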
561 void ion_free(struct ion_client *client, struct ion_handle *handle)
565 BUG_ON(client != handle->client);
567 mutex_lock(&client->lock);
568 valid_handle = ion_handle_validate(client, handle);
571 WARN(1, "%s: invalid handle passed to free.\n", __func__);
572 mutex_unlock(&client->lock);
575 mutex_unlock(&client->lock);
576 ion_handle_put(handle);
578 EXPORT_SYMBOL(ion_free);
580 int ion_phys(struct ion_client *client, struct ion_handle *handle,
581 ion_phys_addr_t *addr, size_t *len)
583 struct ion_buffer *buffer;
586 mutex_lock(&client->lock);
587 if (!ion_handle_validate(client, handle)) {
588 mutex_unlock(&client->lock);
592 buffer = handle->buffer;
594 if (!buffer->heap->ops->phys) {
595 pr_err("%s: ion_phys is not implemented by this heap.\n",
597 mutex_unlock(&client->lock);
600 mutex_unlock(&client->lock);
601 ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
604 EXPORT_SYMBOL(ion_phys);
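/*
 * Illustrative sketch (not part of the driver): ion_phys() is only useful for
 * physically contiguous heaps (carveout/CMA style) whose ops implement
 * ->phys(); other heaps take the error path above.
 */
static inline int ion_phys_usage_sketch(struct ion_client *client,
					struct ion_handle *handle)
{
	ion_phys_addr_t paddr;
	size_t len;
	int ret;

	ret = ion_phys(client, handle, &paddr, &len);
	if (ret)
		return ret;
	/* paddr/len now describe the whole contiguous allocation */
	return 0;
}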
606 int ion_is_phys(struct ion_client *client, struct ion_handle *handle)
608 struct ion_buffer *buffer;
611 mutex_lock(&client->lock);
612 if (!ion_handle_validate(client, handle)) {
613 mutex_unlock(&client->lock);
617 buffer = handle->buffer;
619 if (!buffer->heap->ops->phys)
622 mutex_unlock(&client->lock);
626 EXPORT_SYMBOL(ion_is_phys);
628 int ion_is_cached(struct ion_client *client, struct ion_handle *handle)
630 struct ion_buffer *buffer;
633 mutex_lock(&client->lock);
634 if (!ion_handle_validate(client, handle)) {
635 mutex_unlock(&client->lock);
639 buffer = handle->buffer;
641 cached = ion_buffer_cached(buffer);
642 mutex_unlock(&client->lock);
646 EXPORT_SYMBOL(ion_is_cached);
648 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
652 if (buffer->kmap_cnt) {
654 return buffer->vaddr;
656 vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
657 if (WARN_ONCE(vaddr == NULL,
658 "heap->ops->map_kernel should return ERR_PTR on error"))
659 return ERR_PTR(-EINVAL);
662 buffer->vaddr = vaddr;
667 static void *ion_handle_kmap_get(struct ion_handle *handle)
669 struct ion_buffer *buffer = handle->buffer;
672 if (handle->kmap_cnt) {
674 return buffer->vaddr;
676 vaddr = ion_buffer_kmap_get(buffer);
683 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
686 if (!buffer->kmap_cnt) {
687 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
688 buffer->vaddr = NULL;
692 static void ion_handle_kmap_put(struct ion_handle *handle)
694 struct ion_buffer *buffer = handle->buffer;
696 if (!handle->kmap_cnt) {
697 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
702 if (!handle->kmap_cnt)
703 ion_buffer_kmap_put(buffer);
706 int ion_map_iommu(struct ion_client *client, struct ion_handle *handle, int domain_no, unsigned long *ptr_iova)
708 struct ion_buffer *buffer;
710 mutex_lock(&client->lock);
711 if (!ion_handle_validate(client, handle)) {
712 pr_err("%s: invalid handle passed to map_iommu.\n",
714 mutex_unlock(&client->lock);
718 buffer = handle->buffer;
720 if (!handle->buffer->heap->ops->map_iommu) {
721 pr_err("%s: map_iommu is not implemented by this heap.\n",
723 mutex_unlock(&client->lock);
727 mutex_lock(&buffer->lock);
728 handle->buffer->heap->ops->map_iommu(buffer, domain_no, ptr_iova);
729 mutex_unlock(&buffer->lock);
730 mutex_unlock(&client->lock);
733 EXPORT_SYMBOL(ion_map_iommu);
735 int ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle, int domain_no)
737 struct ion_buffer *buffer;
739 mutex_lock(&client->lock);
740 if (!ion_handle_validate(client, handle)) {
741 pr_err("%s: invalid handle passed to unmap_iommu.\n",
743 mutex_unlock(&client->lock);
747 buffer = handle->buffer;
749 if (!handle->buffer->heap->ops->map_iommu) {
750 pr_err("%s: map_iommu is not implemented by this heap.\n",
752 mutex_unlock(&client->lock);
756 mutex_lock(&buffer->lock);
757 handle->buffer->heap->ops->unmap_iommu(buffer, domain_no);
758 mutex_unlock(&buffer->lock);
759 mutex_unlock(&client->lock);
762 EXPORT_SYMBOL(ion_unmap_iommu);
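/*
 * Illustrative sketch (not part of the driver): mapping a buffer into one of
 * the vendor IOMMU domains served by ion_map_iommu()/ion_unmap_iommu() above.
 * The domain number is platform specific (e.g. IOMMU_GSP under
 * CONFIG_SPRD_IOMMU); the prototypes are assumed to be exported via this
 * tree's ion.h.
 */
static inline int ion_iommu_usage_sketch(struct ion_client *client,
					 struct ion_handle *handle,
					 int domain_no)
{
	unsigned long iova = 0;
	int ret;

	ret = ion_map_iommu(client, handle, domain_no, &iova);
	if (ret)
		return ret;
	/* program iova into the hardware block that owns domain_no ... */
	ion_unmap_iommu(client, handle, domain_no);
	return 0;
}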
764 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
766 struct ion_buffer *buffer;
769 mutex_lock(&client->lock);
770 if (!ion_handle_validate(client, handle)) {
771 pr_err("%s: invalid handle passed to map_kernel.\n",
773 mutex_unlock(&client->lock);
774 return ERR_PTR(-EINVAL);
777 buffer = handle->buffer;
779 if (!handle->buffer->heap->ops->map_kernel) {
780 pr_err("%s: map_kernel is not implemented by this heap.\n",
782 mutex_unlock(&client->lock);
783 return ERR_PTR(-ENODEV);
786 mutex_lock(&buffer->lock);
787 vaddr = ion_handle_kmap_get(handle);
788 mutex_unlock(&buffer->lock);
789 mutex_unlock(&client->lock);
792 EXPORT_SYMBOL(ion_map_kernel);
794 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
796 struct ion_buffer *buffer;
798 mutex_lock(&client->lock);
799 buffer = handle->buffer;
800 mutex_lock(&buffer->lock);
801 ion_handle_kmap_put(handle);
802 mutex_unlock(&buffer->lock);
803 mutex_unlock(&client->lock);
805 EXPORT_SYMBOL(ion_unmap_kernel);
807 static int ion_debug_client_show(struct seq_file *s, void *unused)
809 struct ion_client *client = s->private;
811 size_t sizes[ION_NUM_HEAP_IDS] = {0};
812 const char *names[ION_NUM_HEAP_IDS] = {NULL};
815 mutex_lock(&client->lock);
816 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
817 struct ion_handle *handle = rb_entry(n, struct ion_handle,
819 unsigned int id = handle->buffer->heap->id;
822 names[id] = handle->buffer->heap->name;
823 sizes[id] += handle->buffer->size;
825 mutex_unlock(&client->lock);
827 seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
828 for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
831 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
836 static int ion_debug_client_open(struct inode *inode, struct file *file)
838 return single_open(file, ion_debug_client_show, inode->i_private);
841 static const struct file_operations debug_client_fops = {
842 .open = ion_debug_client_open,
845 .release = single_release,
848 static int ion_get_client_serial(const struct rb_root *root,
849 const unsigned char *name)
852 struct rb_node *node;
853 for (node = rb_first(root); node; node = rb_next(node)) {
854 struct ion_client *client = rb_entry(node, struct ion_client,
856 if (strcmp(client->name, name))
858 serial = max(serial, client->display_serial);
863 struct ion_client *ion_client_create(struct ion_device *dev,
866 struct ion_client *client;
867 struct task_struct *task;
869 struct rb_node *parent = NULL;
870 struct ion_client *entry;
875 pr_err("%s: Name cannot be null\n", __func__);
876 return ERR_PTR(-EINVAL);
879 get_task_struct(current->group_leader);
880 task_lock(current->group_leader);
881 pid = task_pid_nr(current->group_leader);
882 tid = task_pid_nr(current);
883 /* don't bother to store task struct for kernel threads,
884 they can't be killed anyway */
885 if (current->group_leader->flags & PF_KTHREAD) {
886 put_task_struct(current->group_leader);
889 task = current->group_leader;
891 task_unlock(current->group_leader);
893 client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
895 goto err_put_task_struct;
898 client->handles = RB_ROOT;
899 idr_init(&client->idr);
900 mutex_init(&client->lock);
904 client->name = kstrdup(name, GFP_KERNEL);
906 goto err_free_client;
908 down_write(&dev->lock);
909 client->display_serial = ion_get_client_serial(&dev->clients, name);
910 client->display_name = kasprintf(
911 GFP_KERNEL, "%s-%d", name, client->display_serial);
912 if (!client->display_name) {
913 up_write(&dev->lock);
914 goto err_free_client_name;
916 p = &dev->clients.rb_node;
919 entry = rb_entry(parent, struct ion_client, node);
923 else if (client > entry)
926 rb_link_node(&client->node, parent, p);
927 rb_insert_color(&client->node, &dev->clients);
929 client->debug_root = debugfs_create_file(client->display_name, 0664,
930 dev->clients_debug_root,
931 client, &debug_client_fops);
932 if (!client->debug_root) {
933 char buf[256], *path;
934 path = dentry_path(dev->clients_debug_root, buf, 256);
935 pr_err("Failed to create client debugfs at %s/%s\n",
936 path, client->display_name);
939 up_write(&dev->lock);
943 err_free_client_name:
949 put_task_struct(current->group_leader);
950 return ERR_PTR(-ENOMEM);
952 EXPORT_SYMBOL(ion_client_create);
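/*
 * Illustrative sketch (not part of the driver): how a kernel driver obtains
 * and releases a client.  The ion_device pointer is assumed to come from the
 * platform's ion_device_create() call, the prototypes from ion.h, and
 * "example-driver" is only a debug name.
 */
static inline int ion_client_usage_sketch(struct ion_device *idev)
{
	struct ion_client *client = ion_client_create(idev, "example-driver");

	if (IS_ERR(client))
		return PTR_ERR(client);
	/* ... ion_alloc()/ion_free() against this client ... */
	ion_client_destroy(client);
	return 0;
}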
954 void ion_client_destroy(struct ion_client *client)
956 struct ion_device *dev = client->dev;
959 pr_debug("%s: %d\n", __func__, __LINE__);
960 while ((n = rb_first(&client->handles))) {
961 struct ion_handle *handle = rb_entry(n, struct ion_handle,
963 ion_handle_destroy(&handle->ref);
966 idr_destroy(&client->idr);
968 down_write(&dev->lock);
970 put_task_struct(client->task);
971 rb_erase(&client->node, &dev->clients);
972 debugfs_remove_recursive(client->debug_root);
973 up_write(&dev->lock);
975 kfree(client->display_name);
979 EXPORT_SYMBOL(ion_client_destroy);
981 int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
982 unsigned long *size, unsigned int *heap_id)
984 struct ion_buffer *buffer;
985 struct ion_heap *heap;
987 mutex_lock(&client->lock);
988 if (!ion_handle_validate(client, handle)) {
989 pr_err("%s: invalid handle passed to %s.\n",
991 mutex_unlock(&client->lock);
994 buffer = handle->buffer;
995 mutex_lock(&buffer->lock);
997 *heap_id = (1 << heap->id);
998 *size = buffer->size;
999 mutex_unlock(&buffer->lock);
1000 mutex_unlock(&client->lock);
1004 EXPORT_SYMBOL(ion_handle_get_size);
1006 struct sg_table *ion_sg_table(struct ion_client *client,
1007 struct ion_handle *handle)
1009 struct ion_buffer *buffer;
1010 struct sg_table *table;
1012 mutex_lock(&client->lock);
1013 if (!ion_handle_validate(client, handle)) {
1014 pr_err("%s: invalid handle passed to map_dma.\n",
1016 mutex_unlock(&client->lock);
1017 return ERR_PTR(-EINVAL);
1019 buffer = handle->buffer;
1020 table = buffer->sg_table;
1021 mutex_unlock(&client->lock);
1024 EXPORT_SYMBOL(ion_sg_table);
1026 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1028 enum dma_data_direction direction);
1030 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1031 enum dma_data_direction direction)
1033 struct dma_buf *dmabuf = attachment->dmabuf;
1034 struct ion_buffer *buffer = dmabuf->priv;
1036 ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1037 return buffer->sg_table;
1040 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1041 struct sg_table *table,
1042 enum dma_data_direction direction)
1046 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1047 size_t size, enum dma_data_direction dir)
1049 struct scatterlist sg;
1051 sg_init_table(&sg, 1);
1052 sg_set_page(&sg, page, size, 0);
1054 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1055 * for the targeted device, but this works on the currently targeted
1058 sg_dma_address(&sg) = page_to_phys(page);
1059 dma_sync_sg_for_device(dev, &sg, 1, dir);
1062 struct ion_vma_list {
1063 struct list_head list;
1064 struct vm_area_struct *vma;
1067 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1069 enum dma_data_direction dir)
1071 struct ion_vma_list *vma_list;
1072 int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1075 pr_debug("%s: syncing for device %s\n", __func__,
1076 dev ? dev_name(dev) : "null");
1078 if (!ion_buffer_fault_user_mappings(buffer))
1081 mutex_lock(&buffer->lock);
1082 for (i = 0; i < pages; i++) {
1083 struct page *page = buffer->pages[i];
1085 if (ion_buffer_page_is_dirty(page))
1086 ion_pages_sync_for_device(dev, ion_buffer_page(page),
1089 ion_buffer_page_clean(buffer->pages + i);
1091 list_for_each_entry(vma_list, &buffer->vmas, list) {
1092 struct vm_area_struct *vma = vma_list->vma;
1094 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1097 mutex_unlock(&buffer->lock);
1100 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1102 struct ion_buffer *buffer = vma->vm_private_data;
1106 mutex_lock(&buffer->lock);
1107 ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1108 BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1110 pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1111 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1112 mutex_unlock(&buffer->lock);
1114 return VM_FAULT_ERROR;
1116 return VM_FAULT_NOPAGE;
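/*
 * Illustrative flow (not a code change): for faultable buffers -- cached and
 * without ION_FLAG_CACHED_NEEDS_SYNC -- ion_mmap() installs no PTEs up front.
 * The first CPU touch of each page faults into the handler above, which marks
 * that page dirty in buffer->pages[] and inserts the PFN.  A later
 * ion_buffer_sync_for_device() then syncs only the dirty pages and zaps the
 * user mappings, so the dirty tracking starts over on the next CPU access.
 */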
1119 static void ion_vm_open(struct vm_area_struct *vma)
1121 struct ion_buffer *buffer = vma->vm_private_data;
1122 struct ion_vma_list *vma_list;
1124 vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1127 vma_list->vma = vma;
1128 mutex_lock(&buffer->lock);
1129 list_add(&vma_list->list, &buffer->vmas);
1130 mutex_unlock(&buffer->lock);
1131 pr_debug("%s: adding %p\n", __func__, vma);
1134 static void ion_vm_close(struct vm_area_struct *vma)
1136 struct ion_buffer *buffer = vma->vm_private_data;
1137 struct ion_vma_list *vma_list, *tmp;
1139 pr_debug("%s\n", __func__);
1140 mutex_lock(&buffer->lock);
1141 list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1142 if (vma_list->vma != vma)
1144 list_del(&vma_list->list);
1146 pr_debug("%s: deleting %p\n", __func__, vma);
1149 mutex_unlock(&buffer->lock);
1152 static struct vm_operations_struct ion_vma_ops = {
1153 .open = ion_vm_open,
1154 .close = ion_vm_close,
1155 .fault = ion_vm_fault,
1158 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1160 struct ion_buffer *buffer = dmabuf->priv;
1163 if (!buffer->heap->ops->map_user) {
1164 pr_err("%s: this heap does not define a method for mapping to userspace\n",
1165        __func__);
1169 if (ion_buffer_fault_user_mappings(buffer)) {
1170 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1172 vma->vm_private_data = buffer;
1173 vma->vm_ops = &ion_vma_ops;
1178 if (!(buffer->flags & ION_FLAG_CACHED))
1179 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1181 mutex_lock(&buffer->lock);
1182 /* now map it to userspace */
1183 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1184 mutex_unlock(&buffer->lock);
1187 pr_err("%s: failure mapping buffer to userspace\n",
1193 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1195 struct ion_buffer *buffer = dmabuf->priv;
1196 ion_buffer_put(buffer);
1199 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1201 struct ion_buffer *buffer = dmabuf->priv;
1202 return buffer->vaddr + offset * PAGE_SIZE;
1205 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1211 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1213 enum dma_data_direction direction)
1215 struct ion_buffer *buffer = dmabuf->priv;
1218 if (!buffer->heap->ops->map_kernel) {
1219 pr_err("%s: map kernel is not implemented by this heap.\n",
1224 mutex_lock(&buffer->lock);
1225 vaddr = ion_buffer_kmap_get(buffer);
1226 mutex_unlock(&buffer->lock);
1228 return PTR_ERR(vaddr);
1232 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1234 enum dma_data_direction direction)
1236 struct ion_buffer *buffer = dmabuf->priv;
1238 mutex_lock(&buffer->lock);
1239 ion_buffer_kmap_put(buffer);
1240 mutex_unlock(&buffer->lock);
1243 static struct dma_buf_ops dma_buf_ops = {
1244 .map_dma_buf = ion_map_dma_buf,
1245 .unmap_dma_buf = ion_unmap_dma_buf,
1247 .release = ion_dma_buf_release,
1248 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1249 .end_cpu_access = ion_dma_buf_end_cpu_access,
1250 .kmap_atomic = ion_dma_buf_kmap,
1251 .kunmap_atomic = ion_dma_buf_kunmap,
1252 .kmap = ion_dma_buf_kmap,
1253 .kunmap = ion_dma_buf_kunmap,
1256 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1257 struct ion_handle *handle)
1259 struct ion_buffer *buffer;
1260 struct dma_buf *dmabuf;
1263 mutex_lock(&client->lock);
1264 valid_handle = ion_handle_validate(client, handle);
1265 if (!valid_handle) {
1266 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1267 mutex_unlock(&client->lock);
1268 return ERR_PTR(-EINVAL);
1270 buffer = handle->buffer;
1271 ion_buffer_get(buffer);
1272 mutex_unlock(&client->lock);
1274 dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1275 if (IS_ERR(dmabuf)) {
1276 ion_buffer_put(buffer);
1282 EXPORT_SYMBOL(ion_share_dma_buf);
1284 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1286 struct dma_buf *dmabuf;
1289 dmabuf = ion_share_dma_buf(client, handle);
1290 if (IS_ERR(dmabuf)) {
1291 pr_err("%s: ion_share_dma_buf() failed (%ld)\n", __func__, PTR_ERR(dmabuf));
1292 return PTR_ERR(dmabuf);
1295 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1297 pr_err("%s: dma_buf_fd() failed (%d)\n", __func__, fd);
1298 dma_buf_put(dmabuf);
1303 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1305 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1307 struct dma_buf *dmabuf;
1308 struct ion_buffer *buffer;
1309 struct ion_handle *handle;
1312 dmabuf = dma_buf_get(fd);
1313 if (IS_ERR(dmabuf)) {
1314 pr_err("%s: dma_buf_get() failed (%ld) for fd %d\n",
1315        __func__, PTR_ERR(dmabuf), fd);
1316 return ERR_PTR(PTR_ERR(dmabuf));
1318 /* if this memory came from ion */
1320 if (dmabuf->ops != &dma_buf_ops) {
1321 pr_err("%s: can not import dmabuf from another exporter\n",
1323 dma_buf_put(dmabuf);
1324 return ERR_PTR(-EINVAL);
1326 buffer = dmabuf->priv;
1328 mutex_lock(&client->lock);
1329 /* if a handle exists for this buffer just take a reference to it */
1330 handle = ion_handle_lookup(client, buffer);
1331 if (!IS_ERR(handle)) {
1332 ion_handle_get(handle);
1333 mutex_unlock(&client->lock);
1336 mutex_unlock(&client->lock);
1338 handle = ion_handle_create(client, buffer);
1342 mutex_lock(&client->lock);
1343 ret = ion_handle_add(client, handle);
1344 mutex_unlock(&client->lock);
1346 ion_handle_put(handle);
1347 handle = ERR_PTR(ret);
1351 dma_buf_put(dmabuf);
1354 EXPORT_SYMBOL(ion_import_dma_buf);
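/*
 * Illustrative sketch (not part of the driver): the fd round trip between two
 * clients.  ion_share_dma_buf_fd() installs a dma_buf fd (normally handed to
 * userspace, which then owns and eventually closes it); ion_import_dma_buf()
 * resolves such an fd back to the underlying ion_buffer, reusing an existing
 * handle when the client already has one (see ion_handle_lookup() above).
 */
static inline int ion_share_import_sketch(struct ion_client *producer,
					  struct ion_handle *handle,
					  struct ion_client *consumer)
{
	struct ion_handle *imported;
	int fd;

	fd = ion_share_dma_buf_fd(producer, handle);
	if (fd < 0)
		return fd;

	imported = ion_import_dma_buf(consumer, fd);
	if (IS_ERR(imported))
		return PTR_ERR(imported);

	ion_free(consumer, imported);	/* drop the consumer's reference again */
	return 0;
}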
1356 static int ion_invalidate_for_cpu(struct ion_client *client, int fd)
1358 struct dma_buf *dmabuf;
1359 struct ion_buffer *buffer;
1361 dmabuf = dma_buf_get(fd);
1364 pr_err("%s: dma_buf_get() failed (%ld)\n", __func__, PTR_ERR(dmabuf));
1365 return PTR_ERR(dmabuf);
1368 /* if this memory came from ion */
1369 if (dmabuf->ops != &dma_buf_ops) {
1370 pr_err("%s: can not sync dmabuf from another exporter\n",
1372 dma_buf_put(dmabuf);
1375 buffer = dmabuf->priv;
1377 dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
1378 buffer->sg_table->nents, DMA_FROM_DEVICE);
1379 dma_buf_put(dmabuf);
1383 static int ion_sync_for_device(struct ion_client *client, int fd)
1385 struct dma_buf *dmabuf;
1386 struct ion_buffer *buffer;
1388 dmabuf = dma_buf_get(fd);
1389 if (IS_ERR(dmabuf)) {
1390 pr_err("%s: dma_buf_get() failed (%ld) for fd %d\n", __func__, PTR_ERR(dmabuf), fd);
1391 return PTR_ERR(dmabuf);
1394 /* if this memory came from ion */
1395 if (dmabuf->ops != &dma_buf_ops) {
1396 pr_err("%s: can not sync dmabuf from another exporter\n",
1398 dma_buf_put(dmabuf);
1401 buffer = dmabuf->priv;
1403 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1404 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1405 dma_buf_put(dmabuf);
1409 /* fix up the cases where the ioctl direction bits are incorrect */
1410 static unsigned int ion_ioctl_dir(unsigned int cmd)
1415 case ION_IOC_CUSTOM:
1418 return _IOC_DIR(cmd);
1422 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1424 struct ion_client *client = filp->private_data;
1425 struct ion_device *dev = client->dev;
1426 struct ion_handle *cleanup_handle = NULL;
1431 struct ion_fd_data fd;
1432 struct ion_allocation_data allocation;
1433 struct ion_handle_data handle;
1434 struct ion_custom_data custom;
1437 dir = ion_ioctl_dir(cmd);
1438 pr_debug("%s:cmd[0x%x]dir[0x%x]\n", __func__, cmd, dir);
1440 if (_IOC_SIZE(cmd) > sizeof(data)) {
1445 if (dir & _IOC_WRITE)
1446 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) {
1454 struct ion_handle *handle;
1456 handle = ion_alloc(client, data.allocation.len,
1457 data.allocation.align,
1458 data.allocation.heap_id_mask,
1459 data.allocation.flags);
1460 if (IS_ERR(handle)) {
1461 ret = PTR_ERR(handle);
1465 data.allocation.handle = handle->id;
1467 cleanup_handle = handle;
1472 struct ion_handle *handle;
1474 handle = ion_handle_get_by_id(client, data.handle.handle);
1475 if (IS_ERR(handle)) {
1476 ret = PTR_ERR(handle);
1479 ion_free(client, handle);
1480 ion_handle_put(handle);
1486 struct ion_handle *handle;
1488 handle = ion_handle_get_by_id(client, data.handle.handle);
1489 if (IS_ERR(handle)) {
1490 ret = PTR_ERR(handle);
1493 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1494 ion_handle_put(handle);
1499 case ION_IOC_IMPORT:
1501 struct ion_handle *handle;
1502 handle = ion_import_dma_buf(client, data.fd.fd);
1504 ret = PTR_ERR(handle);
1506 data.handle.handle = handle->id;
1509 case ION_IOC_INVALIDATE:
1511 ret = ion_invalidate_for_cpu(client, data.fd.fd);
1516 ret = ion_sync_for_device(client, data.fd.fd);
1519 case ION_IOC_CUSTOM:
1521 if (!dev->custom_ioctl) {
1525 ret = dev->custom_ioctl(client, data.custom.cmd,
1534 if (dir & _IOC_READ) {
1535 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1537 ion_free(client, cleanup_handle);
1544 pr_info("%s:cmd[0x%x]ret[%d]\n", __func__, cmd, ret);
1549 static int ion_release(struct inode *inode, struct file *file)
1551 struct ion_client *client = file->private_data;
1553 pr_debug("%s: %d\n", __func__, __LINE__);
1554 ion_client_destroy(client);
1558 static int ion_open(struct inode *inode, struct file *file)
1560 struct miscdevice *miscdev = file->private_data;
1561 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1562 struct ion_client *client;
1563 char debug_name[64];
1565 pr_debug("%s: %d\n", __func__, __LINE__);
1566 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1567 client = ion_client_create(dev, debug_name);
1569 return PTR_ERR(client);
1570 file->private_data = client;
1575 static const struct file_operations ion_fops = {
1576 .owner = THIS_MODULE,
1578 .release = ion_release,
1579 .unlocked_ioctl = ion_ioctl,
1580 .compat_ioctl = compat_ion_ioctl,
1583 static size_t ion_debug_heap_total(struct ion_client *client,
1589 mutex_lock(&client->lock);
1590 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1591 struct ion_handle *handle = rb_entry(n,
1594 if (handle->buffer->heap->id == id)
1595 size += handle->buffer->size;
1597 mutex_unlock(&client->lock);
1601 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1603 struct ion_heap *heap = s->private;
1604 struct ion_device *dev = heap->dev;
1608 size_t total_size = 0;
1609 size_t total_orphaned_size = 0;
1611 seq_printf(s, "%16s %6s %6s %10s %16s\n", "client", "pid", "tid", "size", "alloc_time");
1612 seq_printf(s, "----------------------------------------------------------\n");
1614 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1615 struct ion_client *client = rb_entry(n, struct ion_client,
1618 mutex_lock(&client->lock);
1619 for (r = rb_first(&client->handles); r; r = rb_next(r)) {
1620 struct ion_handle *handle = rb_entry(r,
1623 struct ion_buffer *buffer = handle->buffer;
1625 if (buffer->heap->id == heap->id) {
1628 time_to_tm(buffer->alloc_time.tv_sec, 0, &t);
1630 char task_comm[TASK_COMM_LEN];
1632 get_task_comm(task_comm, client->task);
1633 seq_printf(s, "%16s %6u %6u %10zu %ld.%d.%d-%d:%d:%d.%ld\n",
1634 task_comm, client->pid, client->tid, buffer->size,
1635 t.tm_year + 1900, t.tm_mon + 1,
1636 t.tm_mday, t.tm_hour, t.tm_min,
1637 t.tm_sec, buffer->alloc_time.tv_usec);
1639 seq_printf(s, "%16s %6u %6u %10zu %ld.%d.%d-%d:%d:%d.%ld\n",
1640 client->name, client->pid, client->tid, buffer->size,
1641 t.tm_year + 1900, t.tm_mon + 1,
1642 t.tm_mday, t.tm_hour, t.tm_min,
1643 t.tm_sec, buffer->alloc_time.tv_usec);
1647 mutex_unlock(&client->lock);
1649 seq_printf(s, "----------------------------------------------------------\n");
1650 seq_printf(s, "orphaned allocations (info is from last known client):"
1652 mutex_lock(&dev->buffer_lock);
1653 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1654 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1656 if (buffer->heap->id != heap->id)
1658 total_size += buffer->size;
1659 if (!buffer->handle_count) {
1660 time_to_tm(buffer->alloc_time.tv_sec, 0, &t);
1661 seq_printf(s, "%16s %6u %6u %10zu %d %d %ld.%d.%d-%d:%d:%d.%ld\n",
1662 buffer->task_comm, buffer->pid, buffer->tid, buffer->size,
1663 buffer->kmap_cnt, atomic_read(&buffer->ref.refcount),
1664 t.tm_year + 1900, t.tm_mon + 1,
1665 t.tm_mday, t.tm_hour, t.tm_min,
1666 t.tm_sec, buffer->alloc_time.tv_usec);
1667 total_orphaned_size += buffer->size;
1670 mutex_unlock(&dev->buffer_lock);
1671 seq_printf(s, "----------------------------------------------------------\n");
1672 seq_printf(s, "%16s %22zu\n", "total orphaned",
1673 total_orphaned_size);
1674 seq_printf(s, "%16s %22zu\n", "total ", total_size);
1675 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1676 seq_printf(s, "%16s %22zu\n", "deferred free",
1677 heap->free_list_size);
1678 seq_printf(s, "----------------------------------------------------------\n");
1680 if (heap->debug_show)
1681 heap->debug_show(heap, s, unused);
1686 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1688 return single_open(file, ion_debug_heap_show, inode->i_private);
1691 static const struct file_operations debug_heap_fops = {
1692 .open = ion_debug_heap_open,
1694 .llseek = seq_lseek,
1695 .release = single_release,
1698 #ifdef DEBUG_HEAP_SHRINKER
1699 static int debug_shrink_set(void *data, u64 val)
1701 struct ion_heap *heap = data;
1702 struct shrink_control sc;
1711 objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1712 sc.nr_to_scan = objs;
1714 heap->shrinker.shrink(&heap->shrinker, &sc);
1718 static int debug_shrink_get(void *data, u64 *val)
1720 struct ion_heap *heap = data;
1721 struct shrink_control sc;
1727 objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1732 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1733 debug_shrink_set, "%llu\n");
1736 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1738 struct dentry *debug_file;
1740 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1741 !heap->ops->unmap_dma)
1742 pr_err("%s: can not add heap with invalid ops struct.\n",
1745 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1746 ion_heap_init_deferred_free(heap);
1748 if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1749 ion_heap_init_shrinker(heap);
1752 down_write(&dev->lock);
1753 /* use negative heap->id to reverse the priority -- when traversing
1754 the list later attempt higher id numbers first */
1755 plist_node_init(&heap->node, -heap->id);
1756 plist_add(&heap->node, &dev->heaps);
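/*
 * Worked example of the priority trick above (illustrative): heaps registered
 * with ids 0, 1 and 2 get plist priorities 0, -1 and -2.  plist iterates in
 * ascending priority order, so ion_alloc() tries heap id 2 first and falls
 * back to heap id 0 last.
 */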
1757 debug_file = debugfs_create_file(heap->name, 0664,
1758 dev->heaps_debug_root, heap,
1762 char buf[256], *path;
1763 path = dentry_path(dev->heaps_debug_root, buf, 256);
1764 pr_err("Failed to create heap debugfs at %s/%s\n",
1768 #ifdef DEBUG_HEAP_SHRINKER
1769 if (heap->shrinker.shrink) {
1770 char debug_name[64];
1772 snprintf(debug_name, 64, "%s_shrink", heap->name);
1773 debug_file = debugfs_create_file(
1774 debug_name, 0644, dev->heaps_debug_root, heap,
1775 &debug_shrink_fops);
1777 char buf[256], *path;
1778 path = dentry_path(dev->heaps_debug_root, buf, 256);
1779 pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1784 up_write(&dev->lock);
1787 struct ion_device *ion_device_create(long (*custom_ioctl)
1788 (struct ion_client *client,
1792 struct ion_device *idev;
1795 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1797 return ERR_PTR(-ENOMEM);
1799 idev->dev.minor = MISC_DYNAMIC_MINOR;
1800 idev->dev.name = "ion";
1801 idev->dev.fops = &ion_fops;
1802 idev->dev.parent = NULL;
1803 ret = misc_register(&idev->dev);
1805 pr_err("ion: failed to register misc device.\n");
1806 return ERR_PTR(ret);
1809 idev->debug_root = debugfs_create_dir("ion", NULL);
1810 if (!idev->debug_root) {
1811 pr_err("ion: failed to create debugfs root directory.\n");
1814 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1815 if (!idev->heaps_debug_root) {
1816 pr_err("ion: failed to create debugfs heaps directory.\n");
1819 idev->clients_debug_root = debugfs_create_dir("clients",
1821 if (!idev->clients_debug_root)
1822 pr_err("ion: failed to create debugfs clients directory.\n");
1826 idev->custom_ioctl = custom_ioctl;
1827 idev->buffers = RB_ROOT;
1828 mutex_init(&idev->buffer_lock);
1829 init_rwsem(&idev->lock);
1830 plist_head_init(&idev->heaps);
1831 idev->clients = RB_ROOT;
1835 void ion_device_destroy(struct ion_device *dev)
1837 misc_deregister(&dev->dev);
1838 debugfs_remove_recursive(dev->debug_root);
1839 /* XXX need to free the heaps and clients ? */
1843 void __init ion_reserve(struct ion_platform_data *data)
1847 for (i = 0; i < data->nr; i++) {
1848 if (data->heaps[i].size == 0)
1851 if (data->heaps[i].base == 0) {
1853 paddr = memblock_alloc_base(data->heaps[i].size,
1854 data->heaps[i].align,
1855 MEMBLOCK_ALLOC_ANYWHERE);
1857 pr_err("%s: error allocating memblock for "
1862 data->heaps[i].base = paddr;
1864 int ret = memblock_reserve(data->heaps[i].base,
1865 data->heaps[i].size);
1867 pr_err("memblock reserve of %zx@%lx failed\n",
1868 data->heaps[i].size,
1869 data->heaps[i].base);
1871 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1872 data->heaps[i].name,
1873 data->heaps[i].base,
1874 data->heaps[i].size);
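/*
 * Illustrative sketch (not part of the driver): the platform data that
 * ion_reserve() walks.  Field names follow struct ion_platform_heap and
 * struct ion_platform_data from this tree's ion.h; the carveout heap, its id
 * and the sizes below are assumptions for the example only:
 *
 *	static struct ion_platform_heap example_heaps[] = {
 *		{
 *			.id    = 1,
 *			.type  = ION_HEAP_TYPE_CARVEOUT,
 *			.name  = "carveout",
 *			.base  = 0,
 *			.size  = 16 * SZ_1M,
 *			.align = SZ_1M,
 *		},
 *	};
 *	static struct ion_platform_data example_ion_pdata = {
 *		.nr    = ARRAY_SIZE(example_heaps),
 *		.heaps = example_heaps,
 *	};
 *
 * A base of 0 makes ion_reserve() allocate a region from memblock; a non-zero
 * base is reserved as given.  Board code would call
 * ion_reserve(&example_ion_pdata) from its early memory-reserve hook, before
 * ion_device_create() and ion_device_add_heap().
 */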