3 * drivers/gpu/ion/ion.c
5 * Copyright (C) 2011 Google, Inc.
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/time.h>
39 #ifdef CONFIG_DRM_SPRD
45 #include "compat_ion.h"
48 * struct ion_device - the metadata of the ion device node
49 * @dev: the actual misc device
50 * @buffers: an rb tree of all the existing buffers
51 * @buffer_lock: lock protecting the tree of buffers
52 * @lock: rwsem protecting the tree of heaps and clients
53 * @heaps: list of all the heaps in the system
54 * @clients: an rb tree of all the clients in the system
57 struct miscdevice dev;
58 struct rb_root buffers;
59 struct mutex buffer_lock;
60 struct rw_semaphore lock;
61 struct plist_head heaps;
62 long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
64 struct rb_root clients;
65 struct dentry *debug_root;
66 struct dentry *heaps_debug_root;
67 struct dentry *clients_debug_root;
71 * struct ion_client - a process/hw block local address space
72 * @node: node in the tree of all clients
73 * @dev: backpointer to ion device
74 * @handles: an rb tree of all the handles in this client
75 * @idr: an idr space for allocating handle ids
76 * @lock: lock protecting the tree of handles
77 * @name: used for debugging
78 * @display_name: used for debugging (unique version of @name)
79 * @display_serial: used for debugging (to make display_name unique)
80 * @task: used for debugging
82 * A client represents a list of buffers this client may access.
83 * The mutex stored here is used to protect both the tree of handles
84 * and the handles themselves, and should be held while modifying either.
88 struct ion_device *dev;
89 struct rb_root handles;
95 struct task_struct *task;
98 struct dentry *debug_root;
102 * ion_handle - a client local reference to a buffer
103 * @ref: reference count
104 * @client: back pointer to the client the buffer resides in
105 * @buffer: pointer to the buffer
106 * @node: node in the client's handle rbtree
107 * @kmap_cnt: count of times this client has mapped to kernel
108 * @id: client-unique id allocated by client->idr
110 * Modifications to node and kmap_cnt should be protected by the
111 * lock in the client. Other fields are never changed after initialization.
115 struct ion_client *client;
116 struct ion_buffer *buffer;
118 unsigned int kmap_cnt;
122 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
124 return (buffer->flags & ION_FLAG_CACHED) &&
125 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
128 bool ion_buffer_cached(struct ion_buffer *buffer)
130 return !!(buffer->flags & ION_FLAG_CACHED);
133 static inline struct page *ion_buffer_page(struct page *page)
135 return (struct page *)((unsigned long)page & ~(1UL));
138 static inline bool ion_buffer_page_is_dirty(struct page *page)
140 return !!((unsigned long)page & 1UL);
143 static inline void ion_buffer_page_dirty(struct page **page)
145 *page = (struct page *)((unsigned long)(*page) | 1UL);
148 static inline void ion_buffer_page_clean(struct page **page)
150 *page = (struct page *)((unsigned long)(*page) & ~(1UL));
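/*
 * Editor's note -- illustrative sketch, not part of the driver: the four
 * helpers above stash a "dirty" flag in bit 0 of each struct page pointer
 * stored in buffer->pages[].  Because struct page is at least word aligned,
 * bit 0 of a valid pointer is always zero and is free to carry state:
 *
 *	struct page *p = buffer->pages[i];	// possibly tagged
 *	bool dirty = (unsigned long)p & 1UL;	// ion_buffer_page_is_dirty()
 *	p = ion_buffer_page(p);			// strip the tag before use
 *
 * Any code that dereferences an entry must strip the tag first, as
 * ion_buffer_sync_for_device() and ion_vm_fault() below do.
 */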
153 /* this function should only be called while dev->lock is held */
154 static void ion_buffer_add(struct ion_device *dev,
155 struct ion_buffer *buffer)
157 struct rb_node **p = &dev->buffers.rb_node;
158 struct rb_node *parent = NULL;
159 struct ion_buffer *entry;
163 entry = rb_entry(parent, struct ion_buffer, node);
165 if (buffer < entry) {
167 } else if (buffer > entry) {
170 pr_err("%s: buffer already found.\n", __func__);
175 rb_link_node(&buffer->node, parent, p);
176 rb_insert_color(&buffer->node, &dev->buffers);
179 /* this function should only be called while dev->lock is held */
180 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
181 struct ion_device *dev,
186 struct ion_buffer *buffer;
187 struct sg_table *table;
188 struct scatterlist *sg;
192 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
194 return ERR_PTR(-ENOMEM);
197 buffer->flags = flags;
198 kref_init(&buffer->ref);
200 ret = heap->ops->allocate(heap, buffer, len, align, flags);
203 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
206 ion_heap_freelist_drain(heap, 0);
207 ret = heap->ops->allocate(heap, buffer, len, align,
216 table = heap->ops->map_dma(heap, buffer);
217 if (WARN_ONCE(table == NULL,
218 "heap->ops->map_dma should return ERR_PTR on error"))
219 table = ERR_PTR(-EINVAL);
221 heap->ops->free(buffer);
223 return ERR_CAST(table);
225 buffer->sg_table = table;
226 if (ion_buffer_fault_user_mappings(buffer)) {
227 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
228 struct scatterlist *sg;
231 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
232 if (!buffer->pages) {
237 for_each_sg(table->sgl, sg, table->nents, i) {
238 struct page *page = sg_page(sg);
240 for (j = 0; j < sg->length / PAGE_SIZE; j++)
241 buffer->pages[k++] = page++;
250 INIT_LIST_HEAD(&buffer->vmas);
251 mutex_init(&buffer->lock);
252 /* this will set up dma addresses for the sglist -- it is not
253 technically correct as per the dma api -- a specific
254 device isn't really taking ownership here. However, in practice on
255 our systems the only dma_address space is physical addresses.
256 Additionally, we can't afford the overhead of invalidating every
257 allocation via dma_map_sg. The implicit contract here is that
258 memory coming from the heaps is ready for dma, i.e. if it has a
259 cached mapping that mapping has been invalidated */
260 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
261 sg_dma_address(sg) = sg_phys(sg);
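/*
 * Editor's note -- hedged sketch of the "correct" alternative the comment
 * above alludes to: a device-aware mapping would go through the DMA API
 * instead of aliasing physical addresses, e.g.
 *
 *	nents = dma_map_sg(dev, table->sgl, table->nents, DMA_BIDIRECTIONAL);
 *	...
 *	dma_unmap_sg(dev, table->sgl, table->nents, DMA_BIDIRECTIONAL);
 *
 * which requires a real struct device to take ownership and pays the
 * cache-maintenance cost this driver is deliberately avoiding here.
 */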
262 mutex_lock(&dev->buffer_lock);
263 ion_buffer_add(dev, buffer);
264 mutex_unlock(&dev->buffer_lock);
266 do_gettimeofday(&time);
267 buffer->alloc_time = time;
271 heap->ops->unmap_dma(heap, buffer);
272 heap->ops->free(buffer);
275 vfree(buffer->pages);
281 void ion_buffer_destroy(struct ion_buffer *buffer)
283 if (WARN_ON(buffer->kmap_cnt > 0))
284 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
285 buffer->heap->ops->unmap_dma(buffer->heap, buffer);
286 buffer->heap->ops->free(buffer);
288 vfree(buffer->pages);
292 static void _ion_buffer_destroy(struct kref *kref)
294 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
295 struct ion_heap *heap = buffer->heap;
296 struct ion_device *dev = buffer->dev;
298 #if defined(CONFIG_SPRD_IOMMU)
301 for (i = IOMMU_GSP; i < IOMMU_MAX; i++) {
302 if (buffer->iomap_cnt[i] > 0)
304 buffer->iomap_cnt[i] = 0;
305 sprd_iova_unmap(i, buffer->iova[i], buffer->size);
306 sprd_iova_free(i, buffer->iova[i], buffer->size);
311 mutex_lock(&dev->buffer_lock);
312 rb_erase(&buffer->node, &dev->buffers);
313 mutex_unlock(&dev->buffer_lock);
315 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
316 ion_heap_freelist_add(heap, buffer);
318 ion_buffer_destroy(buffer);
321 static void ion_buffer_get(struct ion_buffer *buffer)
323 kref_get(&buffer->ref);
326 static int ion_buffer_put(struct ion_buffer *buffer)
328 return kref_put(&buffer->ref, _ion_buffer_destroy);
331 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
333 mutex_lock(&buffer->lock);
334 buffer->handle_count++;
335 mutex_unlock(&buffer->lock);
338 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
341 * when a buffer is removed from a handle, if it is not in
342 * any other handles, copy the taskcomm and the pid of the
343 * process it's being removed from into the buffer. At this
344 * point there will be no way to track what processes this buffer is
345 * being used by; it only exists as a dma_buf file descriptor.
346 * The taskcomm and pid can provide a debug hint as to where this fd
349 mutex_lock(&buffer->lock);
350 buffer->handle_count--;
351 BUG_ON(buffer->handle_count < 0);
352 if (!buffer->handle_count) {
353 struct task_struct *task;
355 task = current->group_leader;
356 get_task_comm(buffer->task_comm, task);
357 buffer->pid = task_pid_nr(task);
358 buffer->tid = task_pid_nr(current);
360 mutex_unlock(&buffer->lock);
363 static struct ion_handle *ion_handle_create(struct ion_client *client,
364 struct ion_buffer *buffer)
366 struct ion_handle *handle;
368 handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
370 return ERR_PTR(-ENOMEM);
371 kref_init(&handle->ref);
372 RB_CLEAR_NODE(&handle->node);
373 handle->client = client;
374 ion_buffer_get(buffer);
375 ion_buffer_add_to_handle(buffer);
376 handle->buffer = buffer;
381 static void ion_handle_kmap_put(struct ion_handle *);
383 static void ion_handle_destroy(struct kref *kref)
385 struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
386 struct ion_client *client = handle->client;
387 struct ion_buffer *buffer = handle->buffer;
389 mutex_lock(&buffer->lock);
390 while (handle->kmap_cnt)
391 ion_handle_kmap_put(handle);
392 mutex_unlock(&buffer->lock);
394 idr_remove(&client->idr, handle->id);
395 if (!RB_EMPTY_NODE(&handle->node))
396 rb_erase(&handle->node, &client->handles);
398 ion_buffer_remove_from_handle(buffer);
399 ion_buffer_put(buffer);
404 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
406 return handle->buffer;
409 static void ion_handle_get(struct ion_handle *handle)
411 kref_get(&handle->ref);
414 static int ion_handle_put(struct ion_handle *handle)
416 struct ion_client *client = handle->client;
419 mutex_lock(&client->lock);
420 ret = kref_put(&handle->ref, ion_handle_destroy);
421 mutex_unlock(&client->lock);
426 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
427 struct ion_buffer *buffer)
429 struct rb_node *n = client->handles.rb_node;
432 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
433 if (buffer < entry->buffer)
435 else if (buffer > entry->buffer)
440 return ERR_PTR(-EINVAL);
443 static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
446 struct ion_handle *handle;
448 mutex_lock(&client->lock);
449 handle = idr_find(&client->idr, id);
451 ion_handle_get(handle);
452 mutex_unlock(&client->lock);
454 return handle ? handle : ERR_PTR(-EINVAL);
457 static bool ion_handle_validate(struct ion_client *client,
458 struct ion_handle *handle)
460 WARN_ON(!mutex_is_locked(&client->lock));
461 return (idr_find(&client->idr, handle->id) == handle);
464 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
467 struct rb_node **p = &client->handles.rb_node;
468 struct rb_node *parent = NULL;
469 struct ion_handle *entry;
471 id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
479 entry = rb_entry(parent, struct ion_handle, node);
481 if (handle->buffer < entry->buffer)
483 else if (handle->buffer > entry->buffer)
486 WARN(1, "%s: buffer already found.\n", __func__);
489 rb_link_node(&handle->node, parent, p);
490 rb_insert_color(&handle->node, &client->handles);
495 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
496 size_t align, unsigned int heap_id_mask,
499 struct ion_handle *handle;
500 struct ion_device *dev = client->dev;
501 struct ion_buffer *buffer = NULL;
502 struct ion_heap *heap;
505 pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
506 len, align, heap_id_mask, flags);
508 * traverse the list of heaps available in this system in priority
509 * order. If the heap type is supported by the client, and matches the
510 * request of the caller, allocate from it. Repeat until the allocation
511 * has succeeded or all heaps have been tried.
513 len = PAGE_ALIGN(len);
516 return ERR_PTR(-EINVAL);
518 down_read(&dev->lock);
519 plist_for_each_entry(heap, &dev->heaps, node) {
520 /* if the caller didn't specify this heap id */
521 if (!((1 << heap->id) & heap_id_mask))
523 buffer = ion_buffer_create(heap, dev, len, align, flags);
529 if (buffer == NULL) {
530 pr_err("%s: buffer is NULL!\n", __func__);
531 return ERR_PTR(-ENODEV);
534 if (IS_ERR(buffer)) {
535 pr_err("%s: buffer allocation failed (%ld)\n", __func__, PTR_ERR(buffer));
536 return ERR_CAST(buffer);
539 handle = ion_handle_create(client, buffer);
542 * ion_buffer_create will create a buffer with a ref_cnt of 1,
543 * and ion_handle_create will take a second reference; drop one here
545 ion_buffer_put(buffer);
547 if (IS_ERR(handle)) {
548 pr_err("%s: handle creation failed (%ld)\n", __func__, PTR_ERR(handle));
552 mutex_lock(&client->lock);
553 ret = ion_handle_add(client, handle);
554 mutex_unlock(&client->lock);
556 ion_handle_put(handle);
557 handle = ERR_PTR(ret);
562 EXPORT_SYMBOL(ion_alloc);
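/*
 * Editor's note -- minimal in-kernel usage sketch (assumes a previously
 * created ion_device and a platform-specific heap id; names are illustrative):
 *
 *	struct ion_client *client = ion_client_create(idev, "my-driver");
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
 *			   1 << my_heap_id, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */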
564 #ifdef CONFIG_DRM_SPRD
565 struct ion_handle *ion_alloc_with_gem(struct ion_client *client, size_t len,
566 size_t align, unsigned int heap_id_mask,
568 struct drm_gem_object *obj)
570 struct ion_handle *handle;
572 handle = ion_alloc(client, len, align, heap_id_mask, flags);
574 handle->buffer->obj = obj;
578 EXPORT_SYMBOL(ion_alloc_with_gem);
580 struct drm_gem_object *ion_get_gem(struct ion_handle *handle)
582 if (handle && handle->buffer)
583 return handle->buffer->obj;
587 EXPORT_SYMBOL(ion_get_gem);
590 void ion_free(struct ion_client *client, struct ion_handle *handle)
594 BUG_ON(client != handle->client);
596 mutex_lock(&client->lock);
597 valid_handle = ion_handle_validate(client, handle);
600 WARN(1, "%s: invalid handle passed to free.\n", __func__);
601 mutex_unlock(&client->lock);
604 mutex_unlock(&client->lock);
605 ion_handle_put(handle);
607 EXPORT_SYMBOL(ion_free);
609 int ion_phys(struct ion_client *client, struct ion_handle *handle,
610 ion_phys_addr_t *addr, size_t *len)
612 struct ion_buffer *buffer;
615 mutex_lock(&client->lock);
616 if (!ion_handle_validate(client, handle)) {
617 mutex_unlock(&client->lock);
621 buffer = handle->buffer;
623 if (!buffer->heap->ops->phys) {
624 pr_err("%s: ion_phys is not implemented by this heap.\n",
626 mutex_unlock(&client->lock);
629 mutex_unlock(&client->lock);
630 ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
633 EXPORT_SYMBOL(ion_phys);
635 int ion_is_phys(struct ion_client *client, struct ion_handle *handle)
637 struct ion_buffer *buffer;
640 mutex_lock(&client->lock);
641 if (!ion_handle_validate(client, handle)) {
642 mutex_unlock(&client->lock);
646 buffer = handle->buffer;
648 if (!buffer->heap->ops->phys)
651 mutex_unlock(&client->lock);
655 EXPORT_SYMBOL(ion_is_phys);
657 int ion_is_cached(struct ion_client *client, struct ion_handle *handle)
659 struct ion_buffer *buffer;
662 mutex_lock(&client->lock);
663 if (!ion_handle_validate(client, handle)) {
664 mutex_unlock(&client->lock);
668 buffer = handle->buffer;
670 cached = ion_buffer_cached(buffer);
671 mutex_unlock(&client->lock);
675 EXPORT_SYMBOL(ion_is_cached);
677 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
681 if (buffer->kmap_cnt) {
683 return buffer->vaddr;
685 vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
686 if (WARN_ONCE(vaddr == NULL,
687 "heap->ops->map_kernel should return ERR_PTR on error"))
688 return ERR_PTR(-EINVAL);
691 buffer->vaddr = vaddr;
696 static void *ion_handle_kmap_get(struct ion_handle *handle)
698 struct ion_buffer *buffer = handle->buffer;
701 if (handle->kmap_cnt) {
703 return buffer->vaddr;
705 vaddr = ion_buffer_kmap_get(buffer);
712 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
715 if (!buffer->kmap_cnt) {
716 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
717 buffer->vaddr = NULL;
721 static void ion_handle_kmap_put(struct ion_handle *handle)
723 struct ion_buffer *buffer = handle->buffer;
725 if (!handle->kmap_cnt) {
726 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
731 if (!handle->kmap_cnt)
732 ion_buffer_kmap_put(buffer);
735 int ion_map_iommu(struct ion_client *client, struct ion_handle *handle, int domain_no, unsigned long *ptr_iova)
737 struct ion_buffer *buffer;
739 mutex_lock(&client->lock);
740 if (!ion_handle_validate(client, handle)) {
741 pr_err("%s: invalid handle passed to map_iommu.\n",
743 mutex_unlock(&client->lock);
747 buffer = handle->buffer;
749 if (!handle->buffer->heap->ops->map_iommu) {
750 pr_err("%s: map_iommu is not implemented by this heap.\n",
752 mutex_unlock(&client->lock);
756 mutex_lock(&buffer->lock);
757 handle->buffer->heap->ops->map_iommu(buffer, domain_no, ptr_iova);
758 mutex_unlock(&buffer->lock);
759 mutex_unlock(&client->lock);
762 EXPORT_SYMBOL(ion_map_iommu);
764 int ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle, int domain_no)
766 struct ion_buffer *buffer;
768 mutex_lock(&client->lock);
769 if (!ion_handle_validate(client, handle)) {
770 pr_err("%s: invalid handle passed to unmap_iommu.\n",
772 mutex_unlock(&client->lock);
776 buffer = handle->buffer;
778 if (!handle->buffer->heap->ops->unmap_iommu) {
779 pr_err("%s: unmap_iommu is not implemented by this heap.\n",
781 mutex_unlock(&client->lock);
785 mutex_lock(&buffer->lock);
786 handle->buffer->heap->ops->unmap_iommu(buffer, domain_no);
787 mutex_unlock(&buffer->lock);
788 mutex_unlock(&client->lock);
791 EXPORT_SYMBOL(ion_unmap_iommu);
793 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
795 struct ion_buffer *buffer;
798 mutex_lock(&client->lock);
799 if (!ion_handle_validate(client, handle)) {
800 pr_err("%s: invalid handle passed to map_kernel.\n",
802 mutex_unlock(&client->lock);
803 return ERR_PTR(-EINVAL);
806 buffer = handle->buffer;
808 if (!handle->buffer->heap->ops->map_kernel) {
809 pr_err("%s: map_kernel is not implemented by this heap.\n",
811 mutex_unlock(&client->lock);
812 return ERR_PTR(-ENODEV);
815 mutex_lock(&buffer->lock);
816 vaddr = ion_handle_kmap_get(handle);
817 mutex_unlock(&buffer->lock);
818 mutex_unlock(&client->lock);
821 EXPORT_SYMBOL(ion_map_kernel);
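/*
 * Editor's note -- usage sketch: kernel mappings are reference counted per
 * handle and per buffer, so map/unmap calls must be balanced (len is an
 * illustrative name for the buffer's size):
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, len);		// CPU access through the mapping
 *	ion_unmap_kernel(client, handle);
 */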
823 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
825 struct ion_buffer *buffer;
827 mutex_lock(&client->lock);
828 buffer = handle->buffer;
829 mutex_lock(&buffer->lock);
830 ion_handle_kmap_put(handle);
831 mutex_unlock(&buffer->lock);
832 mutex_unlock(&client->lock);
834 EXPORT_SYMBOL(ion_unmap_kernel);
836 static int ion_debug_client_show(struct seq_file *s, void *unused)
838 struct ion_client *client = s->private;
840 size_t sizes[ION_NUM_HEAP_IDS] = {0};
841 const char *names[ION_NUM_HEAP_IDS] = {NULL};
844 mutex_lock(&client->lock);
845 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
846 struct ion_handle *handle = rb_entry(n, struct ion_handle,
848 unsigned int id = handle->buffer->heap->id;
851 names[id] = handle->buffer->heap->name;
852 sizes[id] += handle->buffer->size;
854 mutex_unlock(&client->lock);
856 seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
857 for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
860 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
865 static int ion_debug_client_open(struct inode *inode, struct file *file)
867 return single_open(file, ion_debug_client_show, inode->i_private);
870 static const struct file_operations debug_client_fops = {
871 .open = ion_debug_client_open,
874 .release = single_release,
877 static int ion_get_client_serial(const struct rb_root *root,
878 const unsigned char *name)
881 struct rb_node *node;
882 for (node = rb_first(root); node; node = rb_next(node)) {
883 struct ion_client *client = rb_entry(node, struct ion_client,
885 if (strcmp(client->name, name))
887 serial = max(serial, client->display_serial);
892 struct ion_client *ion_client_create(struct ion_device *dev,
895 struct ion_client *client;
896 struct task_struct *task;
898 struct rb_node *parent = NULL;
899 struct ion_client *entry;
904 pr_err("%s: Name cannot be null\n", __func__);
905 return ERR_PTR(-EINVAL);
908 get_task_struct(current->group_leader);
909 task_lock(current->group_leader);
910 pid = task_pid_nr(current->group_leader);
911 tid = task_pid_nr(current);
912 /* don't bother to store task struct for kernel threads,
913 they can't be killed anyway */
914 if (current->group_leader->flags & PF_KTHREAD) {
915 put_task_struct(current->group_leader);
918 task = current->group_leader;
920 task_unlock(current->group_leader);
922 client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
924 goto err_put_task_struct;
927 client->handles = RB_ROOT;
928 idr_init(&client->idr);
929 mutex_init(&client->lock);
933 client->name = kstrdup(name, GFP_KERNEL);
935 goto err_free_client;
937 down_write(&dev->lock);
938 client->display_serial = ion_get_client_serial(&dev->clients, name);
939 client->display_name = kasprintf(
940 GFP_KERNEL, "%s-%d", name, client->display_serial);
941 if (!client->display_name) {
942 up_write(&dev->lock);
943 goto err_free_client_name;
945 p = &dev->clients.rb_node;
948 entry = rb_entry(parent, struct ion_client, node);
952 else if (client > entry)
955 rb_link_node(&client->node, parent, p);
956 rb_insert_color(&client->node, &dev->clients);
958 client->debug_root = debugfs_create_file(client->display_name, 0664,
959 dev->clients_debug_root,
960 client, &debug_client_fops);
961 if (!client->debug_root) {
962 char buf[256], *path;
963 path = dentry_path(dev->clients_debug_root, buf, 256);
964 pr_err("Failed to create client debugfs at %s/%s\n",
965 path, client->display_name);
968 up_write(&dev->lock);
972 err_free_client_name:
978 put_task_struct(current->group_leader);
979 return ERR_PTR(-ENOMEM);
981 EXPORT_SYMBOL(ion_client_create);
983 void ion_client_destroy(struct ion_client *client)
985 struct ion_device *dev = client->dev;
988 pr_debug("%s: %d\n", __func__, __LINE__);
989 while ((n = rb_first(&client->handles))) {
990 struct ion_handle *handle = rb_entry(n, struct ion_handle,
992 ion_handle_destroy(&handle->ref);
995 idr_destroy(&client->idr);
997 down_write(&dev->lock);
999 put_task_struct(client->task);
1000 rb_erase(&client->node, &dev->clients);
1001 debugfs_remove_recursive(client->debug_root);
1002 up_write(&dev->lock);
1004 kfree(client->display_name);
1005 kfree(client->name);
1008 EXPORT_SYMBOL(ion_client_destroy);
1010 int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
1011 unsigned long *size, unsigned int *heap_id)
1013 struct ion_buffer *buffer;
1014 struct ion_heap *heap;
1016 mutex_lock(&client->lock);
1017 if (!ion_handle_validate(client, handle)) {
1018 pr_err("%s: invalid handle passed to %s.\n",
1019 __func__, __func__);
1020 mutex_unlock(&client->lock);
1023 buffer = handle->buffer;
1024 mutex_lock(&buffer->lock);
1025 heap = buffer->heap;
1026 *heap_id = (1 << heap->id);
1027 *size = buffer->size;
1028 mutex_unlock(&buffer->lock);
1029 mutex_unlock(&client->lock);
1033 EXPORT_SYMBOL(ion_handle_get_size);
1035 struct sg_table *ion_sg_table(struct ion_client *client,
1036 struct ion_handle *handle)
1038 struct ion_buffer *buffer;
1039 struct sg_table *table;
1041 mutex_lock(&client->lock);
1042 if (!ion_handle_validate(client, handle)) {
1043 pr_err("%s: invalid handle passed to map_dma.\n",
1045 mutex_unlock(&client->lock);
1046 return ERR_PTR(-EINVAL);
1048 buffer = handle->buffer;
1049 table = buffer->sg_table;
1050 mutex_unlock(&client->lock);
1053 EXPORT_SYMBOL(ion_sg_table);
1055 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1057 enum dma_data_direction direction);
1059 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1060 enum dma_data_direction direction)
1062 struct dma_buf *dmabuf = attachment->dmabuf;
1063 struct ion_buffer *buffer = dmabuf->priv;
1065 ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1066 return buffer->sg_table;
1069 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1070 struct sg_table *table,
1071 enum dma_data_direction direction)
1075 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1076 size_t size, enum dma_data_direction dir)
1078 struct scatterlist sg;
1080 sg_init_table(&sg, 1);
1081 sg_set_page(&sg, page, size, 0);
1083 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1084 * for the targeted device, but this works on the currently targeted
1087 sg_dma_address(&sg) = page_to_phys(page);
1088 dma_sync_sg_for_device(dev, &sg, 1, dir);
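/*
 * Editor's note -- heaps typically call this helper when handing freshly
 * allocated pages to hardware, e.g. (illustrative, mirroring the system
 * heap's use; "order" is the page allocation order):
 *
 *	ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
 *				  DMA_BIDIRECTIONAL);
 *
 * Passing a NULL device leans on the same physical-address assumption
 * called out in the comment above.
 */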
1091 struct ion_vma_list {
1092 struct list_head list;
1093 struct vm_area_struct *vma;
1096 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1098 enum dma_data_direction dir)
1100 struct ion_vma_list *vma_list;
1101 int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1104 pr_debug("%s: syncing for device %s\n", __func__,
1105 dev ? dev_name(dev) : "null");
1107 if (!ion_buffer_fault_user_mappings(buffer))
1110 mutex_lock(&buffer->lock);
1111 for (i = 0; i < pages; i++) {
1112 struct page *page = buffer->pages[i];
1114 if (ion_buffer_page_is_dirty(page))
1115 ion_pages_sync_for_device(dev, ion_buffer_page(page),
1118 ion_buffer_page_clean(buffer->pages + i);
1120 list_for_each_entry(vma_list, &buffer->vmas, list) {
1121 struct vm_area_struct *vma = vma_list->vma;
1123 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1126 mutex_unlock(&buffer->lock);
1129 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1131 struct ion_buffer *buffer = vma->vm_private_data;
1135 mutex_lock(&buffer->lock);
1136 ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1137 BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1139 pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1140 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1141 mutex_unlock(&buffer->lock);
1143 return VM_FAULT_ERROR;
1145 return VM_FAULT_NOPAGE;
1148 static void ion_vm_open(struct vm_area_struct *vma)
1150 struct ion_buffer *buffer = vma->vm_private_data;
1151 struct ion_vma_list *vma_list;
1153 vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1156 vma_list->vma = vma;
1157 mutex_lock(&buffer->lock);
1158 list_add(&vma_list->list, &buffer->vmas);
1159 mutex_unlock(&buffer->lock);
1160 pr_debug("%s: adding %p\n", __func__, vma);
1163 static void ion_vm_close(struct vm_area_struct *vma)
1165 struct ion_buffer *buffer = vma->vm_private_data;
1166 struct ion_vma_list *vma_list, *tmp;
1168 pr_debug("%s\n", __func__);
1169 mutex_lock(&buffer->lock);
1170 list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1171 if (vma_list->vma != vma)
1173 list_del(&vma_list->list);
1175 pr_debug("%s: deleting %p\n", __func__, vma);
1178 mutex_unlock(&buffer->lock);
1181 static struct vm_operations_struct ion_vma_ops = {
1182 .open = ion_vm_open,
1183 .close = ion_vm_close,
1184 .fault = ion_vm_fault,
1187 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1189 struct ion_buffer *buffer = dmabuf->priv;
1192 if (!buffer->heap->ops->map_user) {
1193 pr_err("%s: this heap does not define a method for mapping to userspace\n",
1194 __func__);
1198 if (ion_buffer_fault_user_mappings(buffer)) {
1199 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1201 vma->vm_private_data = buffer;
1202 vma->vm_ops = &ion_vma_ops;
1207 if (!(buffer->flags & ION_FLAG_CACHED))
1208 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1210 mutex_lock(&buffer->lock);
1211 /* now map it to userspace */
1212 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1213 mutex_unlock(&buffer->lock);
1216 pr_err("%s: failure mapping buffer to userspace\n",
1222 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1224 struct ion_buffer *buffer = dmabuf->priv;
1225 ion_buffer_put(buffer);
1227 #ifdef CONFIG_DRM_SPRD
1229 drm_gem_object_unreference_unlocked(buffer->obj);
1235 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1237 struct ion_buffer *buffer = dmabuf->priv;
1238 return buffer->vaddr + offset * PAGE_SIZE;
1241 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1247 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1249 enum dma_data_direction direction)
1251 struct ion_buffer *buffer = dmabuf->priv;
1254 if (!buffer->heap->ops->map_kernel) {
1255 pr_err("%s: map kernel is not implemented by this heap.\n",
1260 mutex_lock(&buffer->lock);
1261 vaddr = ion_buffer_kmap_get(buffer);
1262 mutex_unlock(&buffer->lock);
1264 return PTR_ERR(vaddr);
1268 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1270 enum dma_data_direction direction)
1272 struct ion_buffer *buffer = dmabuf->priv;
1274 mutex_lock(&buffer->lock);
1275 ion_buffer_kmap_put(buffer);
1276 mutex_unlock(&buffer->lock);
1279 static struct dma_buf_ops dma_buf_ops = {
1280 .map_dma_buf = ion_map_dma_buf,
1281 .unmap_dma_buf = ion_unmap_dma_buf,
1283 .release = ion_dma_buf_release,
1284 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1285 .end_cpu_access = ion_dma_buf_end_cpu_access,
1286 .kmap_atomic = ion_dma_buf_kmap,
1287 .kunmap_atomic = ion_dma_buf_kunmap,
1288 .kmap = ion_dma_buf_kmap,
1289 .kunmap = ion_dma_buf_kunmap,
1292 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1293 struct ion_handle *handle)
1295 struct ion_buffer *buffer;
1296 struct dma_buf *dmabuf;
1299 mutex_lock(&client->lock);
1300 valid_handle = ion_handle_validate(client, handle);
1301 if (!valid_handle) {
1302 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1303 mutex_unlock(&client->lock);
1304 return ERR_PTR(-EINVAL);
1306 buffer = handle->buffer;
1307 ion_buffer_get(buffer);
1308 mutex_unlock(&client->lock);
1310 dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1311 if (IS_ERR(dmabuf)) {
1312 ion_buffer_put(buffer);
1318 EXPORT_SYMBOL(ion_share_dma_buf);
1320 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1322 struct dma_buf *dmabuf;
1325 dmabuf = ion_share_dma_buf(client, handle);
1326 if (IS_ERR(dmabuf)) {
1327 pr_err("%s: ion_share_dma_buf failed (%ld)\n", __func__, PTR_ERR(dmabuf));
1328 return PTR_ERR(dmabuf);
1331 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1333 pr_err("%s: dma_buf_fd failed (%d)\n", __func__, fd);
1334 dma_buf_put(dmabuf);
1339 EXPORT_SYMBOL(ion_share_dma_buf_fd);
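/*
 * Editor's note -- sharing sketch: a kernel client turns a handle into a
 * dma_buf fd that can be passed to userspace or another driver
 * (illustrative):
 *
 *	int fd = ion_share_dma_buf_fd(client, handle);
 *	if (fd < 0)
 *		return fd;
 *
 * The fd holds its own reference to the underlying dma_buf, so the original
 * handle can be freed independently.
 */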
1341 struct ion_handle *get_ion_handle_from_dmabuf(struct ion_client *client, struct dma_buf *dmabuf)
1343 struct ion_buffer *buffer;
1344 struct ion_handle *handle;
1347 /* if this memory came from ion */
1348 if (dmabuf->ops != &dma_buf_ops) {
1349 pr_err("%s: can not import dmabuf from another exporter\n",
1351 return ERR_PTR(-EINVAL);
1353 buffer = dmabuf->priv;
1355 mutex_lock(&client->lock);
1356 /* if a handle exists for this buffer just take a reference to it */
1357 handle = ion_handle_lookup(client, buffer);
1358 if (!IS_ERR(handle)) {
1359 ion_handle_get(handle);
1360 mutex_unlock(&client->lock);
1363 mutex_unlock(&client->lock);
1365 handle = ion_handle_create(client, buffer);
1369 mutex_lock(&client->lock);
1370 ret = ion_handle_add(client, handle);
1371 mutex_unlock(&client->lock);
1373 ion_handle_put(handle);
1374 handle = ERR_PTR(ret);
1380 EXPORT_SYMBOL(get_ion_handle_from_dmabuf);
1382 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1384 struct dma_buf *dmabuf;
1385 struct ion_handle *handle;
1387 dmabuf = dma_buf_get(fd);
1388 if (IS_ERR(dmabuf)) {
1389 pr_err("%s: dma_buf_get failed (%ld) for fd %d\n",
1390 __func__, PTR_ERR(dmabuf), fd);
1391 return ERR_CAST(dmabuf);
1393 handle = get_ion_handle_from_dmabuf(client, dmabuf);
1394 dma_buf_put(dmabuf);
1397 EXPORT_SYMBOL(ion_import_dma_buf);
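/*
 * Editor's note -- import sketch, the inverse of sharing: a client that
 * receives an ion-backed fd can obtain (or reuse) a handle for it:
 *
 *	struct ion_handle *handle = ion_import_dma_buf(client, fd);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 * If the client already holds a handle for the same buffer, that handle is
 * returned with its reference count raised rather than creating a new one.
 */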
1399 static int ion_invalidate_for_cpu(struct ion_client *client, int fd)
1401 struct dma_buf *dmabuf;
1402 struct ion_buffer *buffer;
1404 dmabuf = dma_buf_get(fd);
1407 pr_err("%s: dma_buf_get failed (%ld)\n", __func__, PTR_ERR(dmabuf));
1408 return PTR_ERR(dmabuf);
1411 /* if this memory came from ion */
1412 if (dmabuf->ops != &dma_buf_ops) {
1413 pr_err("%s: can not sync dmabuf from another exporter\n",
1415 dma_buf_put(dmabuf);
1418 buffer = dmabuf->priv;
1420 dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
1421 buffer->sg_table->nents, DMA_FROM_DEVICE);
1422 dma_buf_put(dmabuf);
1426 static int ion_sync_for_device(struct ion_client *client, int fd)
1428 struct dma_buf *dmabuf;
1429 struct ion_buffer *buffer;
1431 dmabuf = dma_buf_get(fd);
1432 if (IS_ERR(dmabuf)) {
1433 pr_err("%s: dma_buf_get failed (%ld) for fd %d\n", __func__, PTR_ERR(dmabuf), fd);
1434 return PTR_ERR(dmabuf);
1437 /* if this memory came from ion */
1438 if (dmabuf->ops != &dma_buf_ops) {
1439 pr_err("%s: can not sync dmabuf from another exporter\n",
1441 dma_buf_put(dmabuf);
1444 buffer = dmabuf->priv;
1446 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1447 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1448 dma_buf_put(dmabuf);
1452 /* fix up the cases where the ioctl direction bits are incorrect */
1453 static unsigned int ion_ioctl_dir(unsigned int cmd)
1458 case ION_IOC_CUSTOM:
1461 return _IOC_DIR(cmd);
1465 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1467 struct ion_client *client = filp->private_data;
1468 struct ion_device *dev = client->dev;
1469 struct ion_handle *cleanup_handle = NULL;
1474 struct ion_fd_data fd;
1475 struct ion_allocation_data allocation;
1476 struct ion_handle_data handle;
1477 struct ion_custom_data custom;
1480 dir = ion_ioctl_dir(cmd);
1481 pr_debug("%s:cmd[0x%x]dir[0x%x]\n", __func__, cmd, dir);
1483 if (_IOC_SIZE(cmd) > sizeof(data)) {
1488 if (dir & _IOC_WRITE)
1489 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) {
1497 struct ion_handle *handle;
1499 handle = ion_alloc(client, data.allocation.len,
1500 data.allocation.align,
1501 data.allocation.heap_id_mask,
1502 data.allocation.flags);
1503 if (IS_ERR(handle)) {
1504 ret = PTR_ERR(handle);
1508 data.allocation.handle = handle->id;
1510 cleanup_handle = handle;
1515 struct ion_handle *handle;
1517 handle = ion_handle_get_by_id(client, data.handle.handle);
1518 if (IS_ERR(handle)) {
1519 ret = PTR_ERR(handle);
1522 ion_free(client, handle);
1523 ion_handle_put(handle);
1529 struct ion_handle *handle;
1531 handle = ion_handle_get_by_id(client, data.handle.handle);
1532 if (IS_ERR(handle)) {
1533 ret = PTR_ERR(handle);
1536 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1537 ion_handle_put(handle);
1542 case ION_IOC_IMPORT:
1544 struct ion_handle *handle;
1545 handle = ion_import_dma_buf(client, data.fd.fd);
1547 ret = PTR_ERR(handle);
1549 data.handle.handle = handle->id;
1552 case ION_IOC_INVALIDATE:
1554 ret = ion_invalidate_for_cpu(client, data.fd.fd);
1559 ret = ion_sync_for_device(client, data.fd.fd);
1562 case ION_IOC_CUSTOM:
1564 if (!dev->custom_ioctl) {
1568 ret = dev->custom_ioctl(client, data.custom.cmd,
1577 if (dir & _IOC_READ) {
1578 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1580 ion_free(client, cleanup_handle);
1587 pr_info("%s:cmd[0x%x]ret[%d]\n", __func__, cmd, ret);
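/*
 * Editor's note -- hedged userspace sketch of the ioctl flow handled above
 * (field names follow struct ion_allocation_data / ion_fd_data as used in
 * this file; heap ids are platform specific and error handling is omitted):
 *
 *	struct ion_allocation_data alloc = {
 *		.len = 1024 * 1024,
 *		.align = 4096,
 *		.heap_id_mask = 1 << my_heap_id,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	struct ion_fd_data share;
 *
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);	// share.fd is now a dma_buf fd
 *	mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED, share.fd, 0);
 */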
1592 static int ion_release(struct inode *inode, struct file *file)
1594 struct ion_client *client = file->private_data;
1596 pr_debug("%s: %d\n", __func__, __LINE__);
1597 ion_client_destroy(client);
1601 static int ion_open(struct inode *inode, struct file *file)
1603 struct miscdevice *miscdev = file->private_data;
1604 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1605 struct ion_client *client;
1606 char debug_name[64];
1608 pr_debug("%s: %d\n", __func__, __LINE__);
1609 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1610 client = ion_client_create(dev, debug_name);
1612 return PTR_ERR(client);
1613 file->private_data = client;
1618 static const struct file_operations ion_fops = {
1619 .owner = THIS_MODULE,
1621 .release = ion_release,
1622 .unlocked_ioctl = ion_ioctl,
1623 .compat_ioctl = compat_ion_ioctl,
1626 static size_t ion_debug_heap_total(struct ion_client *client,
1632 mutex_lock(&client->lock);
1633 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1634 struct ion_handle *handle = rb_entry(n,
1637 if (handle->buffer->heap->id == id)
1638 size += handle->buffer->size;
1640 mutex_unlock(&client->lock);
1644 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1646 struct ion_heap *heap = s->private;
1647 struct ion_device *dev = heap->dev;
1651 size_t total_size = 0;
1652 size_t total_orphaned_size = 0;
1654 seq_printf(s, "%16s %6s %6s %10s %16s\n", "client", "pid", "tid", "size", "alloc_time");
1655 seq_printf(s, "----------------------------------------------------------\n");
1657 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1658 struct ion_client *client = rb_entry(n, struct ion_client,
1661 mutex_lock(&client->lock);
1662 for (r = rb_first(&client->handles); r; r = rb_next(r)) {
1663 struct ion_handle *handle = rb_entry(r,
1666 struct ion_buffer *buffer = handle->buffer;
1668 if (buffer->heap->id == heap->id) {
1671 time_to_tm(buffer->alloc_time.tv_sec, 0, &t);
1673 char task_comm[TASK_COMM_LEN];
1675 get_task_comm(task_comm, client->task);
1676 seq_printf(s, "%16s %6u %6u %10zu %ld.%d.%d-%d:%d:%d.%ld\n",
1677 task_comm, client->pid, client->tid, buffer->size,
1678 t.tm_year + 1900, t.tm_mon + 1,
1679 t.tm_mday, t.tm_hour, t.tm_min,
1680 t.tm_sec, buffer->alloc_time.tv_usec);
1682 seq_printf(s, "%16s %6u %6u %10zu %ld.%d.%d-%d:%d:%d.%ld\n",
1683 client->name, client->pid, client->tid, buffer->size,
1684 t.tm_year + 1900, t.tm_mon + 1,
1685 t.tm_mday, t.tm_hour, t.tm_min,
1686 t.tm_sec, buffer->alloc_time.tv_usec);
1690 mutex_unlock(&client->lock);
1692 seq_printf(s, "----------------------------------------------------------\n");
1693 seq_printf(s, "orphaned allocations (info is from last known client):"
1695 mutex_lock(&dev->buffer_lock);
1696 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1697 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1699 if (buffer->heap->id != heap->id)
1701 total_size += buffer->size;
1702 if (!buffer->handle_count) {
1703 time_to_tm(buffer->alloc_time.tv_sec, 0, &t);
1704 seq_printf(s, "%16s %6u %6u %10zu %d %d %ld.%d.%d-%d:%d:%d.%ld\n",
1705 buffer->task_comm, buffer->pid, buffer->tid, buffer->size,
1706 buffer->kmap_cnt, atomic_read(&buffer->ref.refcount),
1707 t.tm_year + 1900, t.tm_mon + 1,
1708 t.tm_mday, t.tm_hour, t.tm_min,
1709 t.tm_sec, buffer->alloc_time.tv_usec);
1710 total_orphaned_size += buffer->size;
1713 mutex_unlock(&dev->buffer_lock);
1714 seq_printf(s, "----------------------------------------------------------\n");
1715 seq_printf(s, "%16s %22zu\n", "total orphaned",
1716 total_orphaned_size);
1717 seq_printf(s, "%16s %22zu\n", "total ", total_size);
1718 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1719 seq_printf(s, "%16s %22zu\n", "deferred free",
1720 heap->free_list_size);
1721 seq_printf(s, "----------------------------------------------------------\n");
1723 if (heap->debug_show)
1724 heap->debug_show(heap, s, unused);
1729 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1731 return single_open(file, ion_debug_heap_show, inode->i_private);
1734 static const struct file_operations debug_heap_fops = {
1735 .open = ion_debug_heap_open,
1737 .llseek = seq_lseek,
1738 .release = single_release,
1741 #ifdef DEBUG_HEAP_SHRINKER
1742 static int debug_shrink_set(void *data, u64 val)
1744 struct ion_heap *heap = data;
1745 struct shrink_control sc;
1754 objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1755 sc.nr_to_scan = objs;
1757 heap->shrinker.shrink(&heap->shrinker, &sc);
1761 static int debug_shrink_get(void *data, u64 *val)
1763 struct ion_heap *heap = data;
1764 struct shrink_control sc;
1770 objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1775 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1776 debug_shrink_set, "%llu\n");
1779 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1781 struct dentry *debug_file;
1783 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1784 !heap->ops->unmap_dma)
1785 pr_err("%s: can not add heap with invalid ops struct.\n",
1788 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1789 ion_heap_init_deferred_free(heap);
1791 if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1792 ion_heap_init_shrinker(heap);
1795 down_write(&dev->lock);
1796 /* use negative heap->id to reverse the priority -- when traversing
1797 the list later, higher id numbers are attempted first */
1798 plist_node_init(&heap->node, -heap->id);
1799 plist_add(&heap->node, &dev->heaps);
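/*
 * Editor's note -- why the negation works: plist iterates from the
 * numerically smallest priority to the largest, so inserting heap id N at
 * priority -N makes plist_for_each_entry() in ion_alloc() visit heaps in
 * descending id order; e.g. ids 2, 1, 0 sit at priorities -2, -1, 0 and are
 * walked in that order.
 */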
1800 debug_file = debugfs_create_file(heap->name, 0664,
1801 dev->heaps_debug_root, heap,
1805 char buf[256], *path;
1806 path = dentry_path(dev->heaps_debug_root, buf, 256);
1807 pr_err("Failed to create heap debugfs at %s/%s\n",
1811 #ifdef DEBUG_HEAP_SHRINKER
1812 if (heap->shrinker.shrink) {
1813 char debug_name[64];
1815 snprintf(debug_name, 64, "%s_shrink", heap->name);
1816 debug_file = debugfs_create_file(
1817 debug_name, 0644, dev->heaps_debug_root, heap,
1818 &debug_shrink_fops);
1820 char buf[256], *path;
1821 path = dentry_path(dev->heaps_debug_root, buf, 256);
1822 pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1827 up_write(&dev->lock);
1830 struct ion_device *ion_device_create(long (*custom_ioctl)
1831 (struct ion_client *client,
1835 struct ion_device *idev;
1838 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1840 return ERR_PTR(-ENOMEM);
1842 idev->dev.minor = MISC_DYNAMIC_MINOR;
1843 idev->dev.name = "ion";
1844 idev->dev.fops = &ion_fops;
1845 idev->dev.parent = NULL;
1846 ret = misc_register(&idev->dev);
1848 pr_err("ion: failed to register misc device.\n");
1849 return ERR_PTR(ret);
1852 idev->debug_root = debugfs_create_dir("ion", NULL);
1853 if (!idev->debug_root) {
1854 pr_err("ion: failed to create debugfs root directory.\n");
1857 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1858 if (!idev->heaps_debug_root) {
1859 pr_err("ion: failed to create debugfs heaps directory.\n");
1862 idev->clients_debug_root = debugfs_create_dir("clients",
1864 if (!idev->clients_debug_root)
1865 pr_err("ion: failed to create debugfs clients directory.\n");
1869 idev->custom_ioctl = custom_ioctl;
1870 idev->buffers = RB_ROOT;
1871 mutex_init(&idev->buffer_lock);
1872 init_rwsem(&idev->lock);
1873 plist_head_init(&idev->heaps);
1874 idev->clients = RB_ROOT;
1878 void ion_device_destroy(struct ion_device *dev)
1880 misc_deregister(&dev->dev);
1881 debugfs_remove_recursive(dev->debug_root);
1882 /* XXX need to free the heaps and clients ? */
1886 void __init ion_reserve(struct ion_platform_data *data)
1890 for (i = 0; i < data->nr; i++) {
1891 if (data->heaps[i].size == 0)
1894 if (data->heaps[i].base == 0) {
1896 paddr = memblock_alloc_base(data->heaps[i].size,
1897 data->heaps[i].align,
1898 MEMBLOCK_ALLOC_ANYWHERE);
1900 pr_err("%s: error allocating memblock for "
1905 data->heaps[i].base = paddr;
1907 int ret = memblock_reserve(data->heaps[i].base,
1908 data->heaps[i].size);
1910 pr_err("memblock reserve of %zx@%lx failed\n",
1911 data->heaps[i].size,
1912 data->heaps[i].base);
1914 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1915 data->heaps[i].name,
1916 data->heaps[i].base,
1917 data->heaps[i].size);
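/*
 * Editor's note -- illustrative board-file usage of ion_reserve(), assuming
 * a platform-defined ion_platform_data with one carveout-style heap (field
 * values are made up):
 *
 *	static struct ion_platform_heap my_heaps[] = {
 *		{ .id = 1, .name = "carveout", .base = 0, .size = SZ_16M },
 *	};
 *	static struct ion_platform_data my_ion_pdata = {
 *		.nr = ARRAY_SIZE(my_heaps),
 *		.heaps = my_heaps,
 *	};
 *
 *	// early in machine setup, before the page allocator owns the memory:
 *	ion_reserve(&my_ion_pdata);
 *
 * A base of 0 asks ion_reserve() to memblock_alloc the region; a non-zero
 * base asks it to memblock_reserve the fixed range instead.
 */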