3 * drivers/gpu/ion/ion.c
5 * Copyright (C) 2011 Google, Inc.
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/time.h>
39 #ifdef CONFIG_DRM_SPRD
45 #include "compat_ion.h"
47 #ifdef CONFIG_ION_SPRD
48 #define DEBUG_HEAP_SHRINKER
52 * struct ion_device - the metadata of the ion device node
53 * @dev: the actual misc device
54 * @buffers: an rb tree of all the existing buffers
55 * @buffer_lock: lock protecting the tree of buffers
56 * @lock: rwsem protecting the tree of heaps and clients
57 * @heaps: list of all the heaps in the system
58 * @clients: rb tree of all the clients in the system
61 struct miscdevice dev;
62 struct rb_root buffers;
63 struct mutex buffer_lock;
64 struct rw_semaphore lock;
65 struct plist_head heaps;
66 long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
68 struct rb_root clients;
69 struct dentry *debug_root;
70 struct dentry *heaps_debug_root;
71 struct dentry *clients_debug_root;
75 * struct ion_client - a process/hw block local address space
76 * @node: node in the tree of all clients
77 * @dev: backpointer to ion device
78 * @handles: an rb tree of all the handles in this client
79 * @idr: an idr space for allocating handle ids
80 * @lock: lock protecting the tree of handles
81 * @name: used for debugging
82 * @display_name: used for debugging (unique version of @name)
83 * @display_serial: used for debugging (to make display_name unique)
84 * @task: used for debugging
86 * A client represents a list of buffers this client may access.
87 * The mutex stored here is used to protect both the tree of handles
88 * and the handles themselves, and should be held while modifying either.
92 struct ion_device *dev;
93 struct rb_root handles;
99 struct task_struct *task;
102 struct dentry *debug_root;
106 * ion_handle - a client local reference to a buffer
107 * @ref: reference count
108 * @client: back pointer to the client the buffer resides in
109 * @buffer: pointer to the buffer
110 * @node: node in the client's handle rbtree
111 * @kmap_cnt: count of times this client has mapped to kernel
112 * @id: client-unique id allocated by client->idr
114 * Modifications to node, kmap_cnt or mapping should be protected by the
115 * lock in the client. Other fields are never changed after initialization.
119 struct ion_client *client;
120 struct ion_buffer *buffer;
122 unsigned int kmap_cnt;
126 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
128 return (buffer->flags & ION_FLAG_CACHED) &&
129 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
132 bool ion_buffer_cached(struct ion_buffer *buffer)
134 return !!(buffer->flags & ION_FLAG_CACHED);
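/*
 * Illustrative note (restating the two predicates above, not new policy):
 *
 *	flags = ION_FLAG_CACHED;
 *		cached; user mappings are faulted in page by page so dirty
 *		pages can be tracked and synced for the device
 *	flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
 *		cached; mapped up front, caller handles cache maintenance
 *	flags = 0;
 *		uncached; mapped up front
 */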
137 static inline struct page *ion_buffer_page(struct page *page)
139 return (struct page *)((unsigned long)page & ~(1UL));
142 static inline bool ion_buffer_page_is_dirty(struct page *page)
144 return !!((unsigned long)page & 1UL);
147 static inline void ion_buffer_page_dirty(struct page **page)
149 *page = (struct page *)((unsigned long)(*page) | 1UL);
152 static inline void ion_buffer_page_clean(struct page **page)
154 *page = (struct page *)((unsigned long)(*page) & ~(1UL));
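/*
 * Illustrative sketch: the four helpers above stash a dirty flag in bit 0
 * of each entry of buffer->pages[], which is safe because struct page
 * pointers are at least word-aligned. A tag/untag round trip (with "dev"
 * and "i" assumed from context) looks like:
 *
 *	ion_buffer_page_dirty(buffer->pages + i);		// set bit 0
 *	if (ion_buffer_page_is_dirty(buffer->pages[i]))		// test bit 0
 *		ion_pages_sync_for_device(dev,
 *				ion_buffer_page(buffer->pages[i]), // strip bit 0
 *				PAGE_SIZE, DMA_BIDIRECTIONAL);
 *	ion_buffer_page_clean(buffer->pages + i);		// clear bit 0
 */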
157 /* this function should only be called while dev->lock is held */
158 static void ion_buffer_add(struct ion_device *dev,
159 struct ion_buffer *buffer)
161 struct rb_node **p = &dev->buffers.rb_node;
162 struct rb_node *parent = NULL;
163 struct ion_buffer *entry;
167 entry = rb_entry(parent, struct ion_buffer, node);
169 if (buffer < entry) {
171 } else if (buffer > entry) {
174 pr_err("%s: buffer already found.", __func__);
179 rb_link_node(&buffer->node, parent, p);
180 rb_insert_color(&buffer->node, &dev->buffers);
183 /* this function should only be called while dev->lock is held */
184 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
185 struct ion_device *dev,
190 struct ion_buffer *buffer;
191 struct sg_table *table;
192 struct scatterlist *sg;
196 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
198 return ERR_PTR(-ENOMEM);
201 buffer->flags = flags;
202 kref_init(&buffer->ref);
204 ret = heap->ops->allocate(heap, buffer, len, align, flags);
207 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
210 ion_heap_freelist_drain(heap, 0);
211 ret = heap->ops->allocate(heap, buffer, len, align,
220 table = heap->ops->map_dma(heap, buffer);
221 if (WARN_ONCE(table == NULL,
222 "heap->ops->map_dma should return ERR_PTR on error"))
223 table = ERR_PTR(-EINVAL);
225 heap->ops->free(buffer);
227 return ERR_CAST(table);
229 buffer->sg_table = table;
230 if (ion_buffer_fault_user_mappings(buffer)) {
231 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
232 struct scatterlist *sg;
235 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
236 if (!buffer->pages) {
241 for_each_sg(table->sgl, sg, table->nents, i) {
242 struct page *page = sg_page(sg);
244 for (j = 0; j < sg->length / PAGE_SIZE; j++)
245 buffer->pages[k++] = page++;
254 INIT_LIST_HEAD(&buffer->vmas);
255 mutex_init(&buffer->lock);
256 /* this will set up dma addresses for the sglist -- it is not
257 technically correct as per the dma api -- a specific
258 device isn't really taking ownership here. However, in practice on
259 our systems the only dma_address space is physical addresses.
260 Additionally, we can't afford the overhead of invalidating every
261 allocation via dma_map_sg. The implicit contract here is that
262 memory coming from the heaps is ready for dma, i.e. if it has a
263 cached mapping that mapping has been invalidated */
264 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
265 sg_dma_address(sg) = sg_phys(sg);
266 mutex_lock(&dev->buffer_lock);
267 ion_buffer_add(dev, buffer);
268 mutex_unlock(&dev->buffer_lock);
270 do_gettimeofday(&time);
271 buffer->alloc_time = time;
275 heap->ops->unmap_dma(heap, buffer);
276 heap->ops->free(buffer);
279 vfree(buffer->pages);
285 void ion_buffer_destroy(struct ion_buffer *buffer)
287 if (WARN_ON(buffer->kmap_cnt > 0))
288 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
289 buffer->heap->ops->unmap_dma(buffer->heap, buffer);
290 buffer->heap->ops->free(buffer);
292 vfree(buffer->pages);
296 static void _ion_buffer_destroy(struct kref *kref)
298 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
299 struct ion_heap *heap = buffer->heap;
300 struct ion_device *dev = buffer->dev;
302 #if defined(CONFIG_SPRD_IOMMU)
305 for (i = IOMMU_GSP; i < IOMMU_MAX; i++) {
306 if (buffer->iomap_cnt[i] > 0) {
308 buffer->iomap_cnt[i] = 0;
309 sprd_iova_unmap(i, buffer->iova[i], buffer->size);
310 sprd_iova_free(i, buffer->iova[i], buffer->size);
315 mutex_lock(&dev->buffer_lock);
316 rb_erase(&buffer->node, &dev->buffers);
317 mutex_unlock(&dev->buffer_lock);
319 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
320 ion_heap_freelist_add(heap, buffer);
322 ion_buffer_destroy(buffer);
325 static void ion_buffer_get(struct ion_buffer *buffer)
327 kref_get(&buffer->ref);
330 static int ion_buffer_put(struct ion_buffer *buffer)
332 return kref_put(&buffer->ref, _ion_buffer_destroy);
335 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
337 mutex_lock(&buffer->lock);
338 buffer->handle_count++;
339 mutex_unlock(&buffer->lock);
342 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
345 * when a buffer is removed from a handle, if it is not in
346 * any other handles, copy the taskcomm and the pid of the
347 * process it's being removed from into the buffer. At this
348 * point there will be no way to track what processes this buffer is
349 * being used by, since it only exists as a dma_buf file descriptor.
350 * The taskcomm and pid can provide a debug hint as to where this fd
353 mutex_lock(&buffer->lock);
354 buffer->handle_count--;
355 BUG_ON(buffer->handle_count < 0);
356 if (!buffer->handle_count) {
357 struct task_struct *task;
359 task = current->group_leader;
360 get_task_comm(buffer->task_comm, task);
361 buffer->pid = task_pid_nr(task);
362 buffer->tid = task_pid_nr(current);
364 mutex_unlock(&buffer->lock);
367 static struct ion_handle *ion_handle_create(struct ion_client *client,
368 struct ion_buffer *buffer)
370 struct ion_handle *handle;
372 handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
374 return ERR_PTR(-ENOMEM);
375 kref_init(&handle->ref);
376 RB_CLEAR_NODE(&handle->node);
377 handle->client = client;
378 ion_buffer_get(buffer);
379 ion_buffer_add_to_handle(buffer);
380 handle->buffer = buffer;
385 static void ion_handle_kmap_put(struct ion_handle *);
387 static void ion_handle_destroy(struct kref *kref)
389 struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
390 struct ion_client *client = handle->client;
391 struct ion_buffer *buffer = handle->buffer;
393 mutex_lock(&buffer->lock);
394 while (handle->kmap_cnt)
395 ion_handle_kmap_put(handle);
396 mutex_unlock(&buffer->lock);
398 idr_remove(&client->idr, handle->id);
399 if (!RB_EMPTY_NODE(&handle->node))
400 rb_erase(&handle->node, &client->handles);
402 ion_buffer_remove_from_handle(buffer);
403 ion_buffer_put(buffer);
408 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
410 return handle->buffer;
413 static void ion_handle_get(struct ion_handle *handle)
415 kref_get(&handle->ref);
418 static int ion_handle_put(struct ion_handle *handle)
420 struct ion_client *client = handle->client;
423 mutex_lock(&client->lock);
424 ret = kref_put(&handle->ref, ion_handle_destroy);
425 mutex_unlock(&client->lock);
430 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
431 struct ion_buffer *buffer)
433 struct rb_node *n = client->handles.rb_node;
436 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
437 if (buffer < entry->buffer)
439 else if (buffer > entry->buffer)
444 return ERR_PTR(-EINVAL);
447 static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
450 struct ion_handle *handle;
452 mutex_lock(&client->lock);
453 handle = idr_find(&client->idr, id);
455 ion_handle_get(handle);
456 mutex_unlock(&client->lock);
458 return handle ? handle : ERR_PTR(-EINVAL);
461 static bool ion_handle_validate(struct ion_client *client,
462 struct ion_handle *handle)
464 WARN_ON(!mutex_is_locked(&client->lock));
465 return (idr_find(&client->idr, handle->id) == handle);
468 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
471 struct rb_node **p = &client->handles.rb_node;
472 struct rb_node *parent = NULL;
473 struct ion_handle *entry;
475 id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
483 entry = rb_entry(parent, struct ion_handle, node);
485 if (handle->buffer < entry->buffer)
487 else if (handle->buffer > entry->buffer)
490 WARN(1, "%s: buffer already found.", __func__);
493 rb_link_node(&handle->node, parent, p);
494 rb_insert_color(&handle->node, &client->handles);
499 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
500 size_t align, unsigned int heap_id_mask,
503 struct ion_handle *handle;
504 struct ion_device *dev = client->dev;
505 struct ion_buffer *buffer = NULL;
506 struct ion_heap *heap;
509 pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
510 len, align, heap_id_mask, flags);
512 * traverse the list of heaps available in this system in priority
513 * order. If the heap type is supported by the client and matches the
514 * request of the caller, allocate from it. Repeat until the allocation
515 * has succeeded or all heaps have been tried
517 len = PAGE_ALIGN(len);
520 return ERR_PTR(-EINVAL);
522 down_read(&dev->lock);
523 plist_for_each_entry(heap, &dev->heaps, node) {
524 /* if the caller didn't specify this heap id */
525 if (!((1 << heap->id) & heap_id_mask))
527 buffer = ion_buffer_create(heap, dev, len, align, flags);
533 if (buffer == NULL) {
534 pr_err("%s: no heap could satisfy the allocation\n", __func__);
535 return ERR_PTR(-ENODEV);
538 if (IS_ERR(buffer)) {
539 pr_err("%s: ion_buffer_create() failed: %ld\n", __func__, PTR_ERR(buffer));
540 return ERR_CAST(buffer);
543 handle = ion_handle_create(client, buffer);
546 * ion_buffer_create will create a buffer with a ref_cnt of 1,
547 * and ion_handle_create will take a second reference, drop one here
549 ion_buffer_put(buffer);
551 if (IS_ERR(handle)) {
552 pr_err("%s: ion_handle_create() failed: %ld\n", __func__, PTR_ERR(handle));
556 mutex_lock(&client->lock);
557 ret = ion_handle_add(client, handle);
558 mutex_unlock(&client->lock);
560 ion_handle_put(handle);
561 handle = ERR_PTR(ret);
566 EXPORT_SYMBOL(ion_alloc);
568 #ifdef CONFIG_DRM_SPRD
569 struct ion_handle *ion_alloc_with_gem(struct ion_client *client, size_t len,
570 size_t align, unsigned int heap_id_mask,
572 struct drm_gem_object *obj)
574 struct ion_handle *handle;
576 handle = ion_alloc(client, len, align, heap_id_mask, flags);
578 handle->buffer->obj = obj;
582 EXPORT_SYMBOL(ion_alloc_with_gem);
584 struct drm_gem_object *ion_get_gem(struct ion_handle *handle)
586 if (handle && handle->buffer)
587 return handle->buffer->obj;
591 EXPORT_SYMBOL(ion_get_gem);
594 void ion_free(struct ion_client *client, struct ion_handle *handle)
598 BUG_ON(client != handle->client);
600 mutex_lock(&client->lock);
601 valid_handle = ion_handle_validate(client, handle);
604 WARN(1, "%s: invalid handle passed to free.\n", __func__);
605 mutex_unlock(&client->lock);
608 mutex_unlock(&client->lock);
609 ion_handle_put(handle);
611 EXPORT_SYMBOL(ion_free);
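/*
 * Usage sketch for ion_alloc()/ion_free() (illustrative; "client" and the
 * heap mask are assumptions, not part of this file):
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_4K, SZ_4K,
 *			   ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */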
613 int ion_phys(struct ion_client *client, struct ion_handle *handle,
614 ion_phys_addr_t *addr, size_t *len)
616 struct ion_buffer *buffer;
619 mutex_lock(&client->lock);
620 if (!ion_handle_validate(client, handle)) {
621 mutex_unlock(&client->lock);
625 buffer = handle->buffer;
627 if (!buffer->heap->ops->phys) {
628 pr_err("%s: ion_phys is not implemented by this heap.\n",
630 mutex_unlock(&client->lock);
633 mutex_unlock(&client->lock);
634 ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
637 EXPORT_SYMBOL(ion_phys);
639 int ion_is_phys(struct ion_client *client, struct ion_handle *handle)
641 struct ion_buffer *buffer;
644 mutex_lock(&client->lock);
645 if (!ion_handle_validate(client, handle)) {
646 mutex_unlock(&client->lock);
650 buffer = handle->buffer;
652 if (!buffer->heap->ops->phys)
655 mutex_unlock(&client->lock);
659 EXPORT_SYMBOL(ion_is_phys);
661 int ion_is_cached(struct ion_client *client, struct ion_handle *handle)
663 struct ion_buffer *buffer;
666 mutex_lock(&client->lock);
667 if (!ion_handle_validate(client, handle)) {
668 mutex_unlock(&client->lock);
672 buffer = handle->buffer;
674 cached = ion_buffer_cached(buffer);
675 mutex_unlock(&client->lock);
679 EXPORT_SYMBOL(ion_is_cached);
681 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
685 if (buffer->kmap_cnt) {
687 return buffer->vaddr;
689 vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
690 if (WARN_ONCE(vaddr == NULL,
691 "heap->ops->map_kernel should return ERR_PTR on error"))
692 return ERR_PTR(-EINVAL);
695 buffer->vaddr = vaddr;
700 static void *ion_handle_kmap_get(struct ion_handle *handle)
702 struct ion_buffer *buffer = handle->buffer;
705 if (handle->kmap_cnt) {
707 return buffer->vaddr;
709 vaddr = ion_buffer_kmap_get(buffer);
716 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
719 if (!buffer->kmap_cnt) {
720 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
721 buffer->vaddr = NULL;
725 static void ion_handle_kmap_put(struct ion_handle *handle)
727 struct ion_buffer *buffer = handle->buffer;
729 if (!handle->kmap_cnt) {
730 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
735 if (!handle->kmap_cnt)
736 ion_buffer_kmap_put(buffer);
739 int ion_map_iommu(struct ion_client *client, struct ion_handle *handle, int domain_no, unsigned long *ptr_iova)
741 struct ion_buffer *buffer;
743 mutex_lock(&client->lock);
744 if (!ion_handle_validate(client, handle)) {
745 pr_err("%s: invalid handle passed to map_kernel.\n",
747 mutex_unlock(&client->lock);
751 buffer = handle->buffer;
753 if (!handle->buffer->heap->ops->map_iommu) {
754 pr_err("%s: map_kernel is not implemented by this heap.\n",
756 mutex_unlock(&client->lock);
760 mutex_lock(&buffer->lock);
761 handle->buffer->heap->ops->map_iommu(buffer, domain_no, ptr_iova);
762 mutex_unlock(&buffer->lock);
763 mutex_unlock(&client->lock);
766 EXPORT_SYMBOL(ion_map_iommu);
768 int ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle, int domain_no)
770 struct ion_buffer *buffer;
772 mutex_lock(&client->lock);
773 if (!ion_handle_validate(client, handle)) {
774 pr_err("%s: invalid handle passed to map_kernel.\n",
776 mutex_unlock(&client->lock);
780 buffer = handle->buffer;
782 if (!handle->buffer->heap->ops->unmap_iommu) {
783 pr_err("%s: unmap_iommu is not implemented by this heap.\n",
785 mutex_unlock(&client->lock);
789 mutex_lock(&buffer->lock);
790 handle->buffer->heap->ops->unmap_iommu(buffer, domain_no);
791 mutex_unlock(&buffer->lock);
792 mutex_unlock(&client->lock);
795 EXPORT_SYMBOL(ion_unmap_iommu);
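/*
 * Usage sketch for the SPRD IOMMU helpers above (illustrative; client,
 * handle and the domain are assumptions): map a buffer into a domain,
 * program the hardware with the returned device address, then unmap.
 *
 *	unsigned long iova;
 *	int ret;
 *
 *	ret = ion_map_iommu(client, handle, IOMMU_GSP, &iova);
 *	if (ret)
 *		return ret;
 *	... hand iova to the device ...
 *	ion_unmap_iommu(client, handle, IOMMU_GSP);
 */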
797 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
799 struct ion_buffer *buffer;
802 mutex_lock(&client->lock);
803 if (!ion_handle_validate(client, handle)) {
804 pr_err("%s: invalid handle passed to map_kernel.\n",
806 mutex_unlock(&client->lock);
807 return ERR_PTR(-EINVAL);
810 buffer = handle->buffer;
812 if (!handle->buffer->heap->ops->map_kernel) {
813 pr_err("%s: map_kernel is not implemented by this heap.\n",
815 mutex_unlock(&client->lock);
816 return ERR_PTR(-ENODEV);
819 mutex_lock(&buffer->lock);
820 vaddr = ion_handle_kmap_get(handle);
821 mutex_unlock(&buffer->lock);
822 mutex_unlock(&client->lock);
825 EXPORT_SYMBOL(ion_map_kernel);
827 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
829 struct ion_buffer *buffer;
831 mutex_lock(&client->lock);
832 buffer = handle->buffer;
833 mutex_lock(&buffer->lock);
834 ion_handle_kmap_put(handle);
835 mutex_unlock(&buffer->lock);
836 mutex_unlock(&client->lock);
838 EXPORT_SYMBOL(ion_unmap_kernel);
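/*
 * Usage sketch (illustrative): map_kernel/unmap_kernel calls must be
 * balanced; the heap mapping is created on the first map and torn down
 * when the last kmap reference on the buffer is dropped.
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, len);		// len assumed <= buffer size
 *	ion_unmap_kernel(client, handle);
 */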
840 static int ion_debug_client_show(struct seq_file *s, void *unused)
842 struct ion_client *client = s->private;
844 size_t sizes[ION_NUM_HEAP_IDS] = {0};
845 const char *names[ION_NUM_HEAP_IDS] = {NULL};
848 mutex_lock(&client->lock);
849 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
850 struct ion_handle *handle = rb_entry(n, struct ion_handle,
852 unsigned int id = handle->buffer->heap->id;
855 names[id] = handle->buffer->heap->name;
856 sizes[id] += handle->buffer->size;
858 mutex_unlock(&client->lock);
860 seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
861 for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
864 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
869 static int ion_debug_client_open(struct inode *inode, struct file *file)
871 return single_open(file, ion_debug_client_show, inode->i_private);
874 static const struct file_operations debug_client_fops = {
875 .open = ion_debug_client_open,
878 .release = single_release,
881 static int ion_get_client_serial(const struct rb_root *root,
882 const unsigned char *name)
885 struct rb_node *node;
886 for (node = rb_first(root); node; node = rb_next(node)) {
887 struct ion_client *client = rb_entry(node, struct ion_client,
889 if (strcmp(client->name, name))
891 serial = max(serial, client->display_serial);
896 struct ion_client *ion_client_create(struct ion_device *dev,
899 struct ion_client *client;
900 struct task_struct *task;
902 struct rb_node *parent = NULL;
903 struct ion_client *entry;
908 pr_err("%s: Name cannot be null\n", __func__);
909 return ERR_PTR(-EINVAL);
912 get_task_struct(current->group_leader);
913 task_lock(current->group_leader);
914 pid = task_pid_nr(current->group_leader);
915 tid = task_pid_nr(current);
916 /* don't bother to store task struct for kernel threads,
917 they can't be killed anyway */
918 if (current->group_leader->flags & PF_KTHREAD) {
919 put_task_struct(current->group_leader);
922 task = current->group_leader;
924 task_unlock(current->group_leader);
926 client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
928 goto err_put_task_struct;
931 client->handles = RB_ROOT;
932 idr_init(&client->idr);
933 mutex_init(&client->lock);
937 client->name = kstrdup(name, GFP_KERNEL);
939 goto err_free_client;
941 down_write(&dev->lock);
942 client->display_serial = ion_get_client_serial(&dev->clients, name);
943 client->display_name = kasprintf(
944 GFP_KERNEL, "%s-%d", name, client->display_serial);
945 if (!client->display_name) {
946 up_write(&dev->lock);
947 goto err_free_client_name;
949 p = &dev->clients.rb_node;
952 entry = rb_entry(parent, struct ion_client, node);
956 else if (client > entry)
959 rb_link_node(&client->node, parent, p);
960 rb_insert_color(&client->node, &dev->clients);
962 client->debug_root = debugfs_create_file(client->display_name, 0664,
963 dev->clients_debug_root,
964 client, &debug_client_fops);
965 if (!client->debug_root) {
966 char buf[256], *path;
967 path = dentry_path(dev->clients_debug_root, buf, 256);
968 pr_err("Failed to create client debugfs at %s/%s\n",
969 path, client->display_name);
972 up_write(&dev->lock);
976 err_free_client_name:
982 put_task_struct(current->group_leader);
983 return ERR_PTR(-ENOMEM);
985 EXPORT_SYMBOL(ion_client_create);
987 void ion_client_destroy(struct ion_client *client)
989 struct ion_device *dev = client->dev;
992 pr_debug("%s: %d\n", __func__, __LINE__);
993 while ((n = rb_first(&client->handles))) {
994 struct ion_handle *handle = rb_entry(n, struct ion_handle,
996 ion_handle_destroy(&handle->ref);
999 idr_destroy(&client->idr);
1001 down_write(&dev->lock);
1003 put_task_struct(client->task);
1004 rb_erase(&client->node, &dev->clients);
1005 debugfs_remove_recursive(client->debug_root);
1006 up_write(&dev->lock);
1008 kfree(client->display_name);
1009 kfree(client->name);
1012 EXPORT_SYMBOL(ion_client_destroy);
1014 int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
1015 unsigned long *size, unsigned int *heap_id)
1017 struct ion_buffer *buffer;
1018 struct ion_heap *heap;
1020 mutex_lock(&client->lock);
1021 if (!ion_handle_validate(client, handle)) {
1022 pr_err("%s: invalid handle passed to %s.\n",
1023 __func__, __func__);
1024 mutex_unlock(&client->lock);
1027 buffer = handle->buffer;
1028 mutex_lock(&buffer->lock);
1029 heap = buffer->heap;
1030 *heap_id = (1 << heap->id);
1031 *size = buffer->size;
1032 mutex_unlock(&buffer->lock);
1033 mutex_unlock(&client->lock);
1037 EXPORT_SYMBOL(ion_handle_get_size);
1039 struct sg_table *ion_sg_table(struct ion_client *client,
1040 struct ion_handle *handle)
1042 struct ion_buffer *buffer;
1043 struct sg_table *table;
1045 mutex_lock(&client->lock);
1046 if (!ion_handle_validate(client, handle)) {
1047 pr_err("%s: invalid handle passed to map_dma.\n",
1049 mutex_unlock(&client->lock);
1050 return ERR_PTR(-EINVAL);
1052 buffer = handle->buffer;
1053 table = buffer->sg_table;
1054 mutex_unlock(&client->lock);
1057 EXPORT_SYMBOL(ion_sg_table);
1059 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1061 enum dma_data_direction direction);
1063 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1064 enum dma_data_direction direction)
1066 struct dma_buf *dmabuf = attachment->dmabuf;
1067 struct ion_buffer *buffer = dmabuf->priv;
1069 ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1070 return buffer->sg_table;
1073 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1074 struct sg_table *table,
1075 enum dma_data_direction direction)
1079 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1080 size_t size, enum dma_data_direction dir)
1082 struct scatterlist sg;
1084 sg_init_table(&sg, 1);
1085 sg_set_page(&sg, page, size, 0);
1087 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1088 * for the targeted device, but this works on the currently targeted
1091 sg_dma_address(&sg) = page_to_phys(page);
1092 dma_sync_sg_for_device(dev, &sg, 1, dir);
1095 struct ion_vma_list {
1096 struct list_head list;
1097 struct vm_area_struct *vma;
1100 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1102 enum dma_data_direction dir)
1104 struct ion_vma_list *vma_list;
1105 int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1108 pr_debug("%s: syncing for device %s\n", __func__,
1109 dev ? dev_name(dev) : "null");
1111 if (!ion_buffer_fault_user_mappings(buffer))
1114 mutex_lock(&buffer->lock);
1115 for (i = 0; i < pages; i++) {
1116 struct page *page = buffer->pages[i];
1118 if (ion_buffer_page_is_dirty(page))
1119 ion_pages_sync_for_device(dev, ion_buffer_page(page),
1122 ion_buffer_page_clean(buffer->pages + i);
1124 list_for_each_entry(vma_list, &buffer->vmas, list) {
1125 struct vm_area_struct *vma = vma_list->vma;
1127 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1130 mutex_unlock(&buffer->lock);
1133 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1135 struct ion_buffer *buffer = vma->vm_private_data;
1139 mutex_lock(&buffer->lock);
1140 ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1141 BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1143 pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1144 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1145 mutex_unlock(&buffer->lock);
1147 return VM_FAULT_ERROR;
1149 return VM_FAULT_NOPAGE;
1152 static void ion_vm_open(struct vm_area_struct *vma)
1154 struct ion_buffer *buffer = vma->vm_private_data;
1155 struct ion_vma_list *vma_list;
1157 vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1160 vma_list->vma = vma;
1161 mutex_lock(&buffer->lock);
1162 list_add(&vma_list->list, &buffer->vmas);
1163 mutex_unlock(&buffer->lock);
1164 pr_debug("%s: adding %p\n", __func__, vma);
1167 static void ion_vm_close(struct vm_area_struct *vma)
1169 struct ion_buffer *buffer = vma->vm_private_data;
1170 struct ion_vma_list *vma_list, *tmp;
1172 pr_debug("%s\n", __func__);
1173 mutex_lock(&buffer->lock);
1174 list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1175 if (vma_list->vma != vma)
1177 list_del(&vma_list->list);
1179 pr_debug("%s: deleting %p\n", __func__, vma);
1182 mutex_unlock(&buffer->lock);
1185 static struct vm_operations_struct ion_vma_ops = {
1186 .open = ion_vm_open,
1187 .close = ion_vm_close,
1188 .fault = ion_vm_fault,
1191 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1193 struct ion_buffer *buffer = dmabuf->priv;
1196 if (!buffer->heap->ops->map_user) {
1197 pr_err("%s: this heap does not define a method for mapping "
1198 "to userspace\n", __func__);
1202 if (ion_buffer_fault_user_mappings(buffer)) {
1203 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1205 vma->vm_private_data = buffer;
1206 vma->vm_ops = &ion_vma_ops;
1211 if (!(buffer->flags & ION_FLAG_CACHED))
1212 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1214 mutex_lock(&buffer->lock);
1215 /* now map it to userspace */
1216 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1217 mutex_unlock(&buffer->lock);
1220 pr_err("%s: failure mapping buffer to userspace\n",
1226 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1228 struct ion_buffer *buffer = dmabuf->priv;
1229 ion_buffer_put(buffer);
1231 #ifdef CONFIG_DRM_SPRD
1233 drm_gem_object_unreference_unlocked(buffer->obj);
1239 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1241 struct ion_buffer *buffer = dmabuf->priv;
1242 return buffer->vaddr + offset * PAGE_SIZE;
1245 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1251 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1253 enum dma_data_direction direction)
1255 struct ion_buffer *buffer = dmabuf->priv;
1258 if (!buffer->heap->ops->map_kernel) {
1259 pr_err("%s: map kernel is not implemented by this heap.\n",
1264 mutex_lock(&buffer->lock);
1265 vaddr = ion_buffer_kmap_get(buffer);
1266 mutex_unlock(&buffer->lock);
1268 return PTR_ERR(vaddr);
1272 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1274 enum dma_data_direction direction)
1276 struct ion_buffer *buffer = dmabuf->priv;
1278 mutex_lock(&buffer->lock);
1279 ion_buffer_kmap_put(buffer);
1280 mutex_unlock(&buffer->lock);
1283 static struct dma_buf_ops dma_buf_ops = {
1284 .map_dma_buf = ion_map_dma_buf,
1285 .unmap_dma_buf = ion_unmap_dma_buf,
1287 .release = ion_dma_buf_release,
1288 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1289 .end_cpu_access = ion_dma_buf_end_cpu_access,
1290 .kmap_atomic = ion_dma_buf_kmap,
1291 .kunmap_atomic = ion_dma_buf_kunmap,
1292 .kmap = ion_dma_buf_kmap,
1293 .kunmap = ion_dma_buf_kunmap,
1296 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1297 struct ion_handle *handle)
1299 struct ion_buffer *buffer;
1300 struct dma_buf *dmabuf;
1303 mutex_lock(&client->lock);
1304 valid_handle = ion_handle_validate(client, handle);
1305 if (!valid_handle) {
1306 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1307 mutex_unlock(&client->lock);
1308 return ERR_PTR(-EINVAL);
1310 buffer = handle->buffer;
1311 ion_buffer_get(buffer);
1312 mutex_unlock(&client->lock);
1314 dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1315 if (IS_ERR(dmabuf)) {
1316 ion_buffer_put(buffer);
1322 EXPORT_SYMBOL(ion_share_dma_buf);
1324 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1326 struct dma_buf *dmabuf;
1329 dmabuf = ion_share_dma_buf(client, handle);
1330 if (IS_ERR(dmabuf)) {
1331 pr_err("%s: dmabuf is error and dmabuf is %p!\n",__func__,dmabuf);
1332 return PTR_ERR(dmabuf);
1335 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1337 pr_err("%s: dmabuf fd is error %d!\n",__func__, fd);
1338 dma_buf_put(dmabuf);
1343 EXPORT_SYMBOL(ion_share_dma_buf_fd);
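/*
 * Usage sketch (illustrative): export a buffer as a file descriptor that
 * can be passed to another process or driver; the dma_buf then keeps the
 * buffer alive even after the local handle is freed.
 *
 *	int fd = ion_share_dma_buf_fd(client, handle);
 *
 *	if (fd < 0)
 *		return fd;
 *	ion_free(client, handle);	// buffer persists via the fd
 */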
1345 struct ion_handle *get_ion_handle_from_dmabuf(struct ion_client *client, struct dma_buf *dmabuf)
1347 struct ion_buffer *buffer;
1348 struct ion_handle *handle;
1351 /* if this memory came from ion */
1352 if (dmabuf->ops != &dma_buf_ops) {
1353 pr_err("%s: can not import dmabuf from another exporter\n",
1355 return ERR_PTR(-EINVAL);
1357 buffer = dmabuf->priv;
1359 mutex_lock(&client->lock);
1360 /* if a handle exists for this buffer just take a reference to it */
1361 handle = ion_handle_lookup(client, buffer);
1362 if (!IS_ERR(handle)) {
1363 ion_handle_get(handle);
1364 mutex_unlock(&client->lock);
1367 mutex_unlock(&client->lock);
1369 handle = ion_handle_create(client, buffer);
1373 mutex_lock(&client->lock);
1374 ret = ion_handle_add(client, handle);
1375 mutex_unlock(&client->lock);
1377 ion_handle_put(handle);
1378 handle = ERR_PTR(ret);
1384 EXPORT_SYMBOL(get_ion_handle_from_dmabuf);
1386 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1388 struct dma_buf *dmabuf;
1389 struct ion_handle *handle;
1391 dmabuf = dma_buf_get(fd);
1392 if (IS_ERR(dmabuf)) {
1393 pr_err("ion_import_dma_buf() dmabuf=0x%lx, fd:%d, dma_buf_get error!\n",
1394 (unsigned long)dmabuf, fd);
1395 return ERR_PTR(PTR_ERR(dmabuf));
1397 handle = get_ion_handle_from_dmabuf(client, dmabuf);
1398 dma_buf_put(dmabuf);
1401 EXPORT_SYMBOL(ion_import_dma_buf);
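/*
 * Usage sketch (illustrative): turn an fd received from elsewhere back
 * into a local handle. If this client already holds a handle to the
 * underlying buffer, that handle is reused with its refcount raised.
 *
 *	struct ion_handle *handle = ion_import_dma_buf(client, fd);
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 */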
1403 static int ion_invalidate_for_cpu(struct ion_client *client, int fd)
1405 struct dma_buf *dmabuf;
1406 struct ion_buffer *buffer;
1408 dmabuf = dma_buf_get(fd);
1411 pr_err("%s: dmabuf is error and dmabuf is %p!\n",__func__,dmabuf);
1412 return PTR_ERR(dmabuf);
1415 /* if this memory came from ion */
1416 if (dmabuf->ops != &dma_buf_ops) {
1417 pr_err("%s: can not sync dmabuf from another exporter\n",
1419 dma_buf_put(dmabuf);
1422 buffer = dmabuf->priv;
1424 dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
1425 buffer->sg_table->nents, DMA_FROM_DEVICE);
1426 dma_buf_put(dmabuf);
1430 static int ion_sync_for_device(struct ion_client *client, int fd)
1432 struct dma_buf *dmabuf;
1433 struct ion_buffer *buffer;
1435 dmabuf = dma_buf_get(fd);
1436 if (IS_ERR(dmabuf)) {
1437 pr_err("%s: the dmabuf is err dmabuf is %p, fd %d\n",__func__,dmabuf,fd);
1438 return PTR_ERR(dmabuf);
1441 /* if this memory came from ion */
1442 if (dmabuf->ops != &dma_buf_ops) {
1443 pr_err("%s: can not sync dmabuf from another exporter\n",
1445 dma_buf_put(dmabuf);
1448 buffer = dmabuf->priv;
1450 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1451 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1452 dma_buf_put(dmabuf);
1456 /* fix up the cases where the ioctl direction bits are incorrect */
1457 static unsigned int ion_ioctl_dir(unsigned int cmd)
1462 case ION_IOC_CUSTOM:
1465 return _IOC_DIR(cmd);
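/*
 * Userspace view (illustrative sketch, not part of this file): the ioctls
 * dispatched by ion_ioctl() below are issued against /dev/ion, e.g. an
 * allocation followed by an export to a dma_buf fd:
 *
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_id_mask = ION_HEAP_SYSTEM_MASK,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	struct ion_fd_data share;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);	// share.fd is a dma_buf fd
 */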
1469 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1471 struct ion_client *client = filp->private_data;
1472 struct ion_device *dev = client->dev;
1473 struct ion_handle *cleanup_handle = NULL;
1478 struct ion_fd_data fd;
1479 struct ion_allocation_data allocation;
1480 struct ion_handle_data handle;
1481 struct ion_custom_data custom;
1484 dir = ion_ioctl_dir(cmd);
1485 pr_debug("%s:cmd[0x%x]dir[0x%x]\n", __func__, cmd, dir);
1487 if (_IOC_SIZE(cmd) > sizeof(data)) {
1492 if (dir & _IOC_WRITE)
1493 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) {
1501 struct ion_handle *handle;
1503 handle = ion_alloc(client, data.allocation.len,
1504 data.allocation.align,
1505 data.allocation.heap_id_mask,
1506 data.allocation.flags);
1507 if (IS_ERR(handle)) {
1508 ret = PTR_ERR(handle);
1512 data.allocation.handle = handle->id;
1514 cleanup_handle = handle;
1519 struct ion_handle *handle;
1521 handle = ion_handle_get_by_id(client, data.handle.handle);
1522 if (IS_ERR(handle)) {
1523 ret = PTR_ERR(handle);
1526 ion_free(client, handle);
1527 ion_handle_put(handle);
1533 struct ion_handle *handle;
1535 handle = ion_handle_get_by_id(client, data.handle.handle);
1536 if (IS_ERR(handle)) {
1537 ret = PTR_ERR(handle);
1540 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1541 ion_handle_put(handle);
1546 case ION_IOC_IMPORT:
1548 struct ion_handle *handle;
1549 handle = ion_import_dma_buf(client, data.fd.fd);
1551 ret = PTR_ERR(handle);
1553 data.handle.handle = handle->id;
1556 case ION_IOC_INVALIDATE:
1558 ret = ion_invalidate_for_cpu(client, data.fd.fd);
1563 ret = ion_sync_for_device(client, data.fd.fd);
1566 case ION_IOC_CUSTOM:
1568 if (!dev->custom_ioctl) {
1572 ret = dev->custom_ioctl(client, data.custom.cmd,
1581 if (dir & _IOC_READ) {
1582 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1584 ion_free(client, cleanup_handle);
1591 pr_info("%s:cmd[0x%x]ret[%d]\n", __func__, cmd, ret);
1596 static int ion_release(struct inode *inode, struct file *file)
1598 struct ion_client *client = file->private_data;
1600 pr_debug("%s: %d\n", __func__, __LINE__);
1601 ion_client_destroy(client);
1605 static int ion_open(struct inode *inode, struct file *file)
1607 struct miscdevice *miscdev = file->private_data;
1608 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1609 struct ion_client *client;
1610 char debug_name[64];
1612 pr_debug("%s: %d\n", __func__, __LINE__);
1613 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1614 client = ion_client_create(dev, debug_name);
1616 return PTR_ERR(client);
1617 file->private_data = client;
1622 static const struct file_operations ion_fops = {
1623 .owner = THIS_MODULE,
1625 .release = ion_release,
1626 .unlocked_ioctl = ion_ioctl,
1627 .compat_ioctl = compat_ion_ioctl,
1630 static size_t ion_debug_heap_total(struct ion_client *client,
1636 mutex_lock(&client->lock);
1637 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1638 struct ion_handle *handle = rb_entry(n,
1641 if (handle->buffer->heap->id == id)
1642 size += handle->buffer->size;
1644 mutex_unlock(&client->lock);
1648 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1650 struct ion_heap *heap = s->private;
1651 struct ion_device *dev = heap->dev;
1655 size_t total_size = 0;
1656 size_t total_orphaned_size = 0;
1658 seq_printf(s, "%16.s %6.s %6.s %10.s %16.s\n", "client", "pid", "tid", "size", "alloc_time");
1659 seq_printf(s, "----------------------------------------------------------\n");
1661 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1662 struct ion_client *client = rb_entry(n, struct ion_client,
1665 mutex_lock(&client->lock);
1666 for (r = rb_first(&client->handles); r; r = rb_next(r)) {
1667 struct ion_handle *handle = rb_entry(r,
1670 struct ion_buffer *buffer = handle->buffer;
1672 if (buffer->heap->id == heap->id) {
1675 time_to_tm(buffer->alloc_time.tv_sec, 0, &t);
1677 char task_comm[TASK_COMM_LEN];
1679 get_task_comm(task_comm, client->task);
1680 seq_printf(s, "%16.s %6u %6u %10zu %ld.%d.%d-%d:%d:%d.%ld\n",
1681 task_comm, client->pid, client->tid, buffer->size,
1682 t.tm_year + 1900, t.tm_mon + 1,
1683 t.tm_mday, t.tm_hour, t.tm_min,
1684 t.tm_sec, buffer->alloc_time.tv_usec);
1686 seq_printf(s, "%16.s %6u %6u %10zu %ld.%d.%d-%d:%d:%d.%ld\n",
1687 client->name, client->pid, client->tid, buffer->size,
1688 t.tm_year + 1900, t.tm_mon + 1,
1689 t.tm_mday, t.tm_hour, t.tm_min,
1690 t.tm_sec, buffer->alloc_time.tv_usec);
1694 mutex_unlock(&client->lock);
1696 seq_printf(s, "----------------------------------------------------------\n");
1697 seq_printf(s, "orphaned allocations (info is from last known client):"
1699 mutex_lock(&dev->buffer_lock);
1700 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1701 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1703 if (buffer->heap->id != heap->id)
1705 total_size += buffer->size;
1706 if (!buffer->handle_count) {
1707 time_to_tm(buffer->alloc_time.tv_sec, 0, &t);
1708 seq_printf(s, "%16.s %6u %6u %10zu %d %d %ld.%d.%d-%d:%d:%d.%ld\n",
1709 buffer->task_comm, buffer->pid, buffer->tid, buffer->size,
1710 buffer->kmap_cnt, atomic_read(&buffer->ref.refcount),
1711 t.tm_year + 1900, t.tm_mon + 1,
1712 t.tm_mday, t.tm_hour, t.tm_min,
1713 t.tm_sec, buffer->alloc_time.tv_usec);
1714 total_orphaned_size += buffer->size;
1717 mutex_unlock(&dev->buffer_lock);
1718 seq_printf(s, "----------------------------------------------------------\n");
1719 seq_printf(s, "%16.s %22zu\n", "total orphaned",
1720 total_orphaned_size);
1721 seq_printf(s, "%16.s %22zu\n", "total ", total_size);
1722 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1723 seq_printf(s, "%16.s %22zu\n", "deferred free",
1724 heap->free_list_size);
1725 seq_printf(s, "----------------------------------------------------------\n");
1727 if (heap->debug_show)
1728 heap->debug_show(heap, s, unused);
1733 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1735 return single_open(file, ion_debug_heap_show, inode->i_private);
1738 static const struct file_operations debug_heap_fops = {
1739 .open = ion_debug_heap_open,
1741 .llseek = seq_lseek,
1742 .release = single_release,
1745 #ifdef DEBUG_HEAP_SHRINKER
1746 static int debug_shrink_set(void *data, u64 val)
1748 struct ion_heap *heap = data;
1749 struct shrink_control sc;
1758 objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1759 sc.nr_to_scan = objs;
1761 heap->shrinker.shrink(&heap->shrinker, &sc);
1765 static int debug_shrink_get(void *data, u64 *val)
1767 struct ion_heap *heap = data;
1768 struct shrink_control sc;
1774 objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1779 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1780 debug_shrink_set, "%llu\n");
1783 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1785 struct dentry *debug_file;
1787 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1788 !heap->ops->unmap_dma)
1789 pr_err("%s: can not add heap with invalid ops struct.\n",
1792 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1793 ion_heap_init_deferred_free(heap);
1795 if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1796 ion_heap_init_shrinker(heap);
1799 down_write(&dev->lock);
1800 /* use negative heap->id to reverse the priority -- when traversing
1801 the list later attempt higher id numbers first */
1802 plist_node_init(&heap->node, -heap->id);
1803 plist_add(&heap->node, &dev->heaps);
1804 debug_file = debugfs_create_file(heap->name, 0664,
1805 dev->heaps_debug_root, heap,
1809 char buf[256], *path;
1810 path = dentry_path(dev->heaps_debug_root, buf, 256);
1811 pr_err("Failed to create heap debugfs at %s/%s\n",
1815 #ifdef DEBUG_HEAP_SHRINKER
1816 if (heap->shrinker.shrink) {
1817 char debug_name[64];
1819 snprintf(debug_name, 64, "%s_shrink", heap->name);
1820 debug_file = debugfs_create_file(
1821 debug_name, 0644, dev->heaps_debug_root, heap,
1822 &debug_shrink_fops);
1824 char buf[256], *path;
1825 path = dentry_path(dev->heaps_debug_root, buf, 256);
1826 pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1831 up_write(&dev->lock);
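/*
 * Registration sketch (illustrative; assumes the ion_heap_create() helper
 * from ion_heap.c and platform-provided "pdata"/"idev"): heaps are
 * plist-sorted on -heap->id, so a heap with a higher id is tried first
 * by ion_alloc().
 *
 *	struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *
 *	if (IS_ERR_OR_NULL(heap))
 *		return PTR_ERR(heap);
 *	ion_device_add_heap(idev, heap);
 */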
1834 struct ion_device *ion_device_create(long (*custom_ioctl)
1835 (struct ion_client *client,
1839 struct ion_device *idev;
1842 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1844 return ERR_PTR(-ENOMEM);
1846 idev->dev.minor = MISC_DYNAMIC_MINOR;
1847 idev->dev.name = "ion";
1848 idev->dev.fops = &ion_fops;
1849 idev->dev.parent = NULL;
1850 ret = misc_register(&idev->dev);
1852 pr_err("ion: failed to register misc device.\n");
1853 return ERR_PTR(ret);
1856 idev->debug_root = debugfs_create_dir("ion", NULL);
1857 if (!idev->debug_root) {
1858 pr_err("ion: failed to create debugfs root directory.\n");
1861 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1862 if (!idev->heaps_debug_root) {
1863 pr_err("ion: failed to create debugfs heaps directory.\n");
1866 idev->clients_debug_root = debugfs_create_dir("clients",
1868 if (!idev->clients_debug_root)
1869 pr_err("ion: failed to create debugfs clients directory.\n");
1873 idev->custom_ioctl = custom_ioctl;
1874 idev->buffers = RB_ROOT;
1875 mutex_init(&idev->buffer_lock);
1876 init_rwsem(&idev->lock);
1877 plist_head_init(&idev->heaps);
1878 idev->clients = RB_ROOT;
1882 void ion_device_destroy(struct ion_device *dev)
1884 misc_deregister(&dev->dev);
1885 debugfs_remove_recursive(dev->debug_root);
1886 /* XXX need to free the heaps and clients ? */
1890 void __init ion_reserve(struct ion_platform_data *data)
1894 for (i = 0; i < data->nr; i++) {
1895 if (data->heaps[i].size == 0)
1898 if (data->heaps[i].base == 0) {
1900 paddr = memblock_alloc_base(data->heaps[i].size,
1901 data->heaps[i].align,
1902 MEMBLOCK_ALLOC_ANYWHERE);
1904 pr_err("%s: error allocating memblock for "
1909 data->heaps[i].base = paddr;
1911 int ret = memblock_reserve(data->heaps[i].base,
1912 data->heaps[i].size);
1914 pr_err("memblock reserve of %zx@%lx failed\n",
1915 data->heaps[i].size,
1916 data->heaps[i].base);
1918 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1919 data->heaps[i].name,
1920 data->heaps[i].base,
1921 data->heaps[i].size);
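/*
 * Board-file sketch (illustrative; names and sizes are assumptions): a
 * carveout heap whose backing memory ion_reserve() takes from memblock
 * at boot, called from the machine's .reserve callback.
 *
 *	static struct ion_platform_heap example_heaps[] = {
 *		{
 *			.type  = ION_HEAP_TYPE_CARVEOUT,
 *			.id    = ION_HEAP_TYPE_CARVEOUT,
 *			.name  = "carveout",
 *			.base  = 0,	// 0: let ion_reserve() pick a base
 *			.size  = SZ_16M,
 *			.align = SZ_1M,
 *		},
 *	};
 *	static struct ion_platform_data example_ion_pdata = {
 *		.nr    = ARRAY_SIZE(example_heaps),
 *		.heaps = example_heaps,
 *	};
 *
 *	ion_reserve(&example_ion_pdata);
 */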