drm/sprd: fix GEM object always being created for an imported dma-buf
profile/mobile/platform/kernel/linux-3.10-sc7730.git: drivers/staging/android/ion/ion.c
1 /*
2
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
21 #include <linux/fs.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
28 #include <linux/mm.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/time.h>
39 #ifdef CONFIG_DRM_SPRD
40 #include <drm/drmP.h>
41 #endif
42
43 #include "ion.h"
44 #include "ion_priv.h"
45 #include "compat_ion.h"
46
47 /**
48  * struct ion_device - the metadata of the ion device node
49  * @dev:                the actual misc device
50  * @buffers:            an rb tree of all the existing buffers
51  * @buffer_lock:        lock protecting the tree of buffers
52  * @lock:               rwsem protecting the tree of heaps and clients
53  * @heaps:              list of all the heaps in the system
54  * @user_clients:       list of all the clients created from userspace
55  */
56 struct ion_device {
57         struct miscdevice dev;
58         struct rb_root buffers;
59         struct mutex buffer_lock;
60         struct rw_semaphore lock;
61         struct plist_head heaps;
62         long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
63                               unsigned long arg);
64         struct rb_root clients;
65         struct dentry *debug_root;
66         struct dentry *heaps_debug_root;
67         struct dentry *clients_debug_root;
68 };
69
70 /**
71  * struct ion_client - a process/hw block local address space
72  * @node:               node in the tree of all clients
73  * @dev:                backpointer to ion device
74  * @handles:            an rb tree of all the handles in this client
75  * @idr:                an idr space for allocating handle ids
76  * @lock:               lock protecting the tree of handles
77  * @name:               used for debugging
78  * @display_name:       used for debugging (unique version of @name)
79  * @display_serial:     used for debugging (to make display_name unique)
80  * @task:               used for debugging
81  *
82  * A client represents a list of buffers this client may access.
83  * The mutex stored here is used to protect both the tree of handles
84  * and the handles themselves, and should be held while modifying either.
85  */
86 struct ion_client {
87         struct rb_node node;
88         struct ion_device *dev;
89         struct rb_root handles;
90         struct idr idr;
91         struct mutex lock;
92         const char *name;
93         char *display_name;
94         int display_serial;
95         struct task_struct *task;
96         pid_t pid;
97         pid_t tid;
98         struct dentry *debug_root;
99 };
100
101 /**
102  * ion_handle - a client local reference to a buffer
103  * @ref:                reference count
104  * @client:             back pointer to the client the buffer resides in
105  * @buffer:             pointer to the buffer
106  * @node:               node in the client's handle rbtree
107  * @kmap_cnt:           count of times this client has mapped to kernel
108  * @id:                 client-unique id allocated by client->idr
109  *
110  * Modifications to node, map_cnt or mapping should be protected by the
111  * lock in the client.  Other fields are never changed after initialization.
112  */
113 struct ion_handle {
114         struct kref ref;
115         struct ion_client *client;
116         struct ion_buffer *buffer;
117         struct rb_node node;
118         unsigned int kmap_cnt;
119         int id;
120 };
121
122 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
123 {
124         return (buffer->flags & ION_FLAG_CACHED) &&
125                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
126 }
127
128 bool ion_buffer_cached(struct ion_buffer *buffer)
129 {
130         return !!(buffer->flags & ION_FLAG_CACHED);
131 }
132
133 static inline struct page *ion_buffer_page(struct page *page)
134 {
135         return (struct page *)((unsigned long)page & ~(1UL));
136 }
137
138 static inline bool ion_buffer_page_is_dirty(struct page *page)
139 {
140         return !!((unsigned long)page & 1UL);
141 }
142
143 static inline void ion_buffer_page_dirty(struct page **page)
144 {
145         *page = (struct page *)((unsigned long)(*page) | 1UL);
146 }
147
148 static inline void ion_buffer_page_clean(struct page **page)
149 {
150         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
151 }
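
/*
 * Note on the helpers above: buffer->pages[] entries double as a per-page
 * dirty flag.  struct page pointers are at least word aligned, so bit 0 is
 * borrowed as a "dirty" tag: ion_vm_fault() sets it when a page is faulted
 * into a userspace mapping, and ion_buffer_sync_for_device() syncs tagged
 * pages and clears the tag again.  Illustrative sketch:
 *
 *	ion_buffer_page_dirty(buffer->pages + i);          tag page i
 *	if (ion_buffer_page_is_dirty(buffer->pages[i]))
 *		page = ion_buffer_page(buffer->pages[i]);  strip the tag
 */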
152
153 /* this function should only be called while dev->lock is held */
154 static void ion_buffer_add(struct ion_device *dev,
155                            struct ion_buffer *buffer)
156 {
157         struct rb_node **p = &dev->buffers.rb_node;
158         struct rb_node *parent = NULL;
159         struct ion_buffer *entry;
160
161         while (*p) {
162                 parent = *p;
163                 entry = rb_entry(parent, struct ion_buffer, node);
164
165                 if (buffer < entry) {
166                         p = &(*p)->rb_left;
167                 } else if (buffer > entry) {
168                         p = &(*p)->rb_right;
169                 } else {
170                         pr_err("%s: buffer already found.", __func__);
171                         BUG();
172                 }
173         }
174
175         rb_link_node(&buffer->node, parent, p);
176         rb_insert_color(&buffer->node, &dev->buffers);
177 }
178
179 /* this function should only be called while dev->lock is held */
180 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
181                                      struct ion_device *dev,
182                                      unsigned long len,
183                                      unsigned long align,
184                                      unsigned long flags)
185 {
186         struct ion_buffer *buffer;
187         struct sg_table *table;
188         struct scatterlist *sg;
189         struct timeval time;
190         int i, ret;
191
192         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
193         if (!buffer)
194                 return ERR_PTR(-ENOMEM);
195
196         buffer->heap = heap;
197         buffer->flags = flags;
198         kref_init(&buffer->ref);
199
200         ret = heap->ops->allocate(heap, buffer, len, align, flags);
201
202         if (ret) {
203                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
204                         goto err2;
205
206                 ion_heap_freelist_drain(heap, 0);
207                 ret = heap->ops->allocate(heap, buffer, len, align,
208                                           flags);
209                 if (ret)
210                         goto err2;
211         }
212
213         buffer->dev = dev;
214         buffer->size = len;
215
216         table = heap->ops->map_dma(heap, buffer);
217         if (WARN_ONCE(table == NULL,
218                         "heap->ops->map_dma should return ERR_PTR on error"))
219                 table = ERR_PTR(-EINVAL);
220         if (IS_ERR(table)) {
221                 heap->ops->free(buffer);
222                 kfree(buffer);
223                 return ERR_PTR(PTR_ERR(table));
224         }
225         buffer->sg_table = table;
226         if (ion_buffer_fault_user_mappings(buffer)) {
227                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
228                 struct scatterlist *sg;
229                 int i, j, k = 0;
230
231                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
232                 if (!buffer->pages) {
233                         ret = -ENOMEM;
234                         goto err1;
235                 }
236
237                 for_each_sg(table->sgl, sg, table->nents, i) {
238                         struct page *page = sg_page(sg);
239
240                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
241                                 buffer->pages[k++] = page++;
242                 }
243
244                 if (ret)
245                         goto err;
246         }
247
248         buffer->dev = dev;
249         buffer->size = len;
250         INIT_LIST_HEAD(&buffer->vmas);
251         mutex_init(&buffer->lock);
252         /* this will set up dma addresses for the sglist -- it is not
253            technically correct as per the dma api -- a specific
254            device isn't really taking ownership here.  However, in practice on
255            our systems the only dma_address space is physical addresses.
256            Additionally, we can't afford the overhead of invalidating every
257            allocation via dma_map_sg. The implicit contract here is that
258            memory coming from the heaps is ready for dma, i.e. if it has a
259            cached mapping that mapping has been invalidated */
260         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
261                 sg_dma_address(sg) = sg_phys(sg);
262         mutex_lock(&dev->buffer_lock);
263         ion_buffer_add(dev, buffer);
264         mutex_unlock(&dev->buffer_lock);
265
266         do_gettimeofday(&time);
267         buffer->alloc_time = time;
268         return buffer;
269
270 err:
271         heap->ops->unmap_dma(heap, buffer);
272         heap->ops->free(buffer);
273 err1:
274         if (buffer->pages)
275                 vfree(buffer->pages);
276 err2:
277         kfree(buffer);
278         return ERR_PTR(ret);
279 }
280
281 void ion_buffer_destroy(struct ion_buffer *buffer)
282 {
283         if (WARN_ON(buffer->kmap_cnt > 0))
284                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
285         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
286         buffer->heap->ops->free(buffer);
287         if (buffer->pages)
288                 vfree(buffer->pages);
289         kfree(buffer);
290 }
291
292 static void _ion_buffer_destroy(struct kref *kref)
293 {
294         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
295         struct ion_heap *heap = buffer->heap;
296         struct ion_device *dev = buffer->dev;
297
298 #if defined(CONFIG_SPRD_IOMMU)
299
300         int i;
301
302         for (i = IOMMU_GSP; i < IOMMU_MAX; i++) {
303                 if (buffer->iomap_cnt[i] > 0) {
304                         buffer->iomap_cnt[i] = 0;
305                         sprd_iova_unmap(i, buffer->iova[i], buffer->size);
306                         sprd_iova_free(i, buffer->iova[i], buffer->size);
307                 }
308         }
309 #endif
310
311         mutex_lock(&dev->buffer_lock);
312         rb_erase(&buffer->node, &dev->buffers);
313         mutex_unlock(&dev->buffer_lock);
314
315         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
316                 ion_heap_freelist_add(heap, buffer);
317         else
318                 ion_buffer_destroy(buffer);
319 }
320
321 static void ion_buffer_get(struct ion_buffer *buffer)
322 {
323         kref_get(&buffer->ref);
324 }
325
326 static int ion_buffer_put(struct ion_buffer *buffer)
327 {
328         return kref_put(&buffer->ref, _ion_buffer_destroy);
329 }
330
331 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
332 {
333         mutex_lock(&buffer->lock);
334         buffer->handle_count++;
335         mutex_unlock(&buffer->lock);
336 }
337
338 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
339 {
340         /*
341          * when a buffer is removed from a handle, if it is not in
342          * any other handles, copy the taskcomm and the pid of the
343          * process it's being removed from into the buffer.  At this
344          * point there will be no way to track what processes this buffer is
345          * being used by, it only exists as a dma_buf file descriptor.
346          * The taskcomm and pid can provide a debug hint as to where this fd
347          * is in the system
348          */
349         mutex_lock(&buffer->lock);
350         buffer->handle_count--;
351         BUG_ON(buffer->handle_count < 0);
352         if (!buffer->handle_count) {
353                 struct task_struct *task;
354
355                 task = current->group_leader;
356                 get_task_comm(buffer->task_comm, task);
357                 buffer->pid = task_pid_nr(task);
358                 buffer->tid = task_pid_nr(current);
359         }
360         mutex_unlock(&buffer->lock);
361 }
362
363 static struct ion_handle *ion_handle_create(struct ion_client *client,
364                                      struct ion_buffer *buffer)
365 {
366         struct ion_handle *handle;
367
368         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
369         if (!handle)
370                 return ERR_PTR(-ENOMEM);
371         kref_init(&handle->ref);
372         RB_CLEAR_NODE(&handle->node);
373         handle->client = client;
374         ion_buffer_get(buffer);
375         ion_buffer_add_to_handle(buffer);
376         handle->buffer = buffer;
377
378         return handle;
379 }
380
381 static void ion_handle_kmap_put(struct ion_handle *);
382
383 static void ion_handle_destroy(struct kref *kref)
384 {
385         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
386         struct ion_client *client = handle->client;
387         struct ion_buffer *buffer = handle->buffer;
388
389         mutex_lock(&buffer->lock);
390         while (handle->kmap_cnt)
391                 ion_handle_kmap_put(handle);
392         mutex_unlock(&buffer->lock);
393
394         idr_remove(&client->idr, handle->id);
395         if (!RB_EMPTY_NODE(&handle->node))
396                 rb_erase(&handle->node, &client->handles);
397
398         ion_buffer_remove_from_handle(buffer);
399         ion_buffer_put(buffer);
400
401         kfree(handle);
402 }
403
404 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
405 {
406         return handle->buffer;
407 }
408
409 static void ion_handle_get(struct ion_handle *handle)
410 {
411         kref_get(&handle->ref);
412 }
413
414 static int ion_handle_put(struct ion_handle *handle)
415 {
416         struct ion_client *client = handle->client;
417         int ret;
418
419         mutex_lock(&client->lock);
420         ret = kref_put(&handle->ref, ion_handle_destroy);
421         mutex_unlock(&client->lock);
422
423         return ret;
424 }
425
426 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
427                                             struct ion_buffer *buffer)
428 {
429         struct rb_node *n = client->handles.rb_node;
430
431         while (n) {
432                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
433                 if (buffer < entry->buffer)
434                         n = n->rb_left;
435                 else if (buffer > entry->buffer)
436                         n = n->rb_right;
437                 else
438                         return entry;
439         }
440         return ERR_PTR(-EINVAL);
441 }
442
443 static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
444                                                 int id)
445 {
446         struct ion_handle *handle;
447
448         mutex_lock(&client->lock);
449         handle = idr_find(&client->idr, id);
450         if (handle)
451                 ion_handle_get(handle);
452         mutex_unlock(&client->lock);
453
454         return handle ? handle : ERR_PTR(-EINVAL);
455 }
456
457 static bool ion_handle_validate(struct ion_client *client,
458                                 struct ion_handle *handle)
459 {
460         WARN_ON(!mutex_is_locked(&client->lock));
461         return (idr_find(&client->idr, handle->id) == handle);
462 }
463
464 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
465 {
466         int id;
467         struct rb_node **p = &client->handles.rb_node;
468         struct rb_node *parent = NULL;
469         struct ion_handle *entry;
470
471         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
472         if (id < 0)
473                 return id;
474
475         handle->id = id;
476
477         while (*p) {
478                 parent = *p;
479                 entry = rb_entry(parent, struct ion_handle, node);
480
481                 if (handle->buffer < entry->buffer)
482                         p = &(*p)->rb_left;
483                 else if (handle->buffer > entry->buffer)
484                         p = &(*p)->rb_right;
485                 else
486                         WARN(1, "%s: buffer already found.", __func__);
487         }
488
489         rb_link_node(&handle->node, parent, p);
490         rb_insert_color(&handle->node, &client->handles);
491
492         return 0;
493 }
494
495 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
496                              size_t align, unsigned int heap_id_mask,
497                              unsigned int flags)
498 {
499         struct ion_handle *handle;
500         struct ion_device *dev = client->dev;
501         struct ion_buffer *buffer = NULL;
502         struct ion_heap *heap;
503         int ret;
504
505         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
506                  len, align, heap_id_mask, flags);
507         /*
508          * traverse the list of heaps available in this system in priority
509          * order.  If the heap type is supported by the client, and matches the
510          * request of the caller allocate from it.  Repeat until allocate has
511          * succeeded or all heaps have been tried
512          */
513         len = PAGE_ALIGN(len);
514
515         if (!len)
516                 return ERR_PTR(-EINVAL);
517
518         down_read(&dev->lock);
519         plist_for_each_entry(heap, &dev->heaps, node) {
520                 /* if the caller didn't specify this heap id */
521                 if (!((1 << heap->id) & heap_id_mask))
522                         continue;
523                 buffer = ion_buffer_create(heap, dev, len, align, flags);
524                 if (!IS_ERR(buffer))
525                         break;
526         }
527         up_read(&dev->lock);
528
529         if (buffer == NULL) {
530                 pr_err("%s: no matching heap found\n", __func__);
531                 return ERR_PTR(-ENODEV);
532         }
533
534         if (IS_ERR(buffer)) {
535                 pr_err("%s: buffer allocation failed (%ld)\n", __func__, PTR_ERR(buffer));
536                 return ERR_PTR(PTR_ERR(buffer));
537         }
538
539         handle = ion_handle_create(client, buffer);
540
541         /*
542          * ion_buffer_create will create a buffer with a ref_cnt of 1,
543          * and ion_handle_create will take a second reference, drop one here
544          */
545         ion_buffer_put(buffer);
546
547         if (IS_ERR(handle)) {
548                 pr_err("%s: handle creation failed (%ld)\n", __func__, PTR_ERR(handle));
549                 return handle;
550         }
551
552         mutex_lock(&client->lock);
553         ret = ion_handle_add(client, handle);
554         mutex_unlock(&client->lock);
555         if (ret) {
556                 ion_handle_put(handle);
557                 handle = ERR_PTR(ret);
558         }
559
560         return handle;
561 }
562 EXPORT_SYMBOL(ion_alloc);
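
/*
 * Typical in-kernel use of ion_alloc() (illustrative sketch; the heap id,
 * size and flags below are placeholders, not values from this file):
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
 *			   1 << my_heap_id, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 *
 * ion_buffer_create() takes one buffer reference and ion_handle_create()
 * a second; the extra one is dropped above, so the buffer lives exactly
 * as long as the handles and dma-bufs that reference it.
 */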
563
564 #ifdef CONFIG_DRM_SPRD
565 struct ion_handle *ion_alloc_with_gem(struct ion_client *client, size_t len,
566                                       size_t align, unsigned int heap_id_mask,
567                                       unsigned int flags,
568                                       struct drm_gem_object *obj)
569 {
570         struct ion_handle *handle;
571
572         handle = ion_alloc(client, len, align, heap_id_mask, flags);
573         if (!IS_ERR(handle))
574                 handle->buffer->obj = obj;
575
576         return handle;
577 }
578 EXPORT_SYMBOL(ion_alloc_with_gem);
579
580 struct drm_gem_object *ion_get_gem(struct ion_handle *handle)
581 {
582         if (handle && handle->buffer)
583                 return handle->buffer->obj;
584
585         return NULL;
586 }
587 EXPORT_SYMBOL(ion_get_gem);
588 #endif
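
/*
 * ion_alloc_with_gem() simply stores the caller's GEM object in
 * buffer->obj so that the exporter can drop that reference when the
 * dma-buf is released (see ion_dma_buf_release() below); ion_get_gem()
 * returns it again.  Illustrative sketch, where "obj" is a GEM object
 * the DRM driver already owns:
 *
 *	handle = ion_alloc_with_gem(client, size, align,
 *				    heap_id_mask, flags, obj);
 */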
589
590 void ion_free(struct ion_client *client, struct ion_handle *handle)
591 {
592         bool valid_handle;
593
594         BUG_ON(client != handle->client);
595
596         mutex_lock(&client->lock);
597         valid_handle = ion_handle_validate(client, handle);
598
599         if (!valid_handle) {
600                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
601                 mutex_unlock(&client->lock);
602                 return;
603         }
604         mutex_unlock(&client->lock);
605         ion_handle_put(handle);
606 }
607 EXPORT_SYMBOL(ion_free);
608
609 int ion_phys(struct ion_client *client, struct ion_handle *handle,
610              ion_phys_addr_t *addr, size_t *len)
611 {
612         struct ion_buffer *buffer;
613         int ret;
614
615         mutex_lock(&client->lock);
616         if (!ion_handle_validate(client, handle)) {
617                 mutex_unlock(&client->lock);
618                 return -EINVAL;
619         }
620
621         buffer = handle->buffer;
622
623         if (!buffer->heap->ops->phys) {
624                 pr_err("%s: ion_phys is not implemented by this heap.\n",
625                        __func__);
626                 mutex_unlock(&client->lock);
627                 return -ENODEV;
628         }
629         mutex_unlock(&client->lock);
630         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
631         return ret;
632 }
633 EXPORT_SYMBOL(ion_phys);
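
/*
 * ion_phys() only works for heaps that implement ->phys (typically heaps
 * whose buffers are physically contiguous); other heaps return -ENODEV.
 * Illustrative sketch:
 *
 *	ion_phys_addr_t paddr;
 *	size_t size;
 *
 *	ret = ion_phys(client, handle, &paddr, &size);
 *	if (!ret)
 *		paddr/size now describe the contiguous buffer
 */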
634
635 int ion_is_phys(struct ion_client *client, struct ion_handle *handle)
636 {
637         struct ion_buffer *buffer;
638         int ret = 0;
639
640         mutex_lock(&client->lock);
641         if (!ion_handle_validate(client, handle)) {
642                 mutex_unlock(&client->lock);
643                 return -EINVAL;
644         }
645
646         buffer = handle->buffer;
647
648         if (!buffer->heap->ops->phys)
649                 ret = -1;
650
651         mutex_unlock(&client->lock);
652
653         return ret;
654 }
655 EXPORT_SYMBOL(ion_is_phys);
656
657 int ion_is_cached(struct ion_client *client, struct ion_handle *handle)
658 {
659         struct ion_buffer *buffer;
660         int cached;
661
662         mutex_lock(&client->lock);
663         if (!ion_handle_validate(client, handle)) {
664                 mutex_unlock(&client->lock);
665                 return -EINVAL;
666         }
667
668         buffer = handle->buffer;
669
670         cached = ion_buffer_cached(buffer);
671         mutex_unlock(&client->lock);
672
673         return cached;
674 }
675 EXPORT_SYMBOL(ion_is_cached);
676
677 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
678 {
679         void *vaddr;
680
681         if (buffer->kmap_cnt) {
682                 buffer->kmap_cnt++;
683                 return buffer->vaddr;
684         }
685         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
686         if (WARN_ONCE(vaddr == NULL,
687                         "heap->ops->map_kernel should return ERR_PTR on error"))
688                 return ERR_PTR(-EINVAL);
689         if (IS_ERR(vaddr))
690                 return vaddr;
691         buffer->vaddr = vaddr;
692         buffer->kmap_cnt++;
693         return vaddr;
694 }
695
696 static void *ion_handle_kmap_get(struct ion_handle *handle)
697 {
698         struct ion_buffer *buffer = handle->buffer;
699         void *vaddr;
700
701         if (handle->kmap_cnt) {
702                 handle->kmap_cnt++;
703                 return buffer->vaddr;
704         }
705         vaddr = ion_buffer_kmap_get(buffer);
706         if (IS_ERR(vaddr))
707                 return vaddr;
708         handle->kmap_cnt++;
709         return vaddr;
710 }
711
712 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
713 {
714         buffer->kmap_cnt--;
715         if (!buffer->kmap_cnt) {
716                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
717                 buffer->vaddr = NULL;
718         }
719 }
720
721 static void ion_handle_kmap_put(struct ion_handle *handle)
722 {
723         struct ion_buffer *buffer = handle->buffer;
724
725         if (!handle->kmap_cnt) {
726                 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
727                 return;
728         }
729
730         handle->kmap_cnt--;
731         if (!handle->kmap_cnt)
732                 ion_buffer_kmap_put(buffer);
733 }
734
735 int ion_map_iommu(struct ion_client *client, struct ion_handle *handle, int domain_no, unsigned long *ptr_iova)
736 {
737         struct ion_buffer *buffer;
738
739         mutex_lock(&client->lock);
740         if (!ion_handle_validate(client, handle)) {
741                 pr_err("%s: invalid handle passed to map_iommu.\n",
742                        __func__);
743                 mutex_unlock(&client->lock);
744                 return -EINVAL;
745         }
746
747         buffer = handle->buffer;
748
749         if (!handle->buffer->heap->ops->map_iommu) {
750                 pr_err("%s: map_iommu is not implemented by this heap.\n",
751                        __func__);
752                 mutex_unlock(&client->lock);
753                 return -ENODEV;
754         }
755
756         mutex_lock(&buffer->lock);
757         handle->buffer->heap->ops->map_iommu(buffer, domain_no, ptr_iova);
758         mutex_unlock(&buffer->lock);
759         mutex_unlock(&client->lock);
760         return 0;
761 }
762 EXPORT_SYMBOL(ion_map_iommu);
763
764 int ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle, int domain_no)
765 {
766         struct ion_buffer *buffer;
767
768         mutex_lock(&client->lock);
769         if (!ion_handle_validate(client, handle)) {
770                 pr_err("%s: invalid handle passed to unmap_iommu.\n",
771                        __func__);
772                 mutex_unlock(&client->lock);
773                 return -EINVAL;
774         }
775
776         buffer = handle->buffer;
777
778         if (!handle->buffer->heap->ops->unmap_iommu) {
779                 pr_err("%s: unmap_iommu is not implemented by this heap.\n",
780                        __func__);
781                 mutex_unlock(&client->lock);
782                 return -ENODEV;
783         }
784
785         mutex_lock(&buffer->lock);
786         handle->buffer->heap->ops->unmap_iommu(buffer, domain_no);
787         mutex_unlock(&buffer->lock);
788         mutex_unlock(&client->lock);
789         return 0;
790 }
791 EXPORT_SYMBOL(ion_unmap_iommu);
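
/*
 * ion_map_iommu()/ion_unmap_iommu() map a buffer into one of the SPRD
 * IOMMU domains and hand back the io-virtual address.  Domain numbering
 * is platform specific (IOMMU_GSP etc., see the CONFIG_SPRD_IOMMU block
 * in _ion_buffer_destroy()).  Illustrative sketch:
 *
 *	unsigned long iova;
 *
 *	ret = ion_map_iommu(client, handle, IOMMU_GSP, &iova);
 *	if (!ret)
 *		program the device with iova, and later:
 *	ion_unmap_iommu(client, handle, IOMMU_GSP);
 *
 * Note that the heap op's own return value is ignored by ion_map_iommu(),
 * which returns 0 for any valid handle on a heap that implements the op.
 */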
792
793 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
794 {
795         struct ion_buffer *buffer;
796         void *vaddr;
797
798         mutex_lock(&client->lock);
799         if (!ion_handle_validate(client, handle)) {
800                 pr_err("%s: invalid handle passed to map_kernel.\n",
801                        __func__);
802                 mutex_unlock(&client->lock);
803                 return ERR_PTR(-EINVAL);
804         }
805
806         buffer = handle->buffer;
807
808         if (!handle->buffer->heap->ops->map_kernel) {
809                 pr_err("%s: map_kernel is not implemented by this heap.\n",
810                        __func__);
811                 mutex_unlock(&client->lock);
812                 return ERR_PTR(-ENODEV);
813         }
814
815         mutex_lock(&buffer->lock);
816         vaddr = ion_handle_kmap_get(handle);
817         mutex_unlock(&buffer->lock);
818         mutex_unlock(&client->lock);
819         return vaddr;
820 }
821 EXPORT_SYMBOL(ion_map_kernel);
822
823 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
824 {
825         struct ion_buffer *buffer;
826
827         mutex_lock(&client->lock);
828         buffer = handle->buffer;
829         mutex_lock(&buffer->lock);
830         ion_handle_kmap_put(handle);
831         mutex_unlock(&buffer->lock);
832         mutex_unlock(&client->lock);
833 }
834 EXPORT_SYMBOL(ion_unmap_kernel);
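
/*
 * Kernel mappings are reference counted per handle and per buffer
 * (kmap_cnt), so map/unmap calls must balance.  Illustrative sketch,
 * where buffer_size stands for the length the caller allocated:
 *
 *	void *vaddr;
 *
 *	vaddr = ion_map_kernel(client, handle);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, buffer_size);
 *	ion_unmap_kernel(client, handle);
 */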
835
836 static int ion_debug_client_show(struct seq_file *s, void *unused)
837 {
838         struct ion_client *client = s->private;
839         struct rb_node *n;
840         size_t sizes[ION_NUM_HEAP_IDS] = {0};
841         const char *names[ION_NUM_HEAP_IDS] = {NULL};
842         int i;
843
844         mutex_lock(&client->lock);
845         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
846                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
847                                                      node);
848                 unsigned int id = handle->buffer->heap->id;
849
850                 if (!names[id])
851                         names[id] = handle->buffer->heap->name;
852                 sizes[id] += handle->buffer->size;
853         }
854         mutex_unlock(&client->lock);
855
856         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
857         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
858                 if (!names[i])
859                         continue;
860                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
861         }
862         return 0;
863 }
864
865 static int ion_debug_client_open(struct inode *inode, struct file *file)
866 {
867         return single_open(file, ion_debug_client_show, inode->i_private);
868 }
869
870 static const struct file_operations debug_client_fops = {
871         .open = ion_debug_client_open,
872         .read = seq_read,
873         .llseek = seq_lseek,
874         .release = single_release,
875 };
876
877 static int ion_get_client_serial(const struct rb_root *root,
878                                         const unsigned char *name)
879 {
880         int serial = -1;
881         struct rb_node *node;
882         for (node = rb_first(root); node; node = rb_next(node)) {
883                 struct ion_client *client = rb_entry(node, struct ion_client,
884                                                 node);
885                 if (strcmp(client->name, name))
886                         continue;
887                 serial = max(serial, client->display_serial);
888         }
889         return serial + 1;
890 }
891
892 struct ion_client *ion_client_create(struct ion_device *dev,
893                                      const char *name)
894 {
895         struct ion_client *client;
896         struct task_struct *task;
897         struct rb_node **p;
898         struct rb_node *parent = NULL;
899         struct ion_client *entry;
900         pid_t pid;
901         pid_t tid;
902
903         if (!name) {
904                 pr_err("%s: Name cannot be null\n", __func__);
905                 return ERR_PTR(-EINVAL);
906         }
907
908         get_task_struct(current->group_leader);
909         task_lock(current->group_leader);
910         pid = task_pid_nr(current->group_leader);
911         tid = task_pid_nr(current);
912         /* don't bother to store task struct for kernel threads,
913            they can't be killed anyway */
914         if (current->group_leader->flags & PF_KTHREAD) {
915                 put_task_struct(current->group_leader);
916                 task = NULL;
917         } else {
918                 task = current->group_leader;
919         }
920         task_unlock(current->group_leader);
921
922         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
923         if (!client)
924                 goto err_put_task_struct;
925
926         client->dev = dev;
927         client->handles = RB_ROOT;
928         idr_init(&client->idr);
929         mutex_init(&client->lock);
930         client->task = task;
931         client->pid = pid;
932         client->tid = tid;
933         client->name = kstrdup(name, GFP_KERNEL);
934         if (!client->name)
935                 goto err_free_client;
936
937         down_write(&dev->lock);
938         client->display_serial = ion_get_client_serial(&dev->clients, name);
939         client->display_name = kasprintf(
940                 GFP_KERNEL, "%s-%d", name, client->display_serial);
941         if (!client->display_name) {
942                 up_write(&dev->lock);
943                 goto err_free_client_name;
944         }
945         p = &dev->clients.rb_node;
946         while (*p) {
947                 parent = *p;
948                 entry = rb_entry(parent, struct ion_client, node);
949
950                 if (client < entry)
951                         p = &(*p)->rb_left;
952                 else if (client > entry)
953                         p = &(*p)->rb_right;
954         }
955         rb_link_node(&client->node, parent, p);
956         rb_insert_color(&client->node, &dev->clients);
957
958         client->debug_root = debugfs_create_file(client->display_name, 0664,
959                                                 dev->clients_debug_root,
960                                                 client, &debug_client_fops);
961         if (!client->debug_root) {
962                 char buf[256], *path;
963                 path = dentry_path(dev->clients_debug_root, buf, 256);
964                 pr_err("Failed to create client debugfs at %s/%s\n",
965                         path, client->display_name);
966         }
967
968         up_write(&dev->lock);
969
970         return client;
971
972 err_free_client_name:
973         kfree(client->name);
974 err_free_client:
975         kfree(client);
976 err_put_task_struct:
977         if (task)
978                 put_task_struct(current->group_leader);
979         return ERR_PTR(-ENOMEM);
980 }
981 EXPORT_SYMBOL(ion_client_create);
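
/*
 * Illustrative lifetime of a kernel-side client ("idev" stands for the
 * struct ion_device registered by the platform code, "my-driver" is a
 * placeholder name used only for debugfs output):
 *
 *	struct ion_client *client;
 *
 *	client = ion_client_create(idev, "my-driver");
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	ion_client_destroy(client);
 */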
982
983 void ion_client_destroy(struct ion_client *client)
984 {
985         struct ion_device *dev = client->dev;
986         struct rb_node *n;
987
988         pr_debug("%s: %d\n", __func__, __LINE__);
989         while ((n = rb_first(&client->handles))) {
990                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
991                                                      node);
992                 ion_handle_destroy(&handle->ref);
993         }
994
995         idr_destroy(&client->idr);
996
997         down_write(&dev->lock);
998         if (client->task)
999                 put_task_struct(client->task);
1000         rb_erase(&client->node, &dev->clients);
1001         debugfs_remove_recursive(client->debug_root);
1002         up_write(&dev->lock);
1003
1004         kfree(client->display_name);
1005         kfree(client->name);
1006         kfree(client);
1007 }
1008 EXPORT_SYMBOL(ion_client_destroy);
1009
1010 int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
1011                         unsigned long *size, unsigned int *heap_id)
1012 {
1013         struct ion_buffer *buffer;
1014         struct ion_heap *heap;
1015
1016         mutex_lock(&client->lock);
1017         if (!ion_handle_validate(client, handle)) {
1018                 pr_err("%s: invalid handle passed.\n",
1019                                 __func__);
1020                 mutex_unlock(&client->lock);
1021                 return -EINVAL;
1022         }
1023         buffer = handle->buffer;
1024         mutex_lock(&buffer->lock);
1025         heap = buffer->heap;
1026         *heap_id = (1 << heap->id);
1027         *size = buffer->size;
1028         mutex_unlock(&buffer->lock);
1029         mutex_unlock(&client->lock);
1030
1031         return 0;
1032 }
1033 EXPORT_SYMBOL(ion_handle_get_size);
1034
1035 struct sg_table *ion_sg_table(struct ion_client *client,
1036                               struct ion_handle *handle)
1037 {
1038         struct ion_buffer *buffer;
1039         struct sg_table *table;
1040
1041         mutex_lock(&client->lock);
1042         if (!ion_handle_validate(client, handle)) {
1043                 pr_err("%s: invalid handle passed to map_dma.\n",
1044                        __func__);
1045                 mutex_unlock(&client->lock);
1046                 return ERR_PTR(-EINVAL);
1047         }
1048         buffer = handle->buffer;
1049         table = buffer->sg_table;
1050         mutex_unlock(&client->lock);
1051         return table;
1052 }
1053 EXPORT_SYMBOL(ion_sg_table);
1054
1055 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1056                                        struct device *dev,
1057                                        enum dma_data_direction direction);
1058
1059 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1060                                         enum dma_data_direction direction)
1061 {
1062         struct dma_buf *dmabuf = attachment->dmabuf;
1063         struct ion_buffer *buffer = dmabuf->priv;
1064
1065         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1066         return buffer->sg_table;
1067 }
1068
1069 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1070                               struct sg_table *table,
1071                               enum dma_data_direction direction)
1072 {
1073 }
1074
1075 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1076                 size_t size, enum dma_data_direction dir)
1077 {
1078         struct scatterlist sg;
1079
1080         sg_init_table(&sg, 1);
1081         sg_set_page(&sg, page, size, 0);
1082         /*
1083          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1084          * for the targeted device, but this works on the currently targeted
1085          * hardware.
1086          */
1087         sg_dma_address(&sg) = page_to_phys(page);
1088         dma_sync_sg_for_device(dev, &sg, 1, dir);
1089 }
1090
1091 struct ion_vma_list {
1092         struct list_head list;
1093         struct vm_area_struct *vma;
1094 };
1095
1096 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1097                                        struct device *dev,
1098                                        enum dma_data_direction dir)
1099 {
1100         struct ion_vma_list *vma_list;
1101         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1102         int i;
1103
1104         pr_debug("%s: syncing for device %s\n", __func__,
1105                  dev ? dev_name(dev) : "null");
1106
1107         if (!ion_buffer_fault_user_mappings(buffer))
1108                 return;
1109
1110         mutex_lock(&buffer->lock);
1111         for (i = 0; i < pages; i++) {
1112                 struct page *page = buffer->pages[i];
1113
1114                 if (ion_buffer_page_is_dirty(page))
1115                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
1116                                                         PAGE_SIZE, dir);
1117
1118                 ion_buffer_page_clean(buffer->pages + i);
1119         }
1120         list_for_each_entry(vma_list, &buffer->vmas, list) {
1121                 struct vm_area_struct *vma = vma_list->vma;
1122
1123                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1124                                NULL);
1125         }
1126         mutex_unlock(&buffer->lock);
1127 }
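
/*
 * This is the flush side of the per-page dirty tracking set up in
 * ion_buffer_create(): pages dirtied through ion_vm_fault() are synced
 * for the device and their userspace PTEs are zapped, so the next CPU
 * access faults again and re-marks the page dirty.
 */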
1128
1129 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1130 {
1131         struct ion_buffer *buffer = vma->vm_private_data;
1132         unsigned long pfn;
1133         int ret;
1134
1135         mutex_lock(&buffer->lock);
1136         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1137         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1138
1139         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1140         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1141         mutex_unlock(&buffer->lock);
1142         if (ret)
1143                 return VM_FAULT_ERROR;
1144
1145         return VM_FAULT_NOPAGE;
1146 }
1147
1148 static void ion_vm_open(struct vm_area_struct *vma)
1149 {
1150         struct ion_buffer *buffer = vma->vm_private_data;
1151         struct ion_vma_list *vma_list;
1152
1153         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1154         if (!vma_list)
1155                 return;
1156         vma_list->vma = vma;
1157         mutex_lock(&buffer->lock);
1158         list_add(&vma_list->list, &buffer->vmas);
1159         mutex_unlock(&buffer->lock);
1160         pr_debug("%s: adding %p\n", __func__, vma);
1161 }
1162
1163 static void ion_vm_close(struct vm_area_struct *vma)
1164 {
1165         struct ion_buffer *buffer = vma->vm_private_data;
1166         struct ion_vma_list *vma_list, *tmp;
1167
1168         pr_debug("%s\n", __func__);
1169         mutex_lock(&buffer->lock);
1170         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1171                 if (vma_list->vma != vma)
1172                         continue;
1173                 list_del(&vma_list->list);
1174                 kfree(vma_list);
1175                 pr_debug("%s: deleting %p\n", __func__, vma);
1176                 break;
1177         }
1178         mutex_unlock(&buffer->lock);
1179 }
1180
1181 static struct vm_operations_struct ion_vma_ops = {
1182         .open = ion_vm_open,
1183         .close = ion_vm_close,
1184         .fault = ion_vm_fault,
1185 };
1186
1187 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1188 {
1189         struct ion_buffer *buffer = dmabuf->priv;
1190         int ret = 0;
1191
1192         if (!buffer->heap->ops->map_user) {
1193                 pr_err("%s: this heap does not define a method for mapping "
1194                        "to userspace\n", __func__);
1195                 return -EINVAL;
1196         }
1197
1198         if (ion_buffer_fault_user_mappings(buffer)) {
1199                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1200                                                         VM_DONTDUMP;
1201                 vma->vm_private_data = buffer;
1202                 vma->vm_ops = &ion_vma_ops;
1203                 ion_vm_open(vma);
1204                 return 0;
1205         }
1206
1207         if (!(buffer->flags & ION_FLAG_CACHED))
1208                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1209
1210         mutex_lock(&buffer->lock);
1211         /* now map it to userspace */
1212         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1213         mutex_unlock(&buffer->lock);
1214
1215         if (ret)
1216                 pr_err("%s: failure mapping buffer to userspace\n",
1217                        __func__);
1218
1219         return ret;
1220 }
1221
1222 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1223 {
1224         struct ion_buffer *buffer = dmabuf->priv;
1225 #ifdef CONFIG_DRM_SPRD
1226         /* drop the GEM reference before ion_buffer_put() can free the buffer */
1227         if (buffer->obj) {
1228                 drm_gem_object_unreference_unlocked(buffer->obj);
1229                 buffer->obj = NULL;
1230         }
1231 #endif
1232         ion_buffer_put(buffer);
1233 }
1234
1235 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1236 {
1237         struct ion_buffer *buffer = dmabuf->priv;
1238         return buffer->vaddr + offset * PAGE_SIZE;
1239 }
1240
1241 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1242                                void *ptr)
1243 {
1244         return;
1245 }
1246
1247 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1248                                         size_t len,
1249                                         enum dma_data_direction direction)
1250 {
1251         struct ion_buffer *buffer = dmabuf->priv;
1252         void *vaddr;
1253
1254         if (!buffer->heap->ops->map_kernel) {
1255                 pr_err("%s: map kernel is not implemented by this heap.\n",
1256                        __func__);
1257                 return -ENODEV;
1258         }
1259
1260         mutex_lock(&buffer->lock);
1261         vaddr = ion_buffer_kmap_get(buffer);
1262         mutex_unlock(&buffer->lock);
1263         if (IS_ERR(vaddr))
1264                 return PTR_ERR(vaddr);
1265         return 0;
1266 }
1267
1268 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1269                                        size_t len,
1270                                        enum dma_data_direction direction)
1271 {
1272         struct ion_buffer *buffer = dmabuf->priv;
1273
1274         mutex_lock(&buffer->lock);
1275         ion_buffer_kmap_put(buffer);
1276         mutex_unlock(&buffer->lock);
1277 }
1278
1279 static struct dma_buf_ops dma_buf_ops = {
1280         .map_dma_buf = ion_map_dma_buf,
1281         .unmap_dma_buf = ion_unmap_dma_buf,
1282         .mmap = ion_mmap,
1283         .release = ion_dma_buf_release,
1284         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1285         .end_cpu_access = ion_dma_buf_end_cpu_access,
1286         .kmap_atomic = ion_dma_buf_kmap,
1287         .kunmap_atomic = ion_dma_buf_kunmap,
1288         .kmap = ion_dma_buf_kmap,
1289         .kunmap = ion_dma_buf_kunmap,
1290 };
1291
1292 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1293                                                 struct ion_handle *handle)
1294 {
1295         struct ion_buffer *buffer;
1296         struct dma_buf *dmabuf;
1297         bool valid_handle;
1298
1299         mutex_lock(&client->lock);
1300         valid_handle = ion_handle_validate(client, handle);
1301         if (!valid_handle) {
1302                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1303                 mutex_unlock(&client->lock);
1304                 return ERR_PTR(-EINVAL);
1305         }
1306         buffer = handle->buffer;
1307         ion_buffer_get(buffer);
1308         mutex_unlock(&client->lock);
1309
1310         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1311         if (IS_ERR(dmabuf)) {
1312                 ion_buffer_put(buffer);
1313                 return dmabuf;
1314         }
1315
1316         return dmabuf;
1317 }
1318 EXPORT_SYMBOL(ion_share_dma_buf);
1319
1320 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1321 {
1322         struct dma_buf *dmabuf;
1323         int fd;
1324
1325         dmabuf = ion_share_dma_buf(client, handle);
1326         if (IS_ERR(dmabuf)) {
1327                 pr_err("%s: failed to export dma-buf (%ld)\n", __func__, PTR_ERR(dmabuf));
1328                 return PTR_ERR(dmabuf);
1329         }
1330
1331         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1332         if (fd < 0) {
1333                 pr_err("%s: failed to get dma-buf fd (%d)\n", __func__, fd);
1334                 dma_buf_put(dmabuf);
1335         }
1336
1337         return fd;
1338 }
1339 EXPORT_SYMBOL(ion_share_dma_buf_fd);
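
/*
 * Illustrative export path: the returned fd owns a dma-buf reference,
 * which in turn pins the ion_buffer, so the memory stays valid even
 * after the local handle is freed:
 *
 *	int fd;
 *
 *	fd = ion_share_dma_buf_fd(client, handle);
 *	if (fd < 0)
 *		return fd;
 *	pass fd to another process or driver
 */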
1340
1341 struct ion_handle *get_ion_handle_from_dmabuf(struct ion_client *client, struct dma_buf *dmabuf)
1342 {
1343         struct ion_buffer *buffer;
1344         struct ion_handle *handle;
1345         int ret;
1346
1347         /* if this memory came from ion */
1348         if (dmabuf->ops != &dma_buf_ops) {
1349                 pr_err("%s: can not import dmabuf from another exporter\n",
1350                                 __func__);
1351                 return ERR_PTR(-EINVAL);
1352         }
1353         buffer = dmabuf->priv;
1354
1355         mutex_lock(&client->lock);
1356         /* if a handle exists for this buffer just take a reference to it */
1357         handle = ion_handle_lookup(client, buffer);
1358         if (!IS_ERR(handle)) {
1359                 ion_handle_get(handle);
1360                 mutex_unlock(&client->lock);
1361                 goto end;
1362         }
1363         mutex_unlock(&client->lock);
1364
1365         handle = ion_handle_create(client, buffer);
1366         if (IS_ERR(handle))
1367                 goto end;
1368
1369         mutex_lock(&client->lock);
1370         ret = ion_handle_add(client, handle);
1371         mutex_unlock(&client->lock);
1372         if (ret) {
1373                 ion_handle_put(handle);
1374                 handle = ERR_PTR(ret);
1375         }
1376
1377 end:
1378         return handle;
1379 }
1380 EXPORT_SYMBOL(get_ion_handle_from_dmabuf);
1381
1382 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1383 {
1384         struct dma_buf *dmabuf;
1385         struct ion_handle *handle;
1386
1387         dmabuf = dma_buf_get(fd);
1388         if (IS_ERR(dmabuf)) {
1389                 pr_err("%s: dma_buf_get failed for fd %d (%ld)\n",
1390                                 __func__, fd, PTR_ERR(dmabuf));
1391                 return ERR_CAST(dmabuf);
1392         }
1393         handle = get_ion_handle_from_dmabuf(client, dmabuf);
1394         dma_buf_put(dmabuf);
1395         return handle;
1396 }
1397 EXPORT_SYMBOL(ion_import_dma_buf);
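
/*
 * Illustrative import path; this only accepts dma-bufs that were exported
 * by ion itself (dmabuf->ops is checked above), and returns the existing
 * handle if this client already imported the same buffer:
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_import_dma_buf(client, fd);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 */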
1398
1399 static int ion_invalidate_for_cpu(struct ion_client *client, int fd)
1400 {
1401         struct dma_buf *dmabuf;
1402         struct ion_buffer *buffer;
1403
1404         dmabuf = dma_buf_get(fd);
1405         if (IS_ERR(dmabuf)) {
1406                 pr_err("%s: dma_buf_get failed (%ld)\n",
1407                        __func__, PTR_ERR(dmabuf));
1408                 return PTR_ERR(dmabuf);
1409         }
1410
1411         /* if this memory came from ion */
1412         if (dmabuf->ops != &dma_buf_ops) {
1413                 pr_err("%s: can not sync dmabuf from another exporter\n",
1414                        __func__);
1415                 dma_buf_put(dmabuf);
1416                 return -EINVAL;
1417         }
1418         buffer = dmabuf->priv;
1419
1420         dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
1421                                buffer->sg_table->nents, DMA_FROM_DEVICE);
1422         dma_buf_put(dmabuf);
1423         return 0;
1424 }
1425
1426 static int ion_sync_for_device(struct ion_client *client, int fd)
1427 {
1428         struct dma_buf *dmabuf;
1429         struct ion_buffer *buffer;
1430
1431         dmabuf = dma_buf_get(fd);
1432         if (IS_ERR(dmabuf)) {
1433                 pr_err("%s: dma_buf_get failed for fd %d (%ld)\n", __func__, fd, PTR_ERR(dmabuf));
1434                 return PTR_ERR(dmabuf);
1435         }
1436
1437         /* if this memory came from ion */
1438         if (dmabuf->ops != &dma_buf_ops) {
1439                 pr_err("%s: can not sync dmabuf from another exporter\n",
1440                        __func__);
1441                 dma_buf_put(dmabuf);
1442                 return -EINVAL;
1443         }
1444         buffer = dmabuf->priv;
1445
1446         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1447                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1448         dma_buf_put(dmabuf);
1449         return 0;
1450 }
1451
1452 /* fix up the cases where the ioctl direction bits are incorrect */
1453 static unsigned int ion_ioctl_dir(unsigned int cmd)
1454 {
1455         switch (cmd) {
1456         case ION_IOC_SYNC:
1457         case ION_IOC_FREE:
1458         case ION_IOC_CUSTOM:
1459                 return _IOC_WRITE;
1460         default:
1461                 return _IOC_DIR(cmd);
1462         }
1463 }
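
/*
 * ION_IOC_SYNC, ION_IOC_FREE and ION_IOC_CUSTOM are declared with
 * direction bits that do not match how their data is actually used, so
 * they are forced to "write only" here: ion_ioctl() copies the argument
 * in but skips the copy back to userspace.
 */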
1464
1465 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1466 {
1467         struct ion_client *client = filp->private_data;
1468         struct ion_device *dev = client->dev;
1469         struct ion_handle *cleanup_handle = NULL;
1470         int ret = 0;
1471         unsigned int dir;
1472
1473         union {
1474                 struct ion_fd_data fd;
1475                 struct ion_allocation_data allocation;
1476                 struct ion_handle_data handle;
1477                 struct ion_custom_data custom;
1478         } data;
1479
1480         dir = ion_ioctl_dir(cmd);
1481         pr_debug("%s:cmd[0x%x]dir[0x%x]\n", __func__, cmd, dir);
1482
1483         if (_IOC_SIZE(cmd) > sizeof(data)) {
1484                 ret = -EINVAL;
1485                 goto out;
1486         }
1487
1488         if (dir & _IOC_WRITE)
1489                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) {
1490                         ret = -EFAULT;
1491                         goto out;
1492                 }
1493
1494         switch (cmd) {
1495         case ION_IOC_ALLOC:
1496         {
1497                 struct ion_handle *handle;
1498
1499                 handle = ion_alloc(client, data.allocation.len,
1500                                                 data.allocation.align,
1501                                                 data.allocation.heap_id_mask,
1502                                                 data.allocation.flags);
1503                 if (IS_ERR(handle)) {
1504                         ret = PTR_ERR(handle);
1505                         goto out;
1506                 }
1507
1508                 data.allocation.handle = handle->id;
1509
1510                 cleanup_handle = handle;
1511                 break;
1512         }
1513         case ION_IOC_FREE:
1514         {
1515                 struct ion_handle *handle;
1516
1517                 handle = ion_handle_get_by_id(client, data.handle.handle);
1518                 if (IS_ERR(handle)) {
1519                         ret = PTR_ERR(handle);
1520                         goto out;
1521                 }
1522                 ion_free(client, handle);
1523                 ion_handle_put(handle);
1524                 break;
1525         }
1526         case ION_IOC_SHARE:
1527         case ION_IOC_MAP:
1528         {
1529                 struct ion_handle *handle;
1530
1531                 handle = ion_handle_get_by_id(client, data.handle.handle);
1532                 if (IS_ERR(handle)) {
1533                         ret = PTR_ERR(handle);
1534                         goto out;
1535                 }
1536                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1537                 ion_handle_put(handle);
1538                 if (data.fd.fd < 0)
1539                         ret = data.fd.fd;
1540                 break;
1541         }
1542         case ION_IOC_IMPORT:
1543         {
1544                 struct ion_handle *handle;
1545                 handle = ion_import_dma_buf(client, data.fd.fd);
1546                 if (IS_ERR(handle))
1547                         ret = PTR_ERR(handle);
1548                 else
1549                         data.handle.handle = handle->id;
1550                 break;
1551         }
1552         case ION_IOC_INVALIDATE:
1553         {
1554                 ret = ion_invalidate_for_cpu(client, data.fd.fd);
1555                 break;
1556         }
1557         case ION_IOC_SYNC:
1558         {
1559                 ret = ion_sync_for_device(client, data.fd.fd);
1560                 break;
1561         }
1562         case ION_IOC_CUSTOM:
1563         {
1564                 if (!dev->custom_ioctl) {
1565                         ret = -ENOTTY;
1566                         goto out;
1567                 }
1568                 ret = dev->custom_ioctl(client, data.custom.cmd,
1569                                                 data.custom.arg);
1570                 break;
1571         }
1572         default:
1573                 ret = -ENOTTY;
1574                 goto out;
1575         }
1576
1577         if (dir & _IOC_READ) {
1578                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1579                         if (cleanup_handle)
1580                                 ion_free(client, cleanup_handle);
1581                         ret = -EFAULT;
1582                 }
1583         }
1584
1585 out:
1586         if (ret)
1587                 pr_info("%s:cmd[0x%x]ret[%d]\n", __func__, cmd, ret);
1588
1589         return ret;
1590 }
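
/*
 * Illustrative userspace flow (field names follow the ioctl handling
 * above; the device node is typically /dev/ion, and heap_id is a
 * platform-specific placeholder):
 *
 *	struct ion_allocation_data alloc = {
 *		.len = 1024 * 1024,
 *		.align = 4096,
 *		.heap_id_mask = 1 << heap_id,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	struct ion_fd_data share;
 *	struct ion_handle_data free_data;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);
 *	ptr = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   share.fd, 0);
 *	free_data.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_FREE, &free_data);
 */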
1591
1592 static int ion_release(struct inode *inode, struct file *file)
1593 {
1594         struct ion_client *client = file->private_data;
1595
1596         pr_debug("%s: %d\n", __func__, __LINE__);
1597         ion_client_destroy(client);
1598         return 0;
1599 }
1600
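/*
 * ion_open() - file open handler for /dev/ion; creates a per-process
 * ion_client whose debug name is the opener's thread-group pid.
 */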
1601 static int ion_open(struct inode *inode, struct file *file)
1602 {
1603         struct miscdevice *miscdev = file->private_data;
1604         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1605         struct ion_client *client;
1606         char debug_name[64];
1607
1608         pr_debug("%s: %d\n", __func__, __LINE__);
1609         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1610         client = ion_client_create(dev, debug_name);
1611         if (IS_ERR(client))
1612                 return PTR_ERR(client);
1613         file->private_data = client;
1614
1615         return 0;
1616 }
1617
1618 static const struct file_operations ion_fops = {
1619         .owner          = THIS_MODULE,
1620         .open           = ion_open,
1621         .release        = ion_release,
1622         .unlocked_ioctl = ion_ioctl,
1623         .compat_ioctl   = compat_ion_ioctl,
1624 };
1625 #if 0
1626 static size_t ion_debug_heap_total(struct ion_client *client,
1627                                    unsigned int id)
1628 {
1629         size_t size = 0;
1630         struct rb_node *n;
1631
1632         mutex_lock(&client->lock);
1633         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1634                 struct ion_handle *handle = rb_entry(n,
1635                                                      struct ion_handle,
1636                                                      node);
1637                 if (handle->buffer->heap->id == id)
1638                         size += handle->buffer->size;
1639         }
1640         mutex_unlock(&client->lock);
1641         return size;
1642 }
1643 #endif
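/*
 * ion_debug_heap_show() - debugfs seq_file handler for a heap.  Lists every
 * client handle that references a buffer from this heap (with allocation
 * time), then orphaned buffers, totals and the deferred-free backlog.
 */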
1644 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1645 {
1646         struct ion_heap *heap = s->private;
1647         struct ion_device *dev = heap->dev;
1648         struct rb_node *n;
1649         struct rb_node *r;
1650         struct tm t;
1651         size_t total_size = 0;
1652         size_t total_orphaned_size = 0;
1653
1654         seq_printf(s, "%16s %6s %6s %10s %16s\n", "client", "pid", "tid", "size", "alloc_time");
1655         seq_printf(s, "----------------------------------------------------------\n");
1656
1657         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1658                 struct ion_client *client = rb_entry(n, struct ion_client,
1659                                                      node);
1660
1661                 mutex_lock(&client->lock);
1662                 for (r = rb_first(&client->handles); r; r = rb_next(r)) {
1663                         struct ion_handle *handle = rb_entry(r,
1664                                                                  struct ion_handle,
1665                                                                  node);
1666                         struct ion_buffer *buffer = handle->buffer;
1667
1668                         if (buffer->heap->id == heap->id) {
1669                                 if (!buffer->size)
1670                                         continue;
1671                                 time_to_tm(buffer->alloc_time.tv_sec, 0, &t);
1672                                 if (client->task) {
1673                                         char task_comm[TASK_COMM_LEN];
1674
1675                                         get_task_comm(task_comm, client->task);
1676                                         seq_printf(s, "%16s %6u %6u %10zu %ld.%d.%d-%d:%d:%d.%ld\n",
1677                                                 task_comm, client->pid, client->tid, buffer->size,
1678                                                 t.tm_year + 1900, t.tm_mon + 1,
1679                                                 t.tm_mday, t.tm_hour, t.tm_min,
1680                                                 t.tm_sec, buffer->alloc_time.tv_usec);
1681                                 } else {
1682                                         seq_printf(s, "%16s %6u %6u %10zu %ld.%d.%d-%d:%d:%d.%ld\n",
1683                                                 client->name, client->pid, client->tid, buffer->size,
1684                                                 t.tm_year + 1900, t.tm_mon + 1,
1685                                                 t.tm_mday, t.tm_hour, t.tm_min,
1686                                                 t.tm_sec, buffer->alloc_time.tv_usec);
1687                                 }
1688                         }
1689                 }
1690                 mutex_unlock(&client->lock);
1691         }
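        /*
         * Buffers still present in dev->buffers but with no remaining client
         * handles are "orphaned": typically shared via dma-buf and kept
         * alive only by the importer's reference.
         */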
1692         seq_printf(s, "----------------------------------------------------------\n");
1693         seq_printf(s, "orphaned allocations (info is from last known client):"
1694                    "\n");
1695         mutex_lock(&dev->buffer_lock);
1696         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1697                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1698                                                      node);
1699                 if (buffer->heap->id != heap->id)
1700                         continue;
1701                 total_size += buffer->size;
1702                 if (!buffer->handle_count) {
1703                         time_to_tm(buffer->alloc_time.tv_sec, 0, &t);
1704                         seq_printf(s, "%16s %6u %6u %10zu %d %d %ld.%d.%d-%d:%d:%d.%ld\n",
1705                                 buffer->task_comm, buffer->pid, buffer->tid, buffer->size,
1706                                 buffer->kmap_cnt, atomic_read(&buffer->ref.refcount),
1707                                 t.tm_year + 1900, t.tm_mon + 1,
1708                                 t.tm_mday, t.tm_hour, t.tm_min,
1709                                 t.tm_sec, buffer->alloc_time.tv_usec);
1710                         total_orphaned_size += buffer->size;
1711                 }
1712         }
1713         mutex_unlock(&dev->buffer_lock);
1714         seq_printf(s, "----------------------------------------------------------\n");
1715         seq_printf(s, "%16s %22zu\n", "total orphaned",
1716                    total_orphaned_size);
1717         seq_printf(s, "%16s %22zu\n", "total", total_size);
1718         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1719                 seq_printf(s, "%16s %22zu\n", "deferred free",
1720                                 heap->free_list_size);
1721         seq_printf(s, "----------------------------------------------------------\n");
1722
1723         if (heap->debug_show)
1724                 heap->debug_show(heap, s, unused);
1725
1726         return 0;
1727 }
1728
1729 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1730 {
1731         return single_open(file, ion_debug_heap_show, inode->i_private);
1732 }
1733
1734 static const struct file_operations debug_heap_fops = {
1735         .open = ion_debug_heap_open,
1736         .read = seq_read,
1737         .llseek = seq_lseek,
1738         .release = single_release,
1739 };
1740
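/*
 * When DEBUG_HEAP_SHRINKER is defined, each heap with a shrinker also gets a
 * <heap>_shrink debugfs file: reading it reports the number of freeable
 * objects, writing a non-zero value forces the shrinker to drain them.
 */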
1741 #ifdef DEBUG_HEAP_SHRINKER
1742 static int debug_shrink_set(void *data, u64 val)
1743 {
1744         struct ion_heap *heap = data;
1745         struct shrink_control sc;
1746         int objs;
1747
1748         sc.gfp_mask = -1;
1749         sc.nr_to_scan = 0;
1750
1751         if (!val)
1752                 return 0;
1753
1754         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1755         sc.nr_to_scan = objs;
1756
1757         heap->shrinker.shrink(&heap->shrinker, &sc);
1758         return 0;
1759 }
1760
1761 static int debug_shrink_get(void *data, u64 *val)
1762 {
1763         struct ion_heap *heap = data;
1764         struct shrink_control sc;
1765         int objs;
1766
1767         sc.gfp_mask = -1;
1768         sc.nr_to_scan = 0;
1769
1770         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1771         *val = objs;
1772         return 0;
1773 }
1774
1775 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1776                         debug_shrink_set, "%llu\n");
1777 #endif
1778
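/*
 * ion_device_add_heap() - register a heap with the ion device.  Heaps that
 * set ION_HEAP_FLAG_DEFER_FREE or provide a shrink op get deferred-free and
 * shrinker support, and each heap gets a debugfs file backed by
 * ion_debug_heap_show() above.
 */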
1779 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1780 {
1781         struct dentry *debug_file;
1782
1783         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1784             !heap->ops->unmap_dma)
1785                 pr_err("%s: can not add heap with invalid ops struct.\n",
1786                        __func__);
1787
1788         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1789                 ion_heap_init_deferred_free(heap);
1790
1791         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1792                 ion_heap_init_shrinker(heap);
1793
1794         heap->dev = dev;
1795         down_write(&dev->lock);
1796         /* use negative heap->id to reverse the priority -- when traversing
1797            the list later attempt higher id numbers first */
1798         plist_node_init(&heap->node, -heap->id);
1799         plist_add(&heap->node, &dev->heaps);
1800         debug_file = debugfs_create_file(heap->name, 0664,
1801                                         dev->heaps_debug_root, heap,
1802                                         &debug_heap_fops);
1803
1804         if (!debug_file) {
1805                 char buf[256], *path;
1806                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1807                 pr_err("Failed to create heap debugfs at %s/%s\n",
1808                         path, heap->name);
1809         }
1810
1811 #ifdef DEBUG_HEAP_SHRINKER
1812         if (heap->shrinker.shrink) {
1813                 char debug_name[64];
1814
1815                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1816                 debug_file = debugfs_create_file(
1817                         debug_name, 0644, dev->heaps_debug_root, heap,
1818                         &debug_shrink_fops);
1819                 if (!debug_file) {
1820                         char buf[256], *path;
1821                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1822                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1823                                 path, debug_name);
1824                 }
1825         }
1826 #endif
1827         up_write(&dev->lock);
1828 }
1829
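/*
 * ion_device_create() - allocate the ion_device, register the /dev/ion misc
 * device and set up the debugfs hierarchy (ion/heaps and ion/clients).
 * debugfs failures are logged but are not fatal.
 */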
1830 struct ion_device *ion_device_create(long (*custom_ioctl)
1831                                      (struct ion_client *client,
1832                                       unsigned int cmd,
1833                                       unsigned long arg))
1834 {
1835         struct ion_device *idev;
1836         int ret;
1837
1838         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1839         if (!idev)
1840                 return ERR_PTR(-ENOMEM);
1841
1842         idev->dev.minor = MISC_DYNAMIC_MINOR;
1843         idev->dev.name = "ion";
1844         idev->dev.fops = &ion_fops;
1845         idev->dev.parent = NULL;
1846         ret = misc_register(&idev->dev);
1847         if (ret) {
1848                 pr_err("ion: failed to register misc device.\n");
                     kfree(idev);    /* don't leak the partially set up device */
1849                 return ERR_PTR(ret);
1850         }
1851
1852         idev->debug_root = debugfs_create_dir("ion", NULL);
1853         if (!idev->debug_root) {
1854                 pr_err("ion: failed to create debugfs root directory.\n");
1855                 goto debugfs_done;
1856         }
1857         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1858         if (!idev->heaps_debug_root) {
1859                 pr_err("ion: failed to create debugfs heaps directory.\n");
1860                 goto debugfs_done;
1861         }
1862         idev->clients_debug_root = debugfs_create_dir("clients",
1863                                                 idev->debug_root);
1864         if (!idev->clients_debug_root)
1865                 pr_err("ion: failed to create debugfs clients directory.\n");
1866
1867 debugfs_done:
1868
1869         idev->custom_ioctl = custom_ioctl;
1870         idev->buffers = RB_ROOT;
1871         mutex_init(&idev->buffer_lock);
1872         init_rwsem(&idev->lock);
1873         plist_head_init(&idev->heaps);
1874         idev->clients = RB_ROOT;
1875         return idev;
1876 }
1877
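/*
 * ion_device_destroy() - unregister the misc device and remove its debugfs
 * tree.
 */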
1878 void ion_device_destroy(struct ion_device *dev)
1879 {
1880         misc_deregister(&dev->dev);
1881         debugfs_remove_recursive(dev->debug_root);
1882         /* XXX need to free the heaps and clients ? */
1883         kfree(dev);
1884 }
1885
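/*
 * ion_reserve() - carve out physical memory for the platform heaps at early
 * boot: heaps with a fixed base are reserved in place, heaps without one get
 * a memblock allocation and their base filled in.
 */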
1886 void __init ion_reserve(struct ion_platform_data *data)
1887 {
1888         int i;
1889
1890         for (i = 0; i < data->nr; i++) {
1891                 if (data->heaps[i].size == 0)
1892                         continue;
1893
1894                 if (data->heaps[i].base == 0) {
1895                         phys_addr_t paddr;
1896                         paddr = memblock_alloc_base(data->heaps[i].size,
1897                                                     data->heaps[i].align,
1898                                                     MEMBLOCK_ALLOC_ANYWHERE);
1899                         if (!paddr) {
1900                                 pr_err("%s: error allocating memblock for "
1901                                        "heap %d\n",
1902                                         __func__, i);
1903                                 continue;
1904                         }
1905                         data->heaps[i].base = paddr;
1906                 } else {
1907                         int ret = memblock_reserve(data->heaps[i].base,
1908                                                data->heaps[i].size);
1909                         if (ret)
1910                                 pr_err("memblock reserve of %zx@%lx failed\n",
1911                                        data->heaps[i].size,
1912                                        data->heaps[i].base);
1913                 }
1914                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1915                         data->heaps[i].name,
1916                         data->heaps[i].base,
1917                         data->heaps[i].size);
1918         }
1919 }