profile/mobile/platform/kernel/linux-3.10-sc7730.git: drivers/staging/android/ion/ion.c
1 /*
2
3  * drivers/gpu/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
21 #include <linux/fs.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
28 #include <linux/mm.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/time.h>
39 #ifdef CONFIG_DRM_SPRD
40 #include <drm/drmP.h>
41 #endif
42
43 #include "ion.h"
44 #include "ion_priv.h"
45 #include "compat_ion.h"
46
47 #ifdef CONFIG_ION_SPRD
48 #define DEBUG_HEAP_SHRINKER
49 #endif
50
51 /**
52  * struct ion_device - the metadata of the ion device node
53  * @dev:                the actual misc device
54  * @buffers:            an rb tree of all the existing buffers
55  * @buffer_lock:        lock protecting the tree of buffers
56  * @lock:               rwsem protecting the tree of heaps and clients
57  * @heaps:              priority-sorted plist of all the heaps in the system
58  * @clients:            rb tree of all the clients attached to this device
59  */
60 struct ion_device {
61         struct miscdevice dev;
62         struct rb_root buffers;
63         struct mutex buffer_lock;
64         struct rw_semaphore lock;
65         struct plist_head heaps;
66         long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
67                               unsigned long arg);
68         struct rb_root clients;
69         struct dentry *debug_root;
70         struct dentry *heaps_debug_root;
71         struct dentry *clients_debug_root;
72 };
73
74 /**
75  * struct ion_client - a process/hw block local address space
76  * @node:               node in the tree of all clients
77  * @dev:                backpointer to ion device
78  * @handles:            an rb tree of all the handles in this client
79  * @idr:                an idr space for allocating handle ids
80  * @lock:               lock protecting the tree of handles
81  * @name:               used for debugging
82  * @display_name:       used for debugging (unique version of @name)
83  * @display_serial:     used for debugging (to make display_name unique)
84  * @task:               used for debugging
85  *
86  * A client represents a list of buffers this client may access.
87  * The mutex stored here is used to protect both the tree of handles
88  * and the handles themselves, and should be held while modifying either.
89  */
90 struct ion_client {
91         struct rb_node node;
92         struct ion_device *dev;
93         struct rb_root handles;
94         struct idr idr;
95         struct mutex lock;
96         const char *name;
97         char *display_name;
98         int display_serial;
99         struct task_struct *task;
100         pid_t pid;
101         pid_t tid;
102         struct dentry *debug_root;
103 };
104
105 /**
106  * ion_handle - a client local reference to a buffer
107  * @ref:                reference count
108  * @client:             back pointer to the client the buffer resides in
109  * @buffer:             pointer to the buffer
110  * @node:               node in the client's handle rbtree
111  * @kmap_cnt:           count of times this client has mapped to kernel
112  * @id:                 client-unique id allocated by client->idr
113  *
114  * Modifications to node and kmap_cnt should be protected by the
115  * lock in the client.  Other fields are never changed after initialization.
116  */
117 struct ion_handle {
118         struct kref ref;
119         struct ion_client *client;
120         struct ion_buffer *buffer;
121         struct rb_node node;
122         unsigned int kmap_cnt;
123         int id;
124 };
125
126 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
127 {
128         return (buffer->flags & ION_FLAG_CACHED) &&
129                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
130 }
131
132 bool ion_buffer_cached(struct ion_buffer *buffer)
133 {
134         return !!(buffer->flags & ION_FLAG_CACHED);
135 }
136
137 static inline struct page *ion_buffer_page(struct page *page)
138 {
139         return (struct page *)((unsigned long)page & ~(1UL));
140 }
141
142 static inline bool ion_buffer_page_is_dirty(struct page *page)
143 {
144         return !!((unsigned long)page & 1UL);
145 }
146
147 static inline void ion_buffer_page_dirty(struct page **page)
148 {
149         *page = (struct page *)((unsigned long)(*page) | 1UL);
150 }
151
152 static inline void ion_buffer_page_clean(struct page **page)
153 {
154         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
155 }
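
/*
 * Illustrative sketch (not part of the driver): the helpers above hide a
 * per-page dirty flag in bit 0 of each struct page pointer stored in
 * buffer->pages, which is safe because the pointers are at least
 * word-aligned.  The names "buffer", "i" and do_cache_maintenance() below
 * are hypothetical.
 */
#if 0
        struct page **pages = buffer->pages;
        int i = 0;

        ion_buffer_page_dirty(&pages[i]);               /* set the tag bit */
        if (ion_buffer_page_is_dirty(pages[i]))
                /* strip the tag to recover the real struct page pointer */
                do_cache_maintenance(ion_buffer_page(pages[i]));
        ion_buffer_page_clean(&pages[i]);               /* clear the tag again */
#endif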
156
157 /* this function should only be called while dev->lock is held */
158 static void ion_buffer_add(struct ion_device *dev,
159                            struct ion_buffer *buffer)
160 {
161         struct rb_node **p = &dev->buffers.rb_node;
162         struct rb_node *parent = NULL;
163         struct ion_buffer *entry;
164
165         while (*p) {
166                 parent = *p;
167                 entry = rb_entry(parent, struct ion_buffer, node);
168
169                 if (buffer < entry) {
170                         p = &(*p)->rb_left;
171                 } else if (buffer > entry) {
172                         p = &(*p)->rb_right;
173                 } else {
174                         pr_err("%s: buffer already found.\n", __func__);
175                         BUG();
176                 }
177         }
178
179         rb_link_node(&buffer->node, parent, p);
180         rb_insert_color(&buffer->node, &dev->buffers);
181 }
182
183 /* this function should only be called while dev->lock is held */
184 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
185                                      struct ion_device *dev,
186                                      unsigned long len,
187                                      unsigned long align,
188                                      unsigned long flags)
189 {
190         struct ion_buffer *buffer;
191         struct sg_table *table;
192         struct scatterlist *sg;
193         struct timeval time;
194         int i, ret;
195
196         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
197         if (!buffer)
198                 return ERR_PTR(-ENOMEM);
199
200         buffer->heap = heap;
201         buffer->flags = flags;
202         kref_init(&buffer->ref);
203
204         ret = heap->ops->allocate(heap, buffer, len, align, flags);
205
206         if (ret) {
207                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
208                         goto err2;
209
210                 ion_heap_freelist_drain(heap, 0);
211                 ret = heap->ops->allocate(heap, buffer, len, align,
212                                           flags);
213                 if (ret)
214                         goto err2;
215         }
216
217         buffer->dev = dev;
218         buffer->size = len;
219
220         table = heap->ops->map_dma(heap, buffer);
221         if (WARN_ONCE(table == NULL,
222                         "heap->ops->map_dma should return ERR_PTR on error"))
223                 table = ERR_PTR(-EINVAL);
224         if (IS_ERR(table)) {
225                 heap->ops->free(buffer);
226                 kfree(buffer);
227                 return ERR_PTR(PTR_ERR(table));
228         }
229         buffer->sg_table = table;
230         if (ion_buffer_fault_user_mappings(buffer)) {
231                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
232                 struct scatterlist *sg;
233                 int i, j, k = 0;
234
235                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
236                 if (!buffer->pages) {
237                         ret = -ENOMEM;
238                         goto err1;
239                 }
240
241                 for_each_sg(table->sgl, sg, table->nents, i) {
242                         struct page *page = sg_page(sg);
243
244                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
245                                 buffer->pages[k++] = page++;
246                 }
247
248                 if (ret)
249                         goto err;
250         }
251
252         buffer->dev = dev;
253         buffer->size = len;
254         INIT_LIST_HEAD(&buffer->vmas);
255         mutex_init(&buffer->lock);
256         /* this will set up dma addresses for the sglist -- it is not
257            technically correct as per the dma api -- a specific
258            device isn't really taking ownership here.  However, in practice on
259            our systems the only dma_address space is physical addresses.
260            Additionally, we can't afford the overhead of invalidating every
261            allocation via dma_map_sg. The implicit contract here is that
262            memory coming from the heaps is ready for dma, i.e. if it has a
263            cached mapping that mapping has been invalidated */
264         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
265                 sg_dma_address(sg) = sg_phys(sg);
266         mutex_lock(&dev->buffer_lock);
267         ion_buffer_add(dev, buffer);
268         mutex_unlock(&dev->buffer_lock);
269
270         do_gettimeofday(&time);
271         buffer->alloc_time = time;
272         return buffer;
273
274 err:
275         heap->ops->unmap_dma(heap, buffer);
276         heap->ops->free(buffer);
277 err1:
278         if (buffer->pages)
279                 vfree(buffer->pages);
280 err2:
281         kfree(buffer);
282         return ERR_PTR(ret);
283 }
284
285 void ion_buffer_destroy(struct ion_buffer *buffer)
286 {
287         if (WARN_ON(buffer->kmap_cnt > 0))
288                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
289         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
290         buffer->heap->ops->free(buffer);
291         if (buffer->pages)
292                 vfree(buffer->pages);
293         kfree(buffer);
294 }
295
296 static void _ion_buffer_destroy(struct kref *kref)
297 {
298         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
299         struct ion_heap *heap = buffer->heap;
300         struct ion_device *dev = buffer->dev;
301
302 #if defined(CONFIG_SPRD_IOMMU)
303
304         int i;
305         for (i = IOMMU_GSP; i < IOMMU_MAX; i++) {
306                 if (buffer->iomap_cnt[i] > 0) {
307                         buffer->iomap_cnt[i] = 0;
308                         sprd_iova_unmap(i, buffer->iova[i], buffer->size);
309                         sprd_iova_free(i, buffer->iova[i], buffer->size);
311                 }
312         }
313 #endif
314
315         mutex_lock(&dev->buffer_lock);
316         rb_erase(&buffer->node, &dev->buffers);
317         mutex_unlock(&dev->buffer_lock);
318
319         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
320                 ion_heap_freelist_add(heap, buffer);
321         else
322                 ion_buffer_destroy(buffer);
323 }
324
325 static void ion_buffer_get(struct ion_buffer *buffer)
326 {
327         kref_get(&buffer->ref);
328 }
329
330 static int ion_buffer_put(struct ion_buffer *buffer)
331 {
332         return kref_put(&buffer->ref, _ion_buffer_destroy);
333 }
334
335 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
336 {
337         mutex_lock(&buffer->lock);
338         buffer->handle_count++;
339         mutex_unlock(&buffer->lock);
340 }
341
342 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
343 {
344         /*
345          * when a buffer is removed from a handle, if it is not in
346          * any other handles, copy the taskcomm and the pid of the
347          * process it's being removed from into the buffer.  At this
348          * point there will be no way to track what processes this buffer is
349          * being used by; it only exists as a dma_buf file descriptor.
350          * The taskcomm and pid can provide a debug hint as to where this fd
351          * is in the system.
352          */
353         mutex_lock(&buffer->lock);
354         buffer->handle_count--;
355         BUG_ON(buffer->handle_count < 0);
356         if (!buffer->handle_count) {
357                 struct task_struct *task;
358
359                 task = current->group_leader;
360                 get_task_comm(buffer->task_comm, task);
361                 buffer->pid = task_pid_nr(task);
362                 buffer->tid = task_pid_nr(current);
363         }
364         mutex_unlock(&buffer->lock);
365 }
366
367 static struct ion_handle *ion_handle_create(struct ion_client *client,
368                                      struct ion_buffer *buffer)
369 {
370         struct ion_handle *handle;
371
372         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
373         if (!handle)
374                 return ERR_PTR(-ENOMEM);
375         kref_init(&handle->ref);
376         RB_CLEAR_NODE(&handle->node);
377         handle->client = client;
378         ion_buffer_get(buffer);
379         ion_buffer_add_to_handle(buffer);
380         handle->buffer = buffer;
381
382         return handle;
383 }
384
385 static void ion_handle_kmap_put(struct ion_handle *);
386
387 static void ion_handle_destroy(struct kref *kref)
388 {
389         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
390         struct ion_client *client = handle->client;
391         struct ion_buffer *buffer = handle->buffer;
392
393         mutex_lock(&buffer->lock);
394         while (handle->kmap_cnt)
395                 ion_handle_kmap_put(handle);
396         mutex_unlock(&buffer->lock);
397
398         idr_remove(&client->idr, handle->id);
399         if (!RB_EMPTY_NODE(&handle->node))
400                 rb_erase(&handle->node, &client->handles);
401
402         ion_buffer_remove_from_handle(buffer);
403         ion_buffer_put(buffer);
404
405         kfree(handle);
406 }
407
408 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
409 {
410         return handle->buffer;
411 }
412
413 static void ion_handle_get(struct ion_handle *handle)
414 {
415         kref_get(&handle->ref);
416 }
417
418 static int ion_handle_put(struct ion_handle *handle)
419 {
420         struct ion_client *client = handle->client;
421         int ret;
422
423         mutex_lock(&client->lock);
424         ret = kref_put(&handle->ref, ion_handle_destroy);
425         mutex_unlock(&client->lock);
426
427         return ret;
428 }
429
430 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
431                                             struct ion_buffer *buffer)
432 {
433         struct rb_node *n = client->handles.rb_node;
434
435         while (n) {
436                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
437                 if (buffer < entry->buffer)
438                         n = n->rb_left;
439                 else if (buffer > entry->buffer)
440                         n = n->rb_right;
441                 else
442                         return entry;
443         }
444         return ERR_PTR(-EINVAL);
445 }
446
447 static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
448                                                 int id)
449 {
450         struct ion_handle *handle;
451
452         mutex_lock(&client->lock);
453         handle = idr_find(&client->idr, id);
454         if (handle)
455                 ion_handle_get(handle);
456         mutex_unlock(&client->lock);
457
458         return handle ? handle : ERR_PTR(-EINVAL);
459 }
460
461 static bool ion_handle_validate(struct ion_client *client,
462                                 struct ion_handle *handle)
463 {
464         WARN_ON(!mutex_is_locked(&client->lock));
465         return (idr_find(&client->idr, handle->id) == handle);
466 }
467
468 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
469 {
470         int id;
471         struct rb_node **p = &client->handles.rb_node;
472         struct rb_node *parent = NULL;
473         struct ion_handle *entry;
474
475         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
476         if (id < 0)
477                 return id;
478
479         handle->id = id;
480
481         while (*p) {
482                 parent = *p;
483                 entry = rb_entry(parent, struct ion_handle, node);
484
485                 if (handle->buffer < entry->buffer)
486                         p = &(*p)->rb_left;
487                 else if (handle->buffer > entry->buffer)
488                         p = &(*p)->rb_right;
489                 else
490                         WARN(1, "%s: buffer already found.\n", __func__);
491         }
492
493         rb_link_node(&handle->node, parent, p);
494         rb_insert_color(&handle->node, &client->handles);
495
496         return 0;
497 }
498
499 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
500                              size_t align, unsigned int heap_id_mask,
501                              unsigned int flags)
502 {
503         struct ion_handle *handle;
504         struct ion_device *dev = client->dev;
505         struct ion_buffer *buffer = NULL;
506         struct ion_heap *heap;
507         int ret;
508
509         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
510                  len, align, heap_id_mask, flags);
511         /*
512          * traverse the list of heaps available in this system in priority
513          * order.  If the heap type is supported by the client and matches the
514          * request of the caller, allocate from it.  Repeat until the allocation
515          * succeeds or all heaps have been tried.
516          */
517         len = PAGE_ALIGN(len);
518
519         if (!len)
520                 return ERR_PTR(-EINVAL);
521
522         down_read(&dev->lock);
523         plist_for_each_entry(heap, &dev->heaps, node) {
524                 /* if the caller didn't specify this heap id */
525                 if (!((1 << heap->id) & heap_id_mask))
526                         continue;
527                 buffer = ion_buffer_create(heap, dev, len, align, flags);
528                 if (!IS_ERR(buffer))
529                         break;
530         }
531         up_read(&dev->lock);
532
533         if (buffer == NULL) {
534                 pr_err("%s: no matching heap found, buffer is NULL\n", __func__);
535                 return ERR_PTR(-ENODEV);
536         }
537
538         if (IS_ERR(buffer)) {
539                 pr_err("%s: buffer allocation failed (%ld)\n", __func__, PTR_ERR(buffer));
540                 return ERR_PTR(PTR_ERR(buffer));
541         }
542
543         handle = ion_handle_create(client, buffer);
544
545         /*
546          * ion_buffer_create will create a buffer with a ref_cnt of 1,
547          * and ion_handle_create will take a second reference, drop one here
548          */
549         ion_buffer_put(buffer);
550
551         if (IS_ERR(handle)) {
552                 pr_err("%s: failed to create handle (%ld)\n", __func__, PTR_ERR(handle));
553                 return handle;
554         }
555
556         mutex_lock(&client->lock);
557         ret = ion_handle_add(client, handle);
558         mutex_unlock(&client->lock);
559         if (ret) {
560                 ion_handle_put(handle);
561                 handle = ERR_PTR(ret);
562         }
563
564         return handle;
565 }
566 EXPORT_SYMBOL(ion_alloc);
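
/*
 * Illustrative sketch (not part of the driver): a typical in-kernel user
 * allocates through a client and drops the handle when done.  The ion
 * device pointer "idev", the heap id mask and the flags below are
 * assumptions made for this example; real callers pick values matching the
 * heaps registered on their platform.
 */
#if 0
        struct ion_client *client;
        struct ion_handle *handle;

        client = ion_client_create(idev, "example-client");
        if (IS_ERR(client))
                return PTR_ERR(client);

        /* one page, page aligned, from heap id 0, uncached */
        handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, 1 << 0, 0);
        if (IS_ERR(handle)) {
                ion_client_destroy(client);
                return PTR_ERR(handle);
        }

        /* ... access the memory via ion_map_kernel() or ion_sg_table() ... */

        ion_free(client, handle);
        ion_client_destroy(client);
#endif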
567
568 #ifdef CONFIG_DRM_SPRD
569 struct ion_handle *ion_alloc_with_gem(struct ion_client *client, size_t len,
570                                       size_t align, unsigned int heap_id_mask,
571                                       unsigned int flags,
572                                       struct drm_gem_object *obj)
573 {
574         struct ion_handle *handle;
575
576         handle = ion_alloc(client, len, align, heap_id_mask, flags);
577         if (!IS_ERR(handle))
578                 handle->buffer->obj = obj;
579
580         return handle;
581 }
582 EXPORT_SYMBOL(ion_alloc_with_gem);
583
584 struct drm_gem_object *ion_get_gem(struct ion_handle *handle)
585 {
586         if (handle && handle->buffer)
587                 return handle->buffer->obj;
588
589         return NULL;
590 }
591 EXPORT_SYMBOL(ion_get_gem);
592 #endif
593
594 void ion_free(struct ion_client *client, struct ion_handle *handle)
595 {
596         bool valid_handle;
597
598         BUG_ON(client != handle->client);
599
600         mutex_lock(&client->lock);
601         valid_handle = ion_handle_validate(client, handle);
602
603         if (!valid_handle) {
604                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
605                 mutex_unlock(&client->lock);
606                 return;
607         }
608         mutex_unlock(&client->lock);
609         ion_handle_put(handle);
610 }
611 EXPORT_SYMBOL(ion_free);
612
613 int ion_phys(struct ion_client *client, struct ion_handle *handle,
614              ion_phys_addr_t *addr, size_t *len)
615 {
616         struct ion_buffer *buffer;
617         int ret;
618
619         mutex_lock(&client->lock);
620         if (!ion_handle_validate(client, handle)) {
621                 mutex_unlock(&client->lock);
622                 return -EINVAL;
623         }
624
625         buffer = handle->buffer;
626
627         if (!buffer->heap->ops->phys) {
628                 pr_err("%s: ion_phys is not implemented by this heap.\n",
629                        __func__);
630                 mutex_unlock(&client->lock);
631                 return -ENODEV;
632         }
633         mutex_unlock(&client->lock);
634         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
635         return ret;
636 }
637 EXPORT_SYMBOL(ion_phys);
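
/*
 * Illustrative sketch (not part of the driver): ion_phys() only succeeds on
 * heaps that implement ->phys, i.e. physically contiguous ones such as
 * carveout-style heaps.  "client" and "handle" are assumed to exist.
 */
#if 0
        ion_phys_addr_t pa;
        size_t size;

        if (!ion_phys(client, handle, &pa, &size))
                pr_info("buffer at 0x%lx, %zu bytes\n", pa, size);
#endif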
638
639 int ion_is_phys(struct ion_client *client, struct ion_handle *handle)
640 {
641         struct ion_buffer *buffer;
642         int ret = 0;
643
644         mutex_lock(&client->lock);
645         if (!ion_handle_validate(client, handle)) {
646                 mutex_unlock(&client->lock);
647                 return -EINVAL;
648         }
649
650         buffer = handle->buffer;
651
652         if (!buffer->heap->ops->phys)
653                 ret = -1;
654
655         mutex_unlock(&client->lock);
656
657         return ret;
658 }
659 EXPORT_SYMBOL(ion_is_phys);
660
661 int ion_is_cached(struct ion_client *client, struct ion_handle *handle)
662 {
663         struct ion_buffer *buffer;
664         int cached;
665
666         mutex_lock(&client->lock);
667         if (!ion_handle_validate(client, handle)) {
668                 mutex_unlock(&client->lock);
669                 return -EINVAL;
670         }
671
672         buffer = handle->buffer;
673
674         cached = ion_buffer_cached(buffer);
675         mutex_unlock(&client->lock);
676
677         return cached;
678 }
679 EXPORT_SYMBOL(ion_is_cached);
680
681 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
682 {
683         void *vaddr;
684
685         if (buffer->kmap_cnt) {
686                 buffer->kmap_cnt++;
687                 return buffer->vaddr;
688         }
689         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
690         if (WARN_ONCE(vaddr == NULL,
691                         "heap->ops->map_kernel should return ERR_PTR on error"))
692                 return ERR_PTR(-EINVAL);
693         if (IS_ERR(vaddr))
694                 return vaddr;
695         buffer->vaddr = vaddr;
696         buffer->kmap_cnt++;
697         return vaddr;
698 }
699
700 static void *ion_handle_kmap_get(struct ion_handle *handle)
701 {
702         struct ion_buffer *buffer = handle->buffer;
703         void *vaddr;
704
705         if (handle->kmap_cnt) {
706                 handle->kmap_cnt++;
707                 return buffer->vaddr;
708         }
709         vaddr = ion_buffer_kmap_get(buffer);
710         if (IS_ERR(vaddr))
711                 return vaddr;
712         handle->kmap_cnt++;
713         return vaddr;
714 }
715
716 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
717 {
718         buffer->kmap_cnt--;
719         if (!buffer->kmap_cnt) {
720                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
721                 buffer->vaddr = NULL;
722         }
723 }
724
725 static void ion_handle_kmap_put(struct ion_handle *handle)
726 {
727         struct ion_buffer *buffer = handle->buffer;
728
729         if (!handle->kmap_cnt) {
730                 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
731                 return;
732         }
733
734         handle->kmap_cnt--;
735         if (!handle->kmap_cnt)
736                 ion_buffer_kmap_put(buffer);
737 }
738
739 int ion_map_iommu(struct ion_client *client, struct ion_handle *handle, int domain_no, unsigned long *ptr_iova)
740 {
741         struct ion_buffer *buffer;
742
743         mutex_lock(&client->lock);
744         if (!ion_handle_validate(client, handle)) {
745                 pr_err("%s: invalid handle passed to map_iommu.\n",
746                        __func__);
747                 mutex_unlock(&client->lock);
748                 return -EINVAL;
749         }
750
751         buffer = handle->buffer;
752
753         if (!handle->buffer->heap->ops->map_iommu) {
754                 pr_err("%s: map_iommu is not implemented by this heap.\n",
755                        __func__);
756                 mutex_unlock(&client->lock);
757                 return -ENODEV;
758         }
759
760         mutex_lock(&buffer->lock);
761         handle->buffer->heap->ops->map_iommu(buffer, domain_no, ptr_iova);
762         mutex_unlock(&buffer->lock);
763         mutex_unlock(&client->lock);
764         return 0;
765 }
766 EXPORT_SYMBOL(ion_map_iommu);
767
768 int ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle, int domain_no)
769 {
770         struct ion_buffer *buffer;
771
772         mutex_lock(&client->lock);
773         if (!ion_handle_validate(client, handle)) {
774                 pr_err("%s: invalid handle passed to unmap_iommu.\n",
775                        __func__);
776                 mutex_unlock(&client->lock);
777                 return -EINVAL;
778         }
779
780         buffer = handle->buffer;
781
782         if (!handle->buffer->heap->ops->unmap_iommu) {
783                 pr_err("%s: unmap_iommu is not implemented by this heap.\n",
784                        __func__);
785                 mutex_unlock(&client->lock);
786                 return -ENODEV;
787         }
788
789         mutex_lock(&buffer->lock);
790         handle->buffer->heap->ops->unmap_iommu(buffer, domain_no);
791         mutex_unlock(&buffer->lock);
792         mutex_unlock(&client->lock);
793         return 0;
794 }
795 EXPORT_SYMBOL(ion_unmap_iommu);
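
/*
 * Illustrative sketch (not part of the driver): on SPRD platforms a driver
 * maps a buffer into one of the IOMMU domains to obtain a device virtual
 * address and unmaps it once the hardware is finished.  "client" and
 * "handle" are assumed to exist; IOMMU_GSP is just an example domain.
 */
#if 0
        unsigned long iova;

        if (!ion_map_iommu(client, handle, IOMMU_GSP, &iova)) {
                /* ... hand iova to the hardware block ... */
                ion_unmap_iommu(client, handle, IOMMU_GSP);
        }
#endif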
796
797 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
798 {
799         struct ion_buffer *buffer;
800         void *vaddr;
801
802         mutex_lock(&client->lock);
803         if (!ion_handle_validate(client, handle)) {
804                 pr_err("%s: invalid handle passed to map_kernel.\n",
805                        __func__);
806                 mutex_unlock(&client->lock);
807                 return ERR_PTR(-EINVAL);
808         }
809
810         buffer = handle->buffer;
811
812         if (!handle->buffer->heap->ops->map_kernel) {
813                 pr_err("%s: map_kernel is not implemented by this heap.\n",
814                        __func__);
815                 mutex_unlock(&client->lock);
816                 return ERR_PTR(-ENODEV);
817         }
818
819         mutex_lock(&buffer->lock);
820         vaddr = ion_handle_kmap_get(handle);
821         mutex_unlock(&buffer->lock);
822         mutex_unlock(&client->lock);
823         return vaddr;
824 }
825 EXPORT_SYMBOL(ion_map_kernel);
826
827 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
828 {
829         struct ion_buffer *buffer;
830
831         mutex_lock(&client->lock);
832         buffer = handle->buffer;
833         mutex_lock(&buffer->lock);
834         ion_handle_kmap_put(handle);
835         mutex_unlock(&buffer->lock);
836         mutex_unlock(&client->lock);
837 }
838 EXPORT_SYMBOL(ion_unmap_kernel);
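
/*
 * Illustrative sketch (not part of the driver): kernel mappings are
 * reference counted per handle and per buffer, so every ion_map_kernel()
 * must be balanced by exactly one ion_unmap_kernel() on the same handle.
 * A one-page buffer is assumed, as in the allocation sketch above.
 */
#if 0
        void *vaddr;

        vaddr = ion_map_kernel(client, handle);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        memset(vaddr, 0, PAGE_SIZE);    /* CPU access through the kernel mapping */

        ion_unmap_kernel(client, handle);
#endif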
839
840 static int ion_debug_client_show(struct seq_file *s, void *unused)
841 {
842         struct ion_client *client = s->private;
843         struct rb_node *n;
844         size_t sizes[ION_NUM_HEAP_IDS] = {0};
845         const char *names[ION_NUM_HEAP_IDS] = {NULL};
846         int i;
847
848         mutex_lock(&client->lock);
849         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
850                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
851                                                      node);
852                 unsigned int id = handle->buffer->heap->id;
853
854                 if (!names[id])
855                         names[id] = handle->buffer->heap->name;
856                 sizes[id] += handle->buffer->size;
857         }
858         mutex_unlock(&client->lock);
859
860         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
861         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
862                 if (!names[i])
863                         continue;
864                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
865         }
866         return 0;
867 }
868
869 static int ion_debug_client_open(struct inode *inode, struct file *file)
870 {
871         return single_open(file, ion_debug_client_show, inode->i_private);
872 }
873
874 static const struct file_operations debug_client_fops = {
875         .open = ion_debug_client_open,
876         .read = seq_read,
877         .llseek = seq_lseek,
878         .release = single_release,
879 };
880
881 static int ion_get_client_serial(const struct rb_root *root,
882                                         const unsigned char *name)
883 {
884         int serial = -1;
885         struct rb_node *node;
886         for (node = rb_first(root); node; node = rb_next(node)) {
887                 struct ion_client *client = rb_entry(node, struct ion_client,
888                                                 node);
889                 if (strcmp(client->name, name))
890                         continue;
891                 serial = max(serial, client->display_serial);
892         }
893         return serial + 1;
894 }
895
896 struct ion_client *ion_client_create(struct ion_device *dev,
897                                      const char *name)
898 {
899         struct ion_client *client;
900         struct task_struct *task;
901         struct rb_node **p;
902         struct rb_node *parent = NULL;
903         struct ion_client *entry;
904         pid_t pid;
905         pid_t tid;
906
907         if (!name) {
908                 pr_err("%s: Name cannot be null\n", __func__);
909                 return ERR_PTR(-EINVAL);
910         }
911
912         get_task_struct(current->group_leader);
913         task_lock(current->group_leader);
914         pid = task_pid_nr(current->group_leader);
915         tid = task_pid_nr(current);
916         /* don't bother to store task struct for kernel threads,
917            they can't be killed anyway */
918         if (current->group_leader->flags & PF_KTHREAD) {
919                 put_task_struct(current->group_leader);
920                 task = NULL;
921         } else {
922                 task = current->group_leader;
923         }
924         task_unlock(current->group_leader);
925
926         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
927         if (!client)
928                 goto err_put_task_struct;
929
930         client->dev = dev;
931         client->handles = RB_ROOT;
932         idr_init(&client->idr);
933         mutex_init(&client->lock);
934         client->task = task;
935         client->pid = pid;
936         client->tid = tid;
937         client->name = kstrdup(name, GFP_KERNEL);
938         if (!client->name)
939                 goto err_free_client;
940
941         down_write(&dev->lock);
942         client->display_serial = ion_get_client_serial(&dev->clients, name);
943         client->display_name = kasprintf(
944                 GFP_KERNEL, "%s-%d", name, client->display_serial);
945         if (!client->display_name) {
946                 up_write(&dev->lock);
947                 goto err_free_client_name;
948         }
949         p = &dev->clients.rb_node;
950         while (*p) {
951                 parent = *p;
952                 entry = rb_entry(parent, struct ion_client, node);
953
954                 if (client < entry)
955                         p = &(*p)->rb_left;
956                 else if (client > entry)
957                         p = &(*p)->rb_right;
958         }
959         rb_link_node(&client->node, parent, p);
960         rb_insert_color(&client->node, &dev->clients);
961
962         client->debug_root = debugfs_create_file(client->display_name, 0664,
963                                                 dev->clients_debug_root,
964                                                 client, &debug_client_fops);
965         if (!client->debug_root) {
966                 char buf[256], *path;
967                 path = dentry_path(dev->clients_debug_root, buf, 256);
968                 pr_err("Failed to create client debugfs at %s/%s\n",
969                         path, client->display_name);
970         }
971
972         up_write(&dev->lock);
973
974         return client;
975
976 err_free_client_name:
977         kfree(client->name);
978 err_free_client:
979         kfree(client);
980 err_put_task_struct:
981         if (task)
982                 put_task_struct(current->group_leader);
983         return ERR_PTR(-ENOMEM);
984 }
985 EXPORT_SYMBOL(ion_client_create);
986
987 void ion_client_destroy(struct ion_client *client)
988 {
989         struct ion_device *dev = client->dev;
990         struct rb_node *n;
991
992         pr_debug("%s: %d\n", __func__, __LINE__);
993         while ((n = rb_first(&client->handles))) {
994                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
995                                                      node);
996                 ion_handle_destroy(&handle->ref);
997         }
998
999         idr_destroy(&client->idr);
1000
1001         down_write(&dev->lock);
1002         if (client->task)
1003                 put_task_struct(client->task);
1004         rb_erase(&client->node, &dev->clients);
1005         debugfs_remove_recursive(client->debug_root);
1006         up_write(&dev->lock);
1007
1008         kfree(client->display_name);
1009         kfree(client->name);
1010         kfree(client);
1011 }
1012 EXPORT_SYMBOL(ion_client_destroy);
1013
1014 int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
1015                         unsigned long *size, unsigned int *heap_id)
1016 {
1017         struct ion_buffer *buffer;
1018         struct ion_heap *heap;
1019
1020         mutex_lock(&client->lock);
1021         if (!ion_handle_validate(client, handle)) {
1022                 pr_err("%s: invalid handle passed.\n",
1023                                 __func__);
1024                 mutex_unlock(&client->lock);
1025                 return -EINVAL;
1026         }
1027         buffer = handle->buffer;
1028         mutex_lock(&buffer->lock);
1029         heap = buffer->heap;
1030         *heap_id = (1 << heap->id);
1031         *size = buffer->size;
1032         mutex_unlock(&buffer->lock);
1033         mutex_unlock(&client->lock);
1034
1035         return 0;
1036 }
1037 EXPORT_SYMBOL(ion_handle_get_size);
1038
1039 struct sg_table *ion_sg_table(struct ion_client *client,
1040                               struct ion_handle *handle)
1041 {
1042         struct ion_buffer *buffer;
1043         struct sg_table *table;
1044
1045         mutex_lock(&client->lock);
1046         if (!ion_handle_validate(client, handle)) {
1047                 pr_err("%s: invalid handle passed to map_dma.\n",
1048                        __func__);
1049                 mutex_unlock(&client->lock);
1050                 return ERR_PTR(-EINVAL);
1051         }
1052         buffer = handle->buffer;
1053         table = buffer->sg_table;
1054         mutex_unlock(&client->lock);
1055         return table;
1056 }
1057 EXPORT_SYMBOL(ion_sg_table);
1058
1059 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1060                                        struct device *dev,
1061                                        enum dma_data_direction direction);
1062
1063 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1064                                         enum dma_data_direction direction)
1065 {
1066         struct dma_buf *dmabuf = attachment->dmabuf;
1067         struct ion_buffer *buffer = dmabuf->priv;
1068
1069         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1070         return buffer->sg_table;
1071 }
1072
1073 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1074                               struct sg_table *table,
1075                               enum dma_data_direction direction)
1076 {
1077 }
1078
1079 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1080                 size_t size, enum dma_data_direction dir)
1081 {
1082         struct scatterlist sg;
1083
1084         sg_init_table(&sg, 1);
1085         sg_set_page(&sg, page, size, 0);
1086         /*
1087          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1088          * for the targeted device, but this works on the currently targeted
1089          * hardware.
1090          */
1091         sg_dma_address(&sg) = page_to_phys(page);
1092         dma_sync_sg_for_device(dev, &sg, 1, dir);
1093 }
1094
1095 struct ion_vma_list {
1096         struct list_head list;
1097         struct vm_area_struct *vma;
1098 };
1099
1100 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1101                                        struct device *dev,
1102                                        enum dma_data_direction dir)
1103 {
1104         struct ion_vma_list *vma_list;
1105         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1106         int i;
1107
1108         pr_debug("%s: syncing for device %s\n", __func__,
1109                  dev ? dev_name(dev) : "null");
1110
1111         if (!ion_buffer_fault_user_mappings(buffer))
1112                 return;
1113
1114         mutex_lock(&buffer->lock);
1115         for (i = 0; i < pages; i++) {
1116                 struct page *page = buffer->pages[i];
1117
1118                 if (ion_buffer_page_is_dirty(page))
1119                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
1120                                                         PAGE_SIZE, dir);
1121
1122                 ion_buffer_page_clean(buffer->pages + i);
1123         }
1124         list_for_each_entry(vma_list, &buffer->vmas, list) {
1125                 struct vm_area_struct *vma = vma_list->vma;
1126
1127                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1128                                NULL);
1129         }
1130         mutex_unlock(&buffer->lock);
1131 }
1132
1133 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1134 {
1135         struct ion_buffer *buffer = vma->vm_private_data;
1136         unsigned long pfn;
1137         int ret;
1138
1139         mutex_lock(&buffer->lock);
1140         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1141         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1142
1143         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1144         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1145         mutex_unlock(&buffer->lock);
1146         if (ret)
1147                 return VM_FAULT_ERROR;
1148
1149         return VM_FAULT_NOPAGE;
1150 }
1151
1152 static void ion_vm_open(struct vm_area_struct *vma)
1153 {
1154         struct ion_buffer *buffer = vma->vm_private_data;
1155         struct ion_vma_list *vma_list;
1156
1157         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1158         if (!vma_list)
1159                 return;
1160         vma_list->vma = vma;
1161         mutex_lock(&buffer->lock);
1162         list_add(&vma_list->list, &buffer->vmas);
1163         mutex_unlock(&buffer->lock);
1164         pr_debug("%s: adding %p\n", __func__, vma);
1165 }
1166
1167 static void ion_vm_close(struct vm_area_struct *vma)
1168 {
1169         struct ion_buffer *buffer = vma->vm_private_data;
1170         struct ion_vma_list *vma_list, *tmp;
1171
1172         pr_debug("%s\n", __func__);
1173         mutex_lock(&buffer->lock);
1174         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1175                 if (vma_list->vma != vma)
1176                         continue;
1177                 list_del(&vma_list->list);
1178                 kfree(vma_list);
1179                 pr_debug("%s: deleting %p\n", __func__, vma);
1180                 break;
1181         }
1182         mutex_unlock(&buffer->lock);
1183 }
1184
1185 static struct vm_operations_struct ion_vma_ops = {
1186         .open = ion_vm_open,
1187         .close = ion_vm_close,
1188         .fault = ion_vm_fault,
1189 };
1190
1191 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1192 {
1193         struct ion_buffer *buffer = dmabuf->priv;
1194         int ret = 0;
1195
1196         if (!buffer->heap->ops->map_user) {
1197                 pr_err("%s: this heap does not define a method for mapping "
1198                        "to userspace\n", __func__);
1199                 return -EINVAL;
1200         }
1201
1202         if (ion_buffer_fault_user_mappings(buffer)) {
1203                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1204                                                         VM_DONTDUMP;
1205                 vma->vm_private_data = buffer;
1206                 vma->vm_ops = &ion_vma_ops;
1207                 ion_vm_open(vma);
1208                 return 0;
1209         }
1210
1211         if (!(buffer->flags & ION_FLAG_CACHED))
1212                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1213
1214         mutex_lock(&buffer->lock);
1215         /* now map it to userspace */
1216         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1217         mutex_unlock(&buffer->lock);
1218
1219         if (ret)
1220                 pr_err("%s: failure mapping buffer to userspace\n",
1221                        __func__);
1222
1223         return ret;
1224 }
1225
1226 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1227 {
1228         struct ion_buffer *buffer = dmabuf->priv;
1229         ion_buffer_put(buffer);
1230
1231 #ifdef CONFIG_DRM_SPRD
1232         if (buffer->obj) {
1233                 drm_gem_object_unreference_unlocked(buffer->obj);
1234                 buffer->obj = NULL;
1235         }
1236 #endif
1237 }
1238
1239 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1240 {
1241         struct ion_buffer *buffer = dmabuf->priv;
1242         return buffer->vaddr + offset * PAGE_SIZE;
1243 }
1244
1245 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1246                                void *ptr)
1247 {
1248         return;
1249 }
1250
1251 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1252                                         size_t len,
1253                                         enum dma_data_direction direction)
1254 {
1255         struct ion_buffer *buffer = dmabuf->priv;
1256         void *vaddr;
1257
1258         if (!buffer->heap->ops->map_kernel) {
1259                 pr_err("%s: map kernel is not implemented by this heap.\n",
1260                        __func__);
1261                 return -ENODEV;
1262         }
1263
1264         mutex_lock(&buffer->lock);
1265         vaddr = ion_buffer_kmap_get(buffer);
1266         mutex_unlock(&buffer->lock);
1267         if (IS_ERR(vaddr))
1268                 return PTR_ERR(vaddr);
1269         return 0;
1270 }
1271
1272 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1273                                        size_t len,
1274                                        enum dma_data_direction direction)
1275 {
1276         struct ion_buffer *buffer = dmabuf->priv;
1277
1278         mutex_lock(&buffer->lock);
1279         ion_buffer_kmap_put(buffer);
1280         mutex_unlock(&buffer->lock);
1281 }
1282
1283 static struct dma_buf_ops dma_buf_ops = {
1284         .map_dma_buf = ion_map_dma_buf,
1285         .unmap_dma_buf = ion_unmap_dma_buf,
1286         .mmap = ion_mmap,
1287         .release = ion_dma_buf_release,
1288         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1289         .end_cpu_access = ion_dma_buf_end_cpu_access,
1290         .kmap_atomic = ion_dma_buf_kmap,
1291         .kunmap_atomic = ion_dma_buf_kunmap,
1292         .kmap = ion_dma_buf_kmap,
1293         .kunmap = ion_dma_buf_kunmap,
1294 };
1295
1296 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1297                                                 struct ion_handle *handle)
1298 {
1299         struct ion_buffer *buffer;
1300         struct dma_buf *dmabuf;
1301         bool valid_handle;
1302
1303         mutex_lock(&client->lock);
1304         valid_handle = ion_handle_validate(client, handle);
1305         if (!valid_handle) {
1306                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1307                 mutex_unlock(&client->lock);
1308                 return ERR_PTR(-EINVAL);
1309         }
1310         buffer = handle->buffer;
1311         ion_buffer_get(buffer);
1312         mutex_unlock(&client->lock);
1313
1314         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1315         if (IS_ERR(dmabuf)) {
1316                 ion_buffer_put(buffer);
1317                 return dmabuf;
1318         }
1319
1320         return dmabuf;
1321 }
1322 EXPORT_SYMBOL(ion_share_dma_buf);
1323
1324 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1325 {
1326         struct dma_buf *dmabuf;
1327         int fd;
1328
1329         dmabuf = ion_share_dma_buf(client, handle);
1330         if (IS_ERR(dmabuf)) {
1331                 pr_err("%s: failed to export dma_buf (%ld)\n", __func__, PTR_ERR(dmabuf));
1332                 return PTR_ERR(dmabuf);
1333         }
1334
1335         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1336         if (fd < 0) {
1337                 pr_err("%s: failed to get dma_buf fd (%d)\n", __func__, fd);
1338                 dma_buf_put(dmabuf);
1339         }
1340
1341         return fd;
1342 }
1343 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1344
1345 struct ion_handle *get_ion_handle_from_dmabuf(struct ion_client *client, struct dma_buf *dmabuf)
1346 {
1347         struct ion_buffer *buffer;
1348         struct ion_handle *handle;
1349         int ret;
1350
1351         /* if this memory came from ion */
1352         if (dmabuf->ops != &dma_buf_ops) {
1353                 pr_err("%s: can not import dmabuf from another exporter\n",
1354                                 __func__);
1355                 return ERR_PTR(-EINVAL);
1356         }
1357         buffer = dmabuf->priv;
1358
1359         mutex_lock(&client->lock);
1360         /* if a handle exists for this buffer just take a reference to it */
1361         handle = ion_handle_lookup(client, buffer);
1362         if (!IS_ERR(handle)) {
1363                 ion_handle_get(handle);
1364                 mutex_unlock(&client->lock);
1365                 goto end;
1366         }
1367         mutex_unlock(&client->lock);
1368
1369         handle = ion_handle_create(client, buffer);
1370         if (IS_ERR(handle))
1371                 goto end;
1372
1373         mutex_lock(&client->lock);
1374         ret = ion_handle_add(client, handle);
1375         mutex_unlock(&client->lock);
1376         if (ret) {
1377                 ion_handle_put(handle);
1378                 handle = ERR_PTR(ret);
1379         }
1380
1381 end:
1382         return handle;
1383 }
1384 EXPORT_SYMBOL(get_ion_handle_from_dmabuf);
1385
1386 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1387 {
1388         struct dma_buf *dmabuf;
1389         struct ion_handle *handle;
1390
1391         dmabuf = dma_buf_get(fd);
1392         if (IS_ERR(dmabuf)) {
1393                 pr_err("%s: dma_buf_get failed for fd %d (%ld)\n",
1394                                 __func__, fd, PTR_ERR(dmabuf));
1395                 return ERR_PTR(PTR_ERR(dmabuf));
1396         }
1397         handle = get_ion_handle_from_dmabuf(client, dmabuf);
1398         dma_buf_put(dmabuf);
1399         return handle;
1400 }
1401 EXPORT_SYMBOL(ion_import_dma_buf);
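
/*
 * Illustrative sketch (not part of the driver): buffers travel between
 * clients (and processes) as dma-buf file descriptors.  One client exports
 * a handle to an fd, another turns that fd back into a handle of its own.
 * "client_a", "client_b" and "handle_a" are hypothetical.
 */
#if 0
        int fd;
        struct ion_handle *handle_b;

        fd = ion_share_dma_buf_fd(client_a, handle_a);
        if (fd < 0)
                return fd;

        handle_b = ion_import_dma_buf(client_b, fd);
        if (IS_ERR(handle_b))
                return PTR_ERR(handle_b);

        /* both handles now reference the same underlying ion_buffer */
#endif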
1402
1403 static int ion_invalidate_for_cpu(struct ion_client *client, int fd)
1404 {
1405         struct dma_buf *dmabuf;
1406         struct ion_buffer *buffer;
1407
1408         dmabuf = dma_buf_get(fd);
1409         if (IS_ERR(dmabuf)) {
1410                 pr_err("%s: dma_buf_get failed (%ld)\n", __func__, PTR_ERR(dmabuf));
1412                 return PTR_ERR(dmabuf);
1413         }
1414
1415         /* if this memory came from ion */
1416         if (dmabuf->ops != &dma_buf_ops) {
1417                 pr_err("%s: can not sync dmabuf from another exporter\n",
1418                        __func__);
1419                 dma_buf_put(dmabuf);
1420                 return -EINVAL;
1421         }
1422         buffer = dmabuf->priv;
1423
1424         dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
1425                                buffer->sg_table->nents, DMA_FROM_DEVICE);
1426         dma_buf_put(dmabuf);
1427         return 0;
1428 }
1429
1430 static int ion_sync_for_device(struct ion_client *client, int fd)
1431 {
1432         struct dma_buf *dmabuf;
1433         struct ion_buffer *buffer;
1434
1435         dmabuf = dma_buf_get(fd);
1436         if (IS_ERR(dmabuf)) {
1437                 pr_err("%s: dma_buf_get failed for fd %d (%ld)\n", __func__, fd, PTR_ERR(dmabuf));
1438                 return PTR_ERR(dmabuf);
1439         }
1440
1441         /* if this memory came from ion */
1442         if (dmabuf->ops != &dma_buf_ops) {
1443                 pr_err("%s: can not sync dmabuf from another exporter\n",
1444                        __func__);
1445                 dma_buf_put(dmabuf);
1446                 return -EINVAL;
1447         }
1448         buffer = dmabuf->priv;
1449
1450         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1451                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1452         dma_buf_put(dmabuf);
1453         return 0;
1454 }
1455
1456 /* fix up the cases where the ioctl direction bits are incorrect */
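/*
 * Note: the commands remapped below are declared _IOWR in the ion UAPI
 * header even though the kernel never writes data back for them, so
 * reporting them as write-only lets ion_ioctl() skip the final
 * copy_to_user().
 */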
1457 static unsigned int ion_ioctl_dir(unsigned int cmd)
1458 {
1459         switch (cmd) {
1460         case ION_IOC_SYNC:
1461         case ION_IOC_FREE:
1462         case ION_IOC_CUSTOM:
1463                 return _IOC_WRITE;
1464         default:
1465                 return _IOC_DIR(cmd);
1466         }
1467 }
1468
1469 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1470 {
1471         struct ion_client *client = filp->private_data;
1472         struct ion_device *dev = client->dev;
1473         struct ion_handle *cleanup_handle = NULL;
1474         int ret = 0;
1475         unsigned int dir;
1476
1477         union {
1478                 struct ion_fd_data fd;
1479                 struct ion_allocation_data allocation;
1480                 struct ion_handle_data handle;
1481                 struct ion_custom_data custom;
1482         } data;
1483
1484         dir = ion_ioctl_dir(cmd);
1485         pr_debug("%s:cmd[0x%x]dir[0x%x]\n", __func__, cmd, dir);
1486
1487         if (_IOC_SIZE(cmd) > sizeof(data)) {
1488                 ret = -EINVAL;
1489                 goto out;
1490         }
1491
1492         if (dir & _IOC_WRITE)
1493                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) {
1494                         ret = -EFAULT;
1495                         goto out;
1496                 }
1497
1498         switch (cmd) {
1499         case ION_IOC_ALLOC:
1500         {
1501                 struct ion_handle *handle;
1502
1503                 handle = ion_alloc(client, data.allocation.len,
1504                                                 data.allocation.align,
1505                                                 data.allocation.heap_id_mask,
1506                                                 data.allocation.flags);
1507                 if (IS_ERR(handle)) {
1508                         ret = PTR_ERR(handle);
1509                         goto out;
1510                 }
1511
1512                 data.allocation.handle = handle->id;
1513
1514                 cleanup_handle = handle;
1515                 break;
1516         }
1517         case ION_IOC_FREE:
1518         {
1519                 struct ion_handle *handle;
1520
1521                 handle = ion_handle_get_by_id(client, data.handle.handle);
1522                 if (IS_ERR(handle)) {
1523                         ret = PTR_ERR(handle);
1524                         goto out;
1525                 }
1526                 ion_free(client, handle);
1527                 ion_handle_put(handle);
1528                 break;
1529         }
1530         case ION_IOC_SHARE:
1531         case ION_IOC_MAP:
1532         {
1533                 struct ion_handle *handle;
1534
1535                 handle = ion_handle_get_by_id(client, data.handle.handle);
1536                 if (IS_ERR(handle)) {
1537                         ret = PTR_ERR(handle);
1538                         goto out;
1539                 }
1540                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1541                 ion_handle_put(handle);
1542                 if (data.fd.fd < 0)
1543                         ret = data.fd.fd;
1544                 break;
1545         }
1546         case ION_IOC_IMPORT:
1547         {
1548                 struct ion_handle *handle;
1549                 handle = ion_import_dma_buf(client, data.fd.fd);
1550                 if (IS_ERR(handle))
1551                         ret = PTR_ERR(handle);
1552                 else
1553                         data.handle.handle = handle->id;
1554                 break;
1555         }
1556         case ION_IOC_INVALIDATE:
1557         {
1558                 ret = ion_invalidate_for_cpu(client, data.fd.fd);
1559                 break;
1560         }
1561         case ION_IOC_SYNC:
1562         {
1563                 ret = ion_sync_for_device(client, data.fd.fd);
1564                 break;
1565         }
1566         case ION_IOC_CUSTOM:
1567         {
1568                 if (!dev->custom_ioctl) {
1569                         ret = -ENOTTY;
1570                         goto out;
1571                 }
1572                 ret = dev->custom_ioctl(client, data.custom.cmd,
1573                                                 data.custom.arg);
1574                 break;
1575         }
1576         default:
1577                 ret = -ENOTTY;
1578                 goto out;
1579         }
1580
1581         if (dir & _IOC_READ) {
1582                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1583                         if (cleanup_handle)
1584                                 ion_free(client, cleanup_handle);
1585                         ret = -EFAULT;
1586                 }
1587         }
1588
1589 out:
1590         if (ret)
1591                 pr_info("%s:cmd[0x%x]ret[%d]\n", __func__, cmd, ret);
1592
1593         return ret;
1594 }
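
/*
 * Illustrative sketch (not part of the driver): the userspace side of the
 * ioctl interface above.  The header path and the /dev/ion node name are
 * assumptions of this example, as are the heap id and sizes; error
 * handling is omitted for brevity.
 */
#if 0
        #include <fcntl.h>
        #include <sys/ioctl.h>
        #include <linux/ion.h>

        struct ion_allocation_data alloc_data = {
                .len = 4096,
                .align = 4096,
                .heap_id_mask = 1 << 0,
                .flags = 0,
        };
        struct ion_fd_data fd_data;
        struct ion_handle_data free_data;
        int dev_fd = open("/dev/ion", O_RDWR);

        ioctl(dev_fd, ION_IOC_ALLOC, &alloc_data);      /* fills alloc_data.handle */

        fd_data.handle = alloc_data.handle;
        ioctl(dev_fd, ION_IOC_SHARE, &fd_data);         /* exports a dma-buf fd */
        /* fd_data.fd can now be mmap()ed or passed to another process */

        free_data.handle = alloc_data.handle;
        ioctl(dev_fd, ION_IOC_FREE, &free_data);
#endif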
1595
1596 static int ion_release(struct inode *inode, struct file *file)
1597 {
1598         struct ion_client *client = file->private_data;
1599
1600         pr_debug("%s: %d\n", __func__, __LINE__);
1601         ion_client_destroy(client);
1602         return 0;
1603 }
1604
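/*
 * Open for /dev/ion: every opener gets its own ion_client, named after the
 * thread group leader's pid so it can be told apart in debugfs.
 */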
1605 static int ion_open(struct inode *inode, struct file *file)
1606 {
1607         struct miscdevice *miscdev = file->private_data;
1608         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1609         struct ion_client *client;
1610         char debug_name[64];
1611
1612         pr_debug("%s: %d\n", __func__, __LINE__);
1613         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1614         client = ion_client_create(dev, debug_name);
1615         if (IS_ERR(client))
1616                 return PTR_ERR(client);
1617         file->private_data = client;
1618
1619         return 0;
1620 }
1621
1622 static const struct file_operations ion_fops = {
1623         .owner          = THIS_MODULE,
1624         .open           = ion_open,
1625         .release        = ion_release,
1626         .unlocked_ioctl = ion_ioctl,
1627         .compat_ioctl   = compat_ion_ioctl,
1628 };
1629 #if 0
1630 static size_t ion_debug_heap_total(struct ion_client *client,
1631                                    unsigned int id)
1632 {
1633         size_t size = 0;
1634         struct rb_node *n;
1635
1636         mutex_lock(&client->lock);
1637         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1638                 struct ion_handle *handle = rb_entry(n,
1639                                                      struct ion_handle,
1640                                                      node);
1641                 if (handle->buffer->heap->id == id)
1642                         size += handle->buffer->size;
1643         }
1644         mutex_unlock(&client->lock);
1645         return size;
1646 }
1647 #endif
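
/*
 * debugfs "heaps/<name>" dump: per-client allocations on this heap with
 * their allocation timestamps, followed by orphaned buffers (handle_count
 * has dropped to zero but the dma-buf is still alive) and per-heap totals.
 */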
1648 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1649 {
1650         struct ion_heap *heap = s->private;
1651         struct ion_device *dev = heap->dev;
1652         struct rb_node *n;
1653         struct rb_node *r;
1654         struct tm t;
1655         size_t total_size = 0;
1656         size_t total_orphaned_size = 0;
1657
1658         seq_printf(s, "%16s %6s %6s %10s %16s\n", "client", "pid", "tid", "size", "alloc_time");
1659         seq_printf(s, "----------------------------------------------------------\n");
1660
1661         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1662                 struct ion_client *client = rb_entry(n, struct ion_client,
1663                                                      node);
1664
1665                 mutex_lock(&client->lock);
1666                 for (r = rb_first(&client->handles); r; r = rb_next(r)) {
1667                         struct ion_handle *handle = rb_entry(r,
1668                                                                  struct ion_handle,
1669                                                                  node);
1670                         struct ion_buffer *buffer = handle->buffer;
1671
1672                         if (buffer->heap->id == heap->id) {
1673                                 if (!buffer->size)
1674                                         continue;
1675                                 time_to_tm(buffer->alloc_time.tv_sec, 0, &t);
1676                                 if (client->task) {
1677                                         char task_comm[TASK_COMM_LEN];
1678
1679                                         get_task_comm(task_comm, client->task);
1680                                         seq_printf(s, "%16s %6u %6u %10zu %ld.%d.%d-%d:%d:%d.%ld\n",
1681                                                 task_comm, client->pid, client->tid, buffer->size,
1682                                                 t.tm_year + 1900, t.tm_mon + 1,
1683                                                 t.tm_mday, t.tm_hour, t.tm_min,
1684                                                 t.tm_sec, buffer->alloc_time.tv_usec);
1685                                 } else {
1686                                         seq_printf(s, "%16s %6u %6u %10zu %ld.%d.%d-%d:%d:%d.%ld\n",
1687                                                 client->name, client->pid, client->tid, buffer->size,
1688                                                 t.tm_year + 1900, t.tm_mon + 1,
1689                                                 t.tm_mday, t.tm_hour, t.tm_min,
1690                                                 t.tm_sec, buffer->alloc_time.tv_usec);
1691                                 }
1692                         }
1693                 }
1694                 mutex_unlock(&client->lock);
1695         }
1696         seq_printf(s, "----------------------------------------------------------\n");
1697         seq_printf(s, "orphaned allocations (info is from last known client):\n");
1699         mutex_lock(&dev->buffer_lock);
1700         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1701                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1702                                                      node);
1703                 if (buffer->heap->id != heap->id)
1704                         continue;
1705                 total_size += buffer->size;
1706                 if (!buffer->handle_count) {
1707                         time_to_tm(buffer->alloc_time.tv_sec, 0, &t);
1708                         seq_printf(s, "%16s %6u %6u %10zu %d %d %ld.%d.%d-%d:%d:%d.%ld\n",
1709                                 buffer->task_comm, buffer->pid, buffer->tid, buffer->size,
1710                                 buffer->kmap_cnt, atomic_read(&buffer->ref.refcount),
1711                                 t.tm_year + 1900, t.tm_mon + 1,
1712                                 t.tm_mday, t.tm_hour, t.tm_min,
1713                                 t.tm_sec, buffer->alloc_time.tv_usec);
1714                         total_orphaned_size += buffer->size;
1715                 }
1716         }
1717         mutex_unlock(&dev->buffer_lock);
1718         seq_printf(s, "----------------------------------------------------------\n");
1719         seq_printf(s, "%16s %22zu\n", "total orphaned",
1720                    total_orphaned_size);
1721         seq_printf(s, "%16s %22zu\n", "total ", total_size);
1722         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1723                 seq_printf(s, "%16s %22zu\n", "deferred free",
1724                                 heap->free_list_size);
1725         seq_printf(s, "----------------------------------------------------------\n");
1726
1727         if (heap->debug_show)
1728                 heap->debug_show(heap, s, unused);
1729
1730         return 0;
1731 }
1732
1733 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1734 {
1735         return single_open(file, ion_debug_heap_show, inode->i_private);
1736 }
1737
1738 static const struct file_operations debug_heap_fops = {
1739         .open = ion_debug_heap_open,
1740         .read = seq_read,
1741         .llseek = seq_lseek,
1742         .release = single_release,
1743 };
1744
1745 #ifdef DEBUG_HEAP_SHRINKER
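/*
 * debugfs "<heap>_shrink" helpers: reading the file reports how many
 * objects the heap's shrinker could free right now; writing any non-zero
 * value asks the shrinker to free everything it currently can.
 */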
1746 static int debug_shrink_set(void *data, u64 val)
1747 {
1748         struct ion_heap *heap = data;
1749         struct shrink_control sc;
1750         int objs;
1751
1752         sc.gfp_mask = -1;
1753         sc.nr_to_scan = 0;
1754
1755         if (!val)
1756                 return 0;
1757
1758         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1759         sc.nr_to_scan = objs;
1760
1761         heap->shrinker.shrink(&heap->shrinker, &sc);
1762         return 0;
1763 }
1764
1765 static int debug_shrink_get(void *data, u64 *val)
1766 {
1767         struct ion_heap *heap = data;
1768         struct shrink_control sc;
1769         int objs;
1770
1771         sc.gfp_mask = -1;
1772         sc.nr_to_scan = 0;
1773
1774         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1775         *val = objs;
1776         return 0;
1777 }
1778
1779 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1780                         debug_shrink_set, "%llu\n");
1781 #endif
1782
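/*
 * Register @heap with @dev: spawn its deferred-free thread and shrinker
 * when requested, insert it into the priority-ordered heap list and give
 * it a debugfs entry under heaps/.
 */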
1783 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1784 {
1785         struct dentry *debug_file;
1786
1787         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1788             !heap->ops->unmap_dma)
1789                 pr_err("%s: can not add heap with invalid ops struct.\n",
1790                        __func__);
1791
1792         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1793                 ion_heap_init_deferred_free(heap);
1794
1795         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1796                 ion_heap_init_shrinker(heap);
1797
1798         heap->dev = dev;
1799         down_write(&dev->lock);
1800         /* use negative heap->id to reverse the priority -- when traversing
1801            the list later attempt higher id numbers first */
1802         plist_node_init(&heap->node, -heap->id);
1803         plist_add(&heap->node, &dev->heaps);
1804         debug_file = debugfs_create_file(heap->name, 0664,
1805                                         dev->heaps_debug_root, heap,
1806                                         &debug_heap_fops);
1807
1808         if (!debug_file) {
1809                 char buf[256], *path;
1810                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1811                 pr_err("Failed to create heap debugfs at %s/%s\n",
1812                         path, heap->name);
1813         }
1814
1815 #ifdef DEBUG_HEAP_SHRINKER
1816         if (heap->shrinker.shrink) {
1817                 char debug_name[64];
1818
1819                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1820                 debug_file = debugfs_create_file(
1821                         debug_name, 0644, dev->heaps_debug_root, heap,
1822                         &debug_shrink_fops);
1823                 if (!debug_file) {
1824                         char buf[256], *path;
1825                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1826                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1827                                 path, debug_name);
1828                 }
1829         }
1830 #endif
1831         up_write(&dev->lock);
1832 }
1833
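/*
 * Create the ion misc device.  A platform driver typically wires things up
 * roughly like the sketch below ("pdata", "heaps" and "i" are illustrative
 * names, not part of this file):
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *
 *	for (i = 0; i < pdata->nr; i++) {
 *		heaps[i] = ion_heap_create(&pdata->heaps[i]);
 *		ion_device_add_heap(idev, heaps[i]);
 *	}
 */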
1834 struct ion_device *ion_device_create(long (*custom_ioctl)
1835                                      (struct ion_client *client,
1836                                       unsigned int cmd,
1837                                       unsigned long arg))
1838 {
1839         struct ion_device *idev;
1840         int ret;
1841
1842         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1843         if (!idev)
1844                 return ERR_PTR(-ENOMEM);
1845
1846         idev->dev.minor = MISC_DYNAMIC_MINOR;
1847         idev->dev.name = "ion";
1848         idev->dev.fops = &ion_fops;
1849         idev->dev.parent = NULL;
1850         ret = misc_register(&idev->dev);
1851         if (ret) {
1852                 pr_err("ion: failed to register misc device.\n");
                     kfree(idev);
1853                 return ERR_PTR(ret);
1854         }
1855
1856         idev->debug_root = debugfs_create_dir("ion", NULL);
1857         if (!idev->debug_root) {
1858                 pr_err("ion: failed to create debugfs root directory.\n");
1859                 goto debugfs_done;
1860         }
1861         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1862         if (!idev->heaps_debug_root) {
1863                 pr_err("ion: failed to create debugfs heaps directory.\n");
1864                 goto debugfs_done;
1865         }
1866         idev->clients_debug_root = debugfs_create_dir("clients",
1867                                                 idev->debug_root);
1868         if (!idev->clients_debug_root)
1869                 pr_err("ion: failed to create debugfs clients directory.\n");
1870
1871 debugfs_done:
1872
1873         idev->custom_ioctl = custom_ioctl;
1874         idev->buffers = RB_ROOT;
1875         mutex_init(&idev->buffer_lock);
1876         init_rwsem(&idev->lock);
1877         plist_head_init(&idev->heaps);
1878         idev->clients = RB_ROOT;
1879         return idev;
1880 }
1881
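/*
 * Tear down the misc device and its debugfs tree.  As the XXX below notes,
 * heaps and clients registered against the device are not freed here.
 */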
1882 void ion_device_destroy(struct ion_device *dev)
1883 {
1884         misc_deregister(&dev->dev);
1885         debugfs_remove_recursive(dev->debug_root);
1886         /* XXX need to free the heaps and clients ? */
1887         kfree(dev);
1888 }
1889
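/*
 * Early boot helper: for each heap in the platform data, either reserve the
 * fixed carveout described by base/size or, when base is 0, let memblock
 * pick a region and record the chosen base back into the heap data.
 */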
1890 void __init ion_reserve(struct ion_platform_data *data)
1891 {
1892         int i;
1893
1894         for (i = 0; i < data->nr; i++) {
1895                 if (data->heaps[i].size == 0)
1896                         continue;
1897
1898                 if (data->heaps[i].base == 0) {
1899                         phys_addr_t paddr;
1900                         paddr = memblock_alloc_base(data->heaps[i].size,
1901                                                     data->heaps[i].align,
1902                                                     MEMBLOCK_ALLOC_ANYWHERE);
1903                         if (!paddr) {
1904                                 pr_err("%s: error allocating memblock for heap %d\n",
1905                                        __func__, i);
1907                                 continue;
1908                         }
1909                         data->heaps[i].base = paddr;
1910                 } else {
1911                         int ret = memblock_reserve(data->heaps[i].base,
1912                                                data->heaps[i].size);
1913                         if (ret)
1914                                 pr_err("memblock reserve of %zx@%lx failed\n",
1915                                        data->heaps[i].size,
1916                                        data->heaps[i].base);
1917                 }
1918                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1919                         data->heaps[i].name,
1920                         data->heaps[i].base,
1921                         data->heaps[i].size);
1922         }
1923 }