Update from product codes
[profile/mobile/platform/kernel/linux-3.10-sc7730.git] drivers/staging/android/ion/ion.c
1 /*
2  *
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
21 #include <linux/fs.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
28 #include <linux/mm.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/time.h>
39
40 #include "ion.h"
41 #include "ion_priv.h"
42 #include "compat_ion.h"
43
44 /**
45  * struct ion_device - the metadata of the ion device node
46  * @dev:                the actual misc device
47  * @buffers:            an rb tree of all the existing buffers
48  * @buffer_lock:        lock protecting the tree of buffers
49  * @lock:               rwsem protecting the tree of heaps and clients
50  * @heaps:              list of all the heaps in the system
51  * @clients:            an rb tree of all the existing clients
52  */
53 struct ion_device {
54         struct miscdevice dev;
55         struct rb_root buffers;
56         struct mutex buffer_lock;
57         struct rw_semaphore lock;
58         struct plist_head heaps;
59         long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
60                               unsigned long arg);
61         struct rb_root clients;
62         struct dentry *debug_root;
63         struct dentry *heaps_debug_root;
64         struct dentry *clients_debug_root;
65 };
66
67 /**
68  * struct ion_client - a process/hw block local address space
69  * @node:               node in the tree of all clients
70  * @dev:                backpointer to ion device
71  * @handles:            an rb tree of all the handles in this client
72  * @idr:                an idr space for allocating handle ids
73  * @lock:               lock protecting the tree of handles
74  * @name:               used for debugging
75  * @display_name:       used for debugging (unique version of @name)
76  * @display_serial:     used for debugging (to make display_name unique)
77  * @task:               used for debugging
78  *
79  * A client represents a list of buffers this client may access.
80  * The mutex stored here is used to protect both the tree of handles
81  * and the handles themselves, and should be held while modifying either.
82  */
83 struct ion_client {
84         struct rb_node node;
85         struct ion_device *dev;
86         struct rb_root handles;
87         struct idr idr;
88         struct mutex lock;
89         const char *name;
90         char *display_name;
91         int display_serial;
92         struct task_struct *task;
93         pid_t pid;
94         pid_t tid;
95         struct dentry *debug_root;
96 };
97
98 /**
99  * ion_handle - a client local reference to a buffer
100  * @ref:                reference count
101  * @client:             back pointer to the client the buffer resides in
102  * @buffer:             pointer to the buffer
103  * @node:               node in the client's handle rbtree
104  * @kmap_cnt:           count of times this client has mapped to kernel
105  * @id:                 client-unique id allocated by client->idr
106  *
107  * Modifications to node and kmap_cnt should be protected by the
108  * lock in the client.  Other fields are never changed after initialization.
109  */
110 struct ion_handle {
111         struct kref ref;
112         struct ion_client *client;
113         struct ion_buffer *buffer;
114         struct rb_node node;
115         unsigned int kmap_cnt;
116         int id;
117 };
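/*
 * Reference counting overview: every ion_buffer carries a kref (buffer->ref)
 * plus a handle_count of client handles pointing at it, and every ion_handle
 * carries its own kref (handle->ref) for the references a single client
 * holds.  An exported dma_buf takes an extra buffer reference, so the buffer
 * stays alive while any handle or dma_buf file still refers to it.
 */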
118
119 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
120 {
121         return (buffer->flags & ION_FLAG_CACHED) &&
122                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
123 }
124
125 bool ion_buffer_cached(struct ion_buffer *buffer)
126 {
127         return !!(buffer->flags & ION_FLAG_CACHED);
128 }
129
130 static inline struct page *ion_buffer_page(struct page *page)
131 {
132         return (struct page *)((unsigned long)page & ~(1UL));
133 }
134
135 static inline bool ion_buffer_page_is_dirty(struct page *page)
136 {
137         return !!((unsigned long)page & 1UL);
138 }
139
140 static inline void ion_buffer_page_dirty(struct page **page)
141 {
142         *page = (struct page *)((unsigned long)(*page) | 1UL);
143 }
144
145 static inline void ion_buffer_page_clean(struct page **page)
146 {
147         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
148 }
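/*
 * Note: the helpers above track per-page dirty state by tagging bit 0 of the
 * struct page pointer stored in buffer->pages[].  Real struct page pointers
 * are always at least word aligned, so bit 0 is free to use as a flag;
 * ion_buffer_page() strips the tag before the pointer is dereferenced.
 */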
149
150 /* this function should only be called while dev->lock is held */
151 static void ion_buffer_add(struct ion_device *dev,
152                            struct ion_buffer *buffer)
153 {
154         struct rb_node **p = &dev->buffers.rb_node;
155         struct rb_node *parent = NULL;
156         struct ion_buffer *entry;
157
158         while (*p) {
159                 parent = *p;
160                 entry = rb_entry(parent, struct ion_buffer, node);
161
162                 if (buffer < entry) {
163                         p = &(*p)->rb_left;
164                 } else if (buffer > entry) {
165                         p = &(*p)->rb_right;
166                 } else {
167                         pr_err("%s: buffer already found.", __func__);
168                         BUG();
169                 }
170         }
171
172         rb_link_node(&buffer->node, parent, p);
173         rb_insert_color(&buffer->node, &dev->buffers);
174 }
175
176 /* this function should only be called while dev->lock is held */
177 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
178                                      struct ion_device *dev,
179                                      unsigned long len,
180                                      unsigned long align,
181                                      unsigned long flags)
182 {
183         struct ion_buffer *buffer;
184         struct sg_table *table;
185         struct scatterlist *sg;
186         struct timeval time;
187         int i, ret;
188
189         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
190         if (!buffer)
191                 return ERR_PTR(-ENOMEM);
192
193         buffer->heap = heap;
194         buffer->flags = flags;
195         kref_init(&buffer->ref);
196
197         ret = heap->ops->allocate(heap, buffer, len, align, flags);
198
199         if (ret) {
200                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
201                         goto err2;
202
203                 ion_heap_freelist_drain(heap, 0);
204                 ret = heap->ops->allocate(heap, buffer, len, align,
205                                           flags);
206                 if (ret)
207                         goto err2;
208         }
209
210         buffer->dev = dev;
211         buffer->size = len;
212
213         table = heap->ops->map_dma(heap, buffer);
214         if (WARN_ONCE(table == NULL,
215                         "heap->ops->map_dma should return ERR_PTR on error"))
216                 table = ERR_PTR(-EINVAL);
217         if (IS_ERR(table)) {
218                 heap->ops->free(buffer);
219                 kfree(buffer);
220                 return ERR_PTR(PTR_ERR(table));
221         }
222         buffer->sg_table = table;
223         if (ion_buffer_fault_user_mappings(buffer)) {
224                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
225                 struct scatterlist *sg;
226                 int i, j, k = 0;
227
228                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
229                 if (!buffer->pages) {
230                         ret = -ENOMEM;
231                         goto err1;
232                 }
233
234                 for_each_sg(table->sgl, sg, table->nents, i) {
235                         struct page *page = sg_page(sg);
236
237                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
238                                 buffer->pages[k++] = page++;
239                 }
240
241                 if (ret)
242                         goto err;
243         }
244
245         buffer->dev = dev;
246         buffer->size = len;
247         INIT_LIST_HEAD(&buffer->vmas);
248         mutex_init(&buffer->lock);
249         /* this will set up dma addresses for the sglist -- it is not
250            technically correct as per the dma api -- a specific
251            device isn't really taking ownership here.  However, in practice on
252            our systems the only dma_address space is physical addresses.
253            Additionally, we can't afford the overhead of invalidating every
254            allocation via dma_map_sg. The implicit contract here is that
255            memory coming from the heaps is ready for dma, i.e. if it has a
256            cached mapping that mapping has been invalidated */
257         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
258                 sg_dma_address(sg) = sg_phys(sg);
259         mutex_lock(&dev->buffer_lock);
260         ion_buffer_add(dev, buffer);
261         mutex_unlock(&dev->buffer_lock);
262
263         do_gettimeofday(&time);
264         buffer->alloc_time = time;
265         return buffer;
266
267 err:
268         heap->ops->unmap_dma(heap, buffer);
269         heap->ops->free(buffer);
270 err1:
271         if (buffer->pages)
272                 vfree(buffer->pages);
273 err2:
274         kfree(buffer);
275         return ERR_PTR(ret);
276 }
277
278 void ion_buffer_destroy(struct ion_buffer *buffer)
279 {
280         if (WARN_ON(buffer->kmap_cnt > 0))
281                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
282         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
283         buffer->heap->ops->free(buffer);
284         if (buffer->pages)
285                 vfree(buffer->pages);
286         kfree(buffer);
287 }
288
289 static void _ion_buffer_destroy(struct kref *kref)
290 {
291         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
292         struct ion_heap *heap = buffer->heap;
293         struct ion_device *dev = buffer->dev;
294
295 #if defined(CONFIG_SPRD_IOMMU)
296
297         int i;
298         for (i = IOMMU_GSP; i < IOMMU_MAX; i++) {
299                 if (buffer->iomap_cnt[i] > 0)
300                 {
301                         buffer->iomap_cnt[i] = 0;
302                         sprd_iova_unmap(i, buffer->iova[i], buffer->size);
303                         sprd_iova_free(i, buffer->iova[i], buffer->size);
304                 }
305         }
306 #endif
307
308         mutex_lock(&dev->buffer_lock);
309         rb_erase(&buffer->node, &dev->buffers);
310         mutex_unlock(&dev->buffer_lock);
311
312         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
313                 ion_heap_freelist_add(heap, buffer);
314         else
315                 ion_buffer_destroy(buffer);
316 }
317
318 static void ion_buffer_get(struct ion_buffer *buffer)
319 {
320         kref_get(&buffer->ref);
321 }
322
323 static int ion_buffer_put(struct ion_buffer *buffer)
324 {
325         return kref_put(&buffer->ref, _ion_buffer_destroy);
326 }
327
328 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
329 {
330         mutex_lock(&buffer->lock);
331         buffer->handle_count++;
332         mutex_unlock(&buffer->lock);
333 }
334
335 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
336 {
337         /*
338          * when a buffer is removed from a handle, if it is not in
339          * any other handles, copy the taskcomm and the pid of the
340          * process it's being removed from into the buffer.  At this
341          * point there will be no way to track what processes this buffer is
342          * being used by; it only exists as a dma_buf file descriptor.
343          * The taskcomm and pid can provide a debug hint as to where this fd
344          * is in the system
345          */
346         mutex_lock(&buffer->lock);
347         buffer->handle_count--;
348         BUG_ON(buffer->handle_count < 0);
349         if (!buffer->handle_count) {
350                 struct task_struct *task;
351
352                 task = current->group_leader;
353                 get_task_comm(buffer->task_comm, task);
354                 buffer->pid = task_pid_nr(task);
355                 buffer->tid = task_pid_nr(current);
356         }
357         mutex_unlock(&buffer->lock);
358 }
359
360 static struct ion_handle *ion_handle_create(struct ion_client *client,
361                                      struct ion_buffer *buffer)
362 {
363         struct ion_handle *handle;
364
365         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
366         if (!handle)
367                 return ERR_PTR(-ENOMEM);
368         kref_init(&handle->ref);
369         RB_CLEAR_NODE(&handle->node);
370         handle->client = client;
371         ion_buffer_get(buffer);
372         ion_buffer_add_to_handle(buffer);
373         handle->buffer = buffer;
374
375         return handle;
376 }
377
378 static void ion_handle_kmap_put(struct ion_handle *);
379
380 static void ion_handle_destroy(struct kref *kref)
381 {
382         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
383         struct ion_client *client = handle->client;
384         struct ion_buffer *buffer = handle->buffer;
385
386         mutex_lock(&buffer->lock);
387         while (handle->kmap_cnt)
388                 ion_handle_kmap_put(handle);
389         mutex_unlock(&buffer->lock);
390
391         idr_remove(&client->idr, handle->id);
392         if (!RB_EMPTY_NODE(&handle->node))
393                 rb_erase(&handle->node, &client->handles);
394
395         ion_buffer_remove_from_handle(buffer);
396         ion_buffer_put(buffer);
397
398         kfree(handle);
399 }
400
401 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
402 {
403         return handle->buffer;
404 }
405
406 static void ion_handle_get(struct ion_handle *handle)
407 {
408         kref_get(&handle->ref);
409 }
410
411 static int ion_handle_put(struct ion_handle *handle)
412 {
413         struct ion_client *client = handle->client;
414         int ret;
415
416         mutex_lock(&client->lock);
417         ret = kref_put(&handle->ref, ion_handle_destroy);
418         mutex_unlock(&client->lock);
419
420         return ret;
421 }
422
423 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
424                                             struct ion_buffer *buffer)
425 {
426         struct rb_node *n = client->handles.rb_node;
427
428         while (n) {
429                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
430                 if (buffer < entry->buffer)
431                         n = n->rb_left;
432                 else if (buffer > entry->buffer)
433                         n = n->rb_right;
434                 else
435                         return entry;
436         }
437         return ERR_PTR(-EINVAL);
438 }
439
440 static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
441                                                 int id)
442 {
443         struct ion_handle *handle;
444
445         mutex_lock(&client->lock);
446         handle = idr_find(&client->idr, id);
447         if (handle)
448                 ion_handle_get(handle);
449         mutex_unlock(&client->lock);
450
451         return handle ? handle : ERR_PTR(-EINVAL);
452 }
453
454 static bool ion_handle_validate(struct ion_client *client,
455                                 struct ion_handle *handle)
456 {
457         WARN_ON(!mutex_is_locked(&client->lock));
458         return (idr_find(&client->idr, handle->id) == handle);
459 }
460
461 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
462 {
463         int id;
464         struct rb_node **p = &client->handles.rb_node;
465         struct rb_node *parent = NULL;
466         struct ion_handle *entry;
467
468         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
469         if (id < 0)
470                 return id;
471
472         handle->id = id;
473
474         while (*p) {
475                 parent = *p;
476                 entry = rb_entry(parent, struct ion_handle, node);
477
478                 if (handle->buffer < entry->buffer)
479                         p = &(*p)->rb_left;
480                 else if (handle->buffer > entry->buffer)
481                         p = &(*p)->rb_right;
482                 else
483                         WARN(1, "%s: buffer already found.", __func__);
484         }
485
486         rb_link_node(&handle->node, parent, p);
487         rb_insert_color(&handle->node, &client->handles);
488
489         return 0;
490 }
491
492 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
493                              size_t align, unsigned int heap_id_mask,
494                              unsigned int flags)
495 {
496         struct ion_handle *handle;
497         struct ion_device *dev = client->dev;
498         struct ion_buffer *buffer = NULL;
499         struct ion_heap *heap;
500         int ret;
501
502         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
503                  len, align, heap_id_mask, flags);
504         /*
505          * traverse the list of heaps available in this system in priority
506          * order.  If the heap type is supported by the client, and matches the
507          * request of the caller, allocate from it.  Repeat until allocation has
508          * succeeded or all heaps have been tried.
509          */
510         len = PAGE_ALIGN(len);
511
512         if (!len)
513                 return ERR_PTR(-EINVAL);
514
515         down_read(&dev->lock);
516         plist_for_each_entry(heap, &dev->heaps, node) {
517                 /* if the caller didn't specify this heap id */
518                 if (!((1 << heap->id) & heap_id_mask))
519                         continue;
520                 buffer = ion_buffer_create(heap, dev, len, align, flags);
521                 if (!IS_ERR(buffer))
522                         break;
523         }
524         up_read(&dev->lock);
525
526         if (buffer == NULL) {
527                 pr_err("%s: buffer is NULL!\n", __func__);
528                 return ERR_PTR(-ENODEV);
529         }
530
531         if (IS_ERR(buffer)) {
532                 pr_err("%s: ion_buffer_create failed, buffer is %p\n", __func__, buffer);
533                 return ERR_PTR(PTR_ERR(buffer));
534         }
535
536         handle = ion_handle_create(client, buffer);
537
538         /*
539          * ion_buffer_create will create a buffer with a ref_cnt of 1,
540          * and ion_handle_create will take a second reference, drop one here
541          */
542         ion_buffer_put(buffer);
543
544         if (IS_ERR(handle)) {
545                 pr_err("%s: ion_handle_create failed, handle is %p\n", __func__, handle);
546                 return handle;
547         }
548
549         mutex_lock(&client->lock);
550         ret = ion_handle_add(client, handle);
551         mutex_unlock(&client->lock);
552         if (ret) {
553                 ion_handle_put(handle);
554                 handle = ERR_PTR(ret);
555         }
556
557         return handle;
558 }
559 EXPORT_SYMBOL(ion_alloc);
560
561 void ion_free(struct ion_client *client, struct ion_handle *handle)
562 {
563         bool valid_handle;
564
565         BUG_ON(client != handle->client);
566
567         mutex_lock(&client->lock);
568         valid_handle = ion_handle_validate(client, handle);
569
570         if (!valid_handle) {
571                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
572                 mutex_unlock(&client->lock);
573                 return;
574         }
575         mutex_unlock(&client->lock);
576         ion_handle_put(handle);
577 }
578 EXPORT_SYMBOL(ion_free);
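/*
 * Illustrative sketch (not part of this driver): a minimal in-kernel user of
 * the client API above.  The heap id (0) and the use of ION_FLAG_CACHED are
 * assumptions made purely for illustration.
 */
#if 0
static int ion_alloc_example(struct ion_device *idev)
{
        struct ion_client *client;
        struct ion_handle *handle;

        client = ion_client_create(idev, "example-client");
        if (IS_ERR(client))
                return PTR_ERR(client);

        /* one page, page aligned, from heap id 0 (assumed), cached */
        handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, 1 << 0,
                           ION_FLAG_CACHED);
        if (IS_ERR(handle)) {
                ion_client_destroy(client);
                return PTR_ERR(handle);
        }

        ion_free(client, handle);
        ion_client_destroy(client);
        return 0;
}
#endif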
579
580 int ion_phys(struct ion_client *client, struct ion_handle *handle,
581              ion_phys_addr_t *addr, size_t *len)
582 {
583         struct ion_buffer *buffer;
584         int ret;
585
586         mutex_lock(&client->lock);
587         if (!ion_handle_validate(client, handle)) {
588                 mutex_unlock(&client->lock);
589                 return -EINVAL;
590         }
591
592         buffer = handle->buffer;
593
594         if (!buffer->heap->ops->phys) {
595                 pr_err("%s: ion_phys is not implemented by this heap.\n",
596                        __func__);
597                 mutex_unlock(&client->lock);
598                 return -ENODEV;
599         }
600         mutex_unlock(&client->lock);
601         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
602         return ret;
603 }
604 EXPORT_SYMBOL(ion_phys);
605
606 int ion_is_phys(struct ion_client *client, struct ion_handle *handle)
607 {
608         struct ion_buffer *buffer;
609         int ret = 0;
610
611         mutex_lock(&client->lock);
612         if (!ion_handle_validate(client, handle)) {
613                 mutex_unlock(&client->lock);
614                 return -EINVAL;
615         }
616
617         buffer = handle->buffer;
618
619         if (!buffer->heap->ops->phys)
620                 ret = -1;
621
622         mutex_unlock(&client->lock);
623
624         return ret;
625 }
626 EXPORT_SYMBOL(ion_is_phys);
627
628 int ion_is_cached(struct ion_client *client, struct ion_handle *handle)
629 {
630         struct ion_buffer *buffer;
631         int cached;
632
633         mutex_lock(&client->lock);
634         if (!ion_handle_validate(client, handle)) {
635                 mutex_unlock(&client->lock);
636                 return -EINVAL;
637         }
638
639         buffer = handle->buffer;
640
641         cached = ion_buffer_cached(buffer);
642         mutex_unlock(&client->lock);
643
644         return cached;
645 }
646 EXPORT_SYMBOL(ion_is_cached);
647
648 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
649 {
650         void *vaddr;
651
652         if (buffer->kmap_cnt) {
653                 buffer->kmap_cnt++;
654                 return buffer->vaddr;
655         }
656         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
657         if (WARN_ONCE(vaddr == NULL,
658                         "heap->ops->map_kernel should return ERR_PTR on error"))
659                 return ERR_PTR(-EINVAL);
660         if (IS_ERR(vaddr))
661                 return vaddr;
662         buffer->vaddr = vaddr;
663         buffer->kmap_cnt++;
664         return vaddr;
665 }
666
667 static void *ion_handle_kmap_get(struct ion_handle *handle)
668 {
669         struct ion_buffer *buffer = handle->buffer;
670         void *vaddr;
671
672         if (handle->kmap_cnt) {
673                 handle->kmap_cnt++;
674                 return buffer->vaddr;
675         }
676         vaddr = ion_buffer_kmap_get(buffer);
677         if (IS_ERR(vaddr))
678                 return vaddr;
679         handle->kmap_cnt++;
680         return vaddr;
681 }
682
683 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
684 {
685         buffer->kmap_cnt--;
686         if (!buffer->kmap_cnt) {
687                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
688                 buffer->vaddr = NULL;
689         }
690 }
691
692 static void ion_handle_kmap_put(struct ion_handle *handle)
693 {
694         struct ion_buffer *buffer = handle->buffer;
695
696         if (!handle->kmap_cnt) {
697                 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
698                 return;
699         }
700
701         handle->kmap_cnt--;
702         if (!handle->kmap_cnt)
703                 ion_buffer_kmap_put(buffer);
704 }
705
706 int ion_map_iommu(struct ion_client *client, struct ion_handle *handle, int domain_no, unsigned long *ptr_iova)
707 {
708         struct ion_buffer *buffer;
709
710         mutex_lock(&client->lock);
711         if (!ion_handle_validate(client, handle)) {
712                 pr_err("%s: invalid handle passed to map_iommu.\n",
713                        __func__);
714                 mutex_unlock(&client->lock);
715                 return -EINVAL;
716         }
717
718         buffer = handle->buffer;
719
720         if (!handle->buffer->heap->ops->map_iommu) {
721                 pr_err("%s: map_iommu is not implemented by this heap.\n",
722                        __func__);
723                 mutex_unlock(&client->lock);
724                 return -ENODEV;
725         }
726
727         mutex_lock(&buffer->lock);
728         handle->buffer->heap->ops->map_iommu(buffer, domain_no, ptr_iova);
729         mutex_unlock(&buffer->lock);
730         mutex_unlock(&client->lock);
731         return 0;
732 }
733 EXPORT_SYMBOL(ion_map_iommu);
734
735 int ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle, int domain_no)
736 {
737         struct ion_buffer *buffer;
738
739         mutex_lock(&client->lock);
740         if (!ion_handle_validate(client, handle)) {
741                 pr_err("%s: invalid handle passed to unmap_iommu.\n",
742                        __func__);
743                 mutex_unlock(&client->lock);
744                 return -EINVAL;
745         }
746
747         buffer = handle->buffer;
748
749         if (!handle->buffer->heap->ops->unmap_iommu) {
750                 pr_err("%s: unmap_iommu is not implemented by this heap.\n",
751                        __func__);
752                 mutex_unlock(&client->lock);
753                 return -ENODEV;
754         }
755
756         mutex_lock(&buffer->lock);
757         handle->buffer->heap->ops->unmap_iommu(buffer, domain_no);
758         mutex_unlock(&buffer->lock);
759         mutex_unlock(&client->lock);
760         return 0;
761 }
762 EXPORT_SYMBOL(ion_unmap_iommu);
763
764 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
765 {
766         struct ion_buffer *buffer;
767         void *vaddr;
768
769         mutex_lock(&client->lock);
770         if (!ion_handle_validate(client, handle)) {
771                 pr_err("%s: invalid handle passed to map_kernel.\n",
772                        __func__);
773                 mutex_unlock(&client->lock);
774                 return ERR_PTR(-EINVAL);
775         }
776
777         buffer = handle->buffer;
778
779         if (!handle->buffer->heap->ops->map_kernel) {
780                 pr_err("%s: map_kernel is not implemented by this heap.\n",
781                        __func__);
782                 mutex_unlock(&client->lock);
783                 return ERR_PTR(-ENODEV);
784         }
785
786         mutex_lock(&buffer->lock);
787         vaddr = ion_handle_kmap_get(handle);
788         mutex_unlock(&buffer->lock);
789         mutex_unlock(&client->lock);
790         return vaddr;
791 }
792 EXPORT_SYMBOL(ion_map_kernel);
793
794 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
795 {
796         struct ion_buffer *buffer;
797
798         mutex_lock(&client->lock);
799         buffer = handle->buffer;
800         mutex_lock(&buffer->lock);
801         ion_handle_kmap_put(handle);
802         mutex_unlock(&buffer->lock);
803         mutex_unlock(&client->lock);
804 }
805 EXPORT_SYMBOL(ion_unmap_kernel);
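/*
 * Illustrative sketch (not part of this driver): mapping a handle into the
 * kernel, touching the memory, and unmapping it again.  The client and
 * handle are assumed to have been obtained as in the allocation sketch
 * above.
 */
#if 0
static void ion_kmap_example(struct ion_client *client,
                             struct ion_handle *handle)
{
        void *vaddr = ion_map_kernel(client, handle);

        if (IS_ERR(vaddr))
                return;

        memset(vaddr, 0, PAGE_SIZE);    /* buffer is at least one page */
        ion_unmap_kernel(client, handle);
}
#endif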
806
807 static int ion_debug_client_show(struct seq_file *s, void *unused)
808 {
809         struct ion_client *client = s->private;
810         struct rb_node *n;
811         size_t sizes[ION_NUM_HEAP_IDS] = {0};
812         const char *names[ION_NUM_HEAP_IDS] = {NULL};
813         int i;
814
815         mutex_lock(&client->lock);
816         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
817                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
818                                                      node);
819                 unsigned int id = handle->buffer->heap->id;
820
821                 if (!names[id])
822                         names[id] = handle->buffer->heap->name;
823                 sizes[id] += handle->buffer->size;
824         }
825         mutex_unlock(&client->lock);
826
827         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
828         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
829                 if (!names[i])
830                         continue;
831                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
832         }
833         return 0;
834 }
835
836 static int ion_debug_client_open(struct inode *inode, struct file *file)
837 {
838         return single_open(file, ion_debug_client_show, inode->i_private);
839 }
840
841 static const struct file_operations debug_client_fops = {
842         .open = ion_debug_client_open,
843         .read = seq_read,
844         .llseek = seq_lseek,
845         .release = single_release,
846 };
847
848 static int ion_get_client_serial(const struct rb_root *root,
849                                         const unsigned char *name)
850 {
851         int serial = -1;
852         struct rb_node *node;
853         for (node = rb_first(root); node; node = rb_next(node)) {
854                 struct ion_client *client = rb_entry(node, struct ion_client,
855                                                 node);
856                 if (strcmp(client->name, name))
857                         continue;
858                 serial = max(serial, client->display_serial);
859         }
860         return serial + 1;
861 }
862
863 struct ion_client *ion_client_create(struct ion_device *dev,
864                                      const char *name)
865 {
866         struct ion_client *client;
867         struct task_struct *task;
868         struct rb_node **p;
869         struct rb_node *parent = NULL;
870         struct ion_client *entry;
871         pid_t pid;
872         pid_t tid;
873
874         if (!name) {
875                 pr_err("%s: Name cannot be null\n", __func__);
876                 return ERR_PTR(-EINVAL);
877         }
878
879         get_task_struct(current->group_leader);
880         task_lock(current->group_leader);
881         pid = task_pid_nr(current->group_leader);
882         tid = task_pid_nr(current);
883         /* don't bother to store task struct for kernel threads,
884            they can't be killed anyway */
885         if (current->group_leader->flags & PF_KTHREAD) {
886                 put_task_struct(current->group_leader);
887                 task = NULL;
888         } else {
889                 task = current->group_leader;
890         }
891         task_unlock(current->group_leader);
892
893         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
894         if (!client)
895                 goto err_put_task_struct;
896
897         client->dev = dev;
898         client->handles = RB_ROOT;
899         idr_init(&client->idr);
900         mutex_init(&client->lock);
901         client->task = task;
902         client->pid = pid;
903         client->tid = tid;
904         client->name = kstrdup(name, GFP_KERNEL);
905         if (!client->name)
906                 goto err_free_client;
907
908         down_write(&dev->lock);
909         client->display_serial = ion_get_client_serial(&dev->clients, name);
910         client->display_name = kasprintf(
911                 GFP_KERNEL, "%s-%d", name, client->display_serial);
912         if (!client->display_name) {
913                 up_write(&dev->lock);
914                 goto err_free_client_name;
915         }
916         p = &dev->clients.rb_node;
917         while (*p) {
918                 parent = *p;
919                 entry = rb_entry(parent, struct ion_client, node);
920
921                 if (client < entry)
922                         p = &(*p)->rb_left;
923                 else if (client > entry)
924                         p = &(*p)->rb_right;
925         }
926         rb_link_node(&client->node, parent, p);
927         rb_insert_color(&client->node, &dev->clients);
928
929         client->debug_root = debugfs_create_file(client->display_name, 0664,
930                                                 dev->clients_debug_root,
931                                                 client, &debug_client_fops);
932         if (!client->debug_root) {
933                 char buf[256], *path;
934                 path = dentry_path(dev->clients_debug_root, buf, 256);
935                 pr_err("Failed to create client debugfs at %s/%s\n",
936                         path, client->display_name);
937         }
938
939         up_write(&dev->lock);
940
941         return client;
942
943 err_free_client_name:
944         kfree(client->name);
945 err_free_client:
946         kfree(client);
947 err_put_task_struct:
948         if (task)
949                 put_task_struct(current->group_leader);
950         return ERR_PTR(-ENOMEM);
951 }
952 EXPORT_SYMBOL(ion_client_create);
953
954 void ion_client_destroy(struct ion_client *client)
955 {
956         struct ion_device *dev = client->dev;
957         struct rb_node *n;
958
959         pr_debug("%s: %d\n", __func__, __LINE__);
960         while ((n = rb_first(&client->handles))) {
961                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
962                                                      node);
963                 ion_handle_destroy(&handle->ref);
964         }
965
966         idr_destroy(&client->idr);
967
968         down_write(&dev->lock);
969         if (client->task)
970                 put_task_struct(client->task);
971         rb_erase(&client->node, &dev->clients);
972         debugfs_remove_recursive(client->debug_root);
973         up_write(&dev->lock);
974
975         kfree(client->display_name);
976         kfree(client->name);
977         kfree(client);
978 }
979 EXPORT_SYMBOL(ion_client_destroy);
980
981 int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
982                         unsigned long *size, unsigned int *heap_id)
983 {
984         struct ion_buffer *buffer;
985         struct ion_heap *heap;
986
987         mutex_lock(&client->lock);
988         if (!ion_handle_validate(client, handle)) {
989                 pr_err("%s: invalid handle passed.\n",
990                                 __func__);
991                 mutex_unlock(&client->lock);
992                 return -EINVAL;
993         }
994         buffer = handle->buffer;
995         mutex_lock(&buffer->lock);
996         heap = buffer->heap;
997         *heap_id = (1 << heap->id);
998         *size = buffer->size;
999         mutex_unlock(&buffer->lock);
1000         mutex_unlock(&client->lock);
1001
1002         return 0;
1003 }
1004 EXPORT_SYMBOL(ion_handle_get_size);
1005
1006 struct sg_table *ion_sg_table(struct ion_client *client,
1007                               struct ion_handle *handle)
1008 {
1009         struct ion_buffer *buffer;
1010         struct sg_table *table;
1011
1012         mutex_lock(&client->lock);
1013         if (!ion_handle_validate(client, handle)) {
1014                 pr_err("%s: invalid handle passed to map_dma.\n",
1015                        __func__);
1016                 mutex_unlock(&client->lock);
1017                 return ERR_PTR(-EINVAL);
1018         }
1019         buffer = handle->buffer;
1020         table = buffer->sg_table;
1021         mutex_unlock(&client->lock);
1022         return table;
1023 }
1024 EXPORT_SYMBOL(ion_sg_table);
1025
1026 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1027                                        struct device *dev,
1028                                        enum dma_data_direction direction);
1029
1030 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1031                                         enum dma_data_direction direction)
1032 {
1033         struct dma_buf *dmabuf = attachment->dmabuf;
1034         struct ion_buffer *buffer = dmabuf->priv;
1035
1036         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1037         return buffer->sg_table;
1038 }
1039
1040 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1041                               struct sg_table *table,
1042                               enum dma_data_direction direction)
1043 {
1044 }
1045
1046 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1047                 size_t size, enum dma_data_direction dir)
1048 {
1049         struct scatterlist sg;
1050
1051         sg_init_table(&sg, 1);
1052         sg_set_page(&sg, page, size, 0);
1053         /*
1054          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1055          * for the targeted device, but this works on the currently targeted
1056          * hardware.
1057          */
1058         sg_dma_address(&sg) = page_to_phys(page);
1059         dma_sync_sg_for_device(dev, &sg, 1, dir);
1060 }
1061
1062 struct ion_vma_list {
1063         struct list_head list;
1064         struct vm_area_struct *vma;
1065 };
1066
1067 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1068                                        struct device *dev,
1069                                        enum dma_data_direction dir)
1070 {
1071         struct ion_vma_list *vma_list;
1072         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1073         int i;
1074
1075         pr_debug("%s: syncing for device %s\n", __func__,
1076                  dev ? dev_name(dev) : "null");
1077
1078         if (!ion_buffer_fault_user_mappings(buffer))
1079                 return;
1080
1081         mutex_lock(&buffer->lock);
1082         for (i = 0; i < pages; i++) {
1083                 struct page *page = buffer->pages[i];
1084
1085                 if (ion_buffer_page_is_dirty(page))
1086                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
1087                                                         PAGE_SIZE, dir);
1088
1089                 ion_buffer_page_clean(buffer->pages + i);
1090         }
1091         list_for_each_entry(vma_list, &buffer->vmas, list) {
1092                 struct vm_area_struct *vma = vma_list->vma;
1093
1094                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1095                                NULL);
1096         }
1097         mutex_unlock(&buffer->lock);
1098 }
1099
1100 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1101 {
1102         struct ion_buffer *buffer = vma->vm_private_data;
1103         unsigned long pfn;
1104         int ret;
1105
1106         mutex_lock(&buffer->lock);
1107         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1108         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1109
1110         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1111         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1112         mutex_unlock(&buffer->lock);
1113         if (ret)
1114                 return VM_FAULT_ERROR;
1115
1116         return VM_FAULT_NOPAGE;
1117 }
1118
1119 static void ion_vm_open(struct vm_area_struct *vma)
1120 {
1121         struct ion_buffer *buffer = vma->vm_private_data;
1122         struct ion_vma_list *vma_list;
1123
1124         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1125         if (!vma_list)
1126                 return;
1127         vma_list->vma = vma;
1128         mutex_lock(&buffer->lock);
1129         list_add(&vma_list->list, &buffer->vmas);
1130         mutex_unlock(&buffer->lock);
1131         pr_debug("%s: adding %p\n", __func__, vma);
1132 }
1133
1134 static void ion_vm_close(struct vm_area_struct *vma)
1135 {
1136         struct ion_buffer *buffer = vma->vm_private_data;
1137         struct ion_vma_list *vma_list, *tmp;
1138
1139         pr_debug("%s\n", __func__);
1140         mutex_lock(&buffer->lock);
1141         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1142                 if (vma_list->vma != vma)
1143                         continue;
1144                 list_del(&vma_list->list);
1145                 kfree(vma_list);
1146                 pr_debug("%s: deleting %p\n", __func__, vma);
1147                 break;
1148         }
1149         mutex_unlock(&buffer->lock);
1150 }
1151
1152 static struct vm_operations_struct ion_vma_ops = {
1153         .open = ion_vm_open,
1154         .close = ion_vm_close,
1155         .fault = ion_vm_fault,
1156 };
1157
1158 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1159 {
1160         struct ion_buffer *buffer = dmabuf->priv;
1161         int ret = 0;
1162
1163         if (!buffer->heap->ops->map_user) {
1164                 pr_err("%s: this heap does not define a method for mapping "
1165                        "to userspace\n", __func__);
1166                 return -EINVAL;
1167         }
1168
1169         if (ion_buffer_fault_user_mappings(buffer)) {
1170                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1171                                                         VM_DONTDUMP;
1172                 vma->vm_private_data = buffer;
1173                 vma->vm_ops = &ion_vma_ops;
1174                 ion_vm_open(vma);
1175                 return 0;
1176         }
1177
1178         if (!(buffer->flags & ION_FLAG_CACHED))
1179                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1180
1181         mutex_lock(&buffer->lock);
1182         /* now map it to userspace */
1183         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1184         mutex_unlock(&buffer->lock);
1185
1186         if (ret)
1187                 pr_err("%s: failure mapping buffer to userspace\n",
1188                        __func__);
1189
1190         return ret;
1191 }
1192
1193 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1194 {
1195         struct ion_buffer *buffer = dmabuf->priv;
1196         ion_buffer_put(buffer);
1197 }
1198
1199 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1200 {
1201         struct ion_buffer *buffer = dmabuf->priv;
1202         return buffer->vaddr + offset * PAGE_SIZE;
1203 }
1204
1205 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1206                                void *ptr)
1207 {
1208         return;
1209 }
1210
1211 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1212                                         size_t len,
1213                                         enum dma_data_direction direction)
1214 {
1215         struct ion_buffer *buffer = dmabuf->priv;
1216         void *vaddr;
1217
1218         if (!buffer->heap->ops->map_kernel) {
1219                 pr_err("%s: map kernel is not implemented by this heap.\n",
1220                        __func__);
1221                 return -ENODEV;
1222         }
1223
1224         mutex_lock(&buffer->lock);
1225         vaddr = ion_buffer_kmap_get(buffer);
1226         mutex_unlock(&buffer->lock);
1227         if (IS_ERR(vaddr))
1228                 return PTR_ERR(vaddr);
1229         return 0;
1230 }
1231
1232 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1233                                        size_t len,
1234                                        enum dma_data_direction direction)
1235 {
1236         struct ion_buffer *buffer = dmabuf->priv;
1237
1238         mutex_lock(&buffer->lock);
1239         ion_buffer_kmap_put(buffer);
1240         mutex_unlock(&buffer->lock);
1241 }
1242
1243 static struct dma_buf_ops dma_buf_ops = {
1244         .map_dma_buf = ion_map_dma_buf,
1245         .unmap_dma_buf = ion_unmap_dma_buf,
1246         .mmap = ion_mmap,
1247         .release = ion_dma_buf_release,
1248         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1249         .end_cpu_access = ion_dma_buf_end_cpu_access,
1250         .kmap_atomic = ion_dma_buf_kmap,
1251         .kunmap_atomic = ion_dma_buf_kunmap,
1252         .kmap = ion_dma_buf_kmap,
1253         .kunmap = ion_dma_buf_kunmap,
1254 };
1255
1256 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1257                                                 struct ion_handle *handle)
1258 {
1259         struct ion_buffer *buffer;
1260         struct dma_buf *dmabuf;
1261         bool valid_handle;
1262
1263         mutex_lock(&client->lock);
1264         valid_handle = ion_handle_validate(client, handle);
1265         if (!valid_handle) {
1266                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1267                 mutex_unlock(&client->lock);
1268                 return ERR_PTR(-EINVAL);
1269         }
1270         buffer = handle->buffer;
1271         ion_buffer_get(buffer);
1272         mutex_unlock(&client->lock);
1273
1274         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1275         if (IS_ERR(dmabuf)) {
1276                 ion_buffer_put(buffer);
1277                 return dmabuf;
1278         }
1279
1280         return dmabuf;
1281 }
1282 EXPORT_SYMBOL(ion_share_dma_buf);
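/*
 * Illustrative sketch (not part of this driver): how another device driver
 * could consume the dma_buf exported above through the standard dma-buf
 * attach/map API.  The consumer device pointer is an assumption made for
 * illustration.
 */
#if 0
static int ion_dma_buf_consumer_example(struct dma_buf *dmabuf,
                                        struct device *consumer_dev)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        attach = dma_buf_attach(dmabuf, consumer_dev);
        if (IS_ERR(attach))
                return PTR_ERR(attach);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                dma_buf_detach(dmabuf, attach);
                return PTR_ERR(sgt);
        }

        /* ... program the device with the addresses in sgt ... */

        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
        dma_buf_detach(dmabuf, attach);
        return 0;
}
#endif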
1283
1284 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1285 {
1286         struct dma_buf *dmabuf;
1287         int fd;
1288
1289         dmabuf = ion_share_dma_buf(client, handle);
1290         if (IS_ERR(dmabuf)) {
1291                 pr_err("%s: ion_share_dma_buf failed, dmabuf is %p!\n", __func__, dmabuf);
1292                 return PTR_ERR(dmabuf);
1293         }
1294
1295         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1296         if (fd < 0) {
1297                 pr_err("%s: dma_buf_fd failed, fd is %d!\n", __func__, fd);
1298                 dma_buf_put(dmabuf);
1299         }
1300
1301         return fd;
1302 }
1303 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1304
1305 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1306 {
1307         struct dma_buf *dmabuf;
1308         struct ion_buffer *buffer;
1309         struct ion_handle *handle;
1310         int ret;
1311
1312         dmabuf = dma_buf_get(fd);
1313         if (IS_ERR(dmabuf)) {
1314                 pr_err("%s: dma_buf_get failed, dmabuf=0x%lx, fd:%d\n",
1315                         __func__, (unsigned long)dmabuf, fd);
1316                 return ERR_PTR(PTR_ERR(dmabuf));
1317         }
1318         /* if this memory came from ion */
1319
1320         if (dmabuf->ops != &dma_buf_ops) {
1321                 pr_err("%s: can not import dmabuf from another exporter\n",
1322                        __func__);
1323                 dma_buf_put(dmabuf);
1324                 return ERR_PTR(-EINVAL);
1325         }
1326         buffer = dmabuf->priv;
1327
1328         mutex_lock(&client->lock);
1329         /* if a handle exists for this buffer just take a reference to it */
1330         handle = ion_handle_lookup(client, buffer);
1331         if (!IS_ERR(handle)) {
1332                 ion_handle_get(handle);
1333                 mutex_unlock(&client->lock);
1334                 goto end;
1335         }
1336         mutex_unlock(&client->lock);
1337
1338         handle = ion_handle_create(client, buffer);
1339         if (IS_ERR(handle))
1340                 goto end;
1341
1342         mutex_lock(&client->lock);
1343         ret = ion_handle_add(client, handle);
1344         mutex_unlock(&client->lock);
1345         if (ret) {
1346                 ion_handle_put(handle);
1347                 handle = ERR_PTR(ret);
1348         }
1349
1350 end:
1351         dma_buf_put(dmabuf);
1352         return handle;
1353 }
1354 EXPORT_SYMBOL(ion_import_dma_buf);
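/*
 * Illustrative sketch (not part of this driver): sharing a buffer from one
 * client as a dma_buf fd and importing it into another client.  Both handles
 * end up referring to the same ion_buffer; if the importing client already
 * has a handle for the buffer, ion_import_dma_buf() above simply takes
 * another reference to it.  In practice the fd is handed to userspace rather
 * than consumed in the kernel.
 */
#if 0
static int ion_share_import_example(struct ion_client *producer,
                                    struct ion_handle *handle,
                                    struct ion_client *consumer)
{
        struct ion_handle *imported;
        int fd;

        fd = ion_share_dma_buf_fd(producer, handle);
        if (fd < 0)
                return fd;

        imported = ion_import_dma_buf(consumer, fd);
        if (IS_ERR(imported))
                return PTR_ERR(imported);

        /* ... use the buffer through the consumer client ... */

        ion_free(consumer, imported);
        return 0;
}
#endif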
1355
1356 static int ion_invalidate_for_cpu(struct ion_client *client, int fd)
1357 {
1358         struct dma_buf *dmabuf;
1359         struct ion_buffer *buffer;
1360
1361         dmabuf = dma_buf_get(fd);
1362         if (IS_ERR(dmabuf)) {
1363                 pr_err("%s: dma_buf_get failed, dmabuf is %p!\n",
1364                        __func__, dmabuf);
1365                 return PTR_ERR(dmabuf);
1366         }
1367
1368         /* if this memory came from ion */
1369         if (dmabuf->ops != &dma_buf_ops) {
1370                 pr_err("%s: can not sync dmabuf from another exporter\n",
1371                        __func__);
1372                 dma_buf_put(dmabuf);
1373                 return -EINVAL;
1374         }
1375         buffer = dmabuf->priv;
1376
1377         dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
1378                                buffer->sg_table->nents, DMA_FROM_DEVICE);
1379         dma_buf_put(dmabuf);
1380         return 0;
1381 }
1382
1383 static int ion_sync_for_device(struct ion_client *client, int fd)
1384 {
1385         struct dma_buf *dmabuf;
1386         struct ion_buffer *buffer;
1387
1388         dmabuf = dma_buf_get(fd);
1389         if (IS_ERR(dmabuf)) {
1390                 pr_err("%s: dma_buf_get failed, dmabuf is %p, fd %d\n", __func__, dmabuf, fd);
1391                 return PTR_ERR(dmabuf);
1392         }
1393
1394         /* if this memory came from ion */
1395         if (dmabuf->ops != &dma_buf_ops) {
1396                 pr_err("%s: can not sync dmabuf from another exporter\n",
1397                        __func__);
1398                 dma_buf_put(dmabuf);
1399                 return -EINVAL;
1400         }
1401         buffer = dmabuf->priv;
1402
1403         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1404                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1405         dma_buf_put(dmabuf);
1406         return 0;
1407 }
1408
1409 /* fix up the cases where the ioctl direction bits are incorrect */
1410 static unsigned int ion_ioctl_dir(unsigned int cmd)
1411 {
1412         switch (cmd) {
1413         case ION_IOC_SYNC:
1414         case ION_IOC_FREE:
1415         case ION_IOC_CUSTOM:
1416                 return _IOC_WRITE;
1417         default:
1418                 return _IOC_DIR(cmd);
1419         }
1420 }
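/*
 * ION_IOC_SYNC, ION_IOC_FREE and ION_IOC_CUSTOM are declared in the uapi
 * header with a read direction they do not actually need.  Forcing the
 * direction to _IOC_WRITE here means ion_ioctl() only copies the argument
 * in from userspace and skips the copy_to_user() on the way out for these
 * commands.
 */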
1421
1422 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1423 {
1424         struct ion_client *client = filp->private_data;
1425         struct ion_device *dev = client->dev;
1426         struct ion_handle *cleanup_handle = NULL;
1427         int ret = 0;
1428         unsigned int dir;
1429
1430         union {
1431                 struct ion_fd_data fd;
1432                 struct ion_allocation_data allocation;
1433                 struct ion_handle_data handle;
1434                 struct ion_custom_data custom;
1435         } data;
1436
1437         dir = ion_ioctl_dir(cmd);
1438         pr_debug("%s:cmd[0x%x]dir[0x%x]\n", __func__, cmd, dir);
1439
1440         if (_IOC_SIZE(cmd) > sizeof(data)) {
1441                 ret = -EINVAL;
1442                 goto out;
1443         }
1444
1445         if (dir & _IOC_WRITE)
1446                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) {
1447                         ret = -EFAULT;
1448                         goto out;
1449                 }
1450
1451         switch (cmd) {
1452         case ION_IOC_ALLOC:
1453         {
1454                 struct ion_handle *handle;
1455
1456                 handle = ion_alloc(client, data.allocation.len,
1457                                                 data.allocation.align,
1458                                                 data.allocation.heap_id_mask,
1459                                                 data.allocation.flags);
1460                 if (IS_ERR(handle)) {
1461                         ret = PTR_ERR(handle);
1462                         goto out;
1463                 }
1464
1465                 data.allocation.handle = handle->id;
1466
1467                 cleanup_handle = handle;
1468                 break;
1469         }
1470         case ION_IOC_FREE:
1471         {
1472                 struct ion_handle *handle;
1473
1474                 handle = ion_handle_get_by_id(client, data.handle.handle);
1475                 if (IS_ERR(handle)) {
1476                         ret = PTR_ERR(handle);
1477                         goto out;
1478                 }
1479                 ion_free(client, handle);
1480                 ion_handle_put(handle);
1481                 break;
1482         }
1483         case ION_IOC_SHARE:
1484         case ION_IOC_MAP:
1485         {
1486                 struct ion_handle *handle;
1487
1488                 handle = ion_handle_get_by_id(client, data.handle.handle);
1489                 if (IS_ERR(handle)) {
1490                         ret = PTR_ERR(handle);
1491                         goto out;
1492                 }
1493                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1494                 ion_handle_put(handle);
1495                 if (data.fd.fd < 0)
1496                         ret = data.fd.fd;
1497                 break;
1498         }
1499         case ION_IOC_IMPORT:
1500         {
1501                 struct ion_handle *handle;
1502                 handle = ion_import_dma_buf(client, data.fd.fd);
1503                 if (IS_ERR(handle))
1504                         ret = PTR_ERR(handle);
1505                 else
1506                         data.handle.handle = handle->id;
1507                 break;
1508         }
1509         case ION_IOC_INVALIDATE:
1510         {
1511                 ret = ion_invalidate_for_cpu(client, data.fd.fd);
1512                 break;
1513         }
1514         case ION_IOC_SYNC:
1515         {
1516                 ret = ion_sync_for_device(client, data.fd.fd);
1517                 break;
1518         }
1519         case ION_IOC_CUSTOM:
1520         {
1521                 if (!dev->custom_ioctl) {
1522                         ret = -ENOTTY;
1523                         goto out;
1524                 }
1525                 ret = dev->custom_ioctl(client, data.custom.cmd,
1526                                                 data.custom.arg);
1527                 break;
1528         }
1529         default:
1530                 ret = -ENOTTY;
1531                 goto out;
1532         }
1533
1534         if (dir & _IOC_READ) {
1535                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1536                         if (cleanup_handle)
1537                                 ion_free(client, cleanup_handle);
1538                         ret = -EFAULT;
1539                 }
1540         }
1541
1542 out:
1543         if (ret)
1544                 pr_info("%s:cmd[0x%x]ret[%d]\n", __func__, cmd, ret);
1545
1546         return ret;
1547 }
1548
1549 static int ion_release(struct inode *inode, struct file *file)
1550 {
1551         struct ion_client *client = file->private_data;
1552
1553         pr_debug("%s: %d\n", __func__, __LINE__);
1554         ion_client_destroy(client);
1555         return 0;
1556 }
1557
1558 static int ion_open(struct inode *inode, struct file *file)
1559 {
1560         struct miscdevice *miscdev = file->private_data;
1561         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1562         struct ion_client *client;
1563         char debug_name[64];
1564
1565         pr_debug("%s: %d\n", __func__, __LINE__);
1566         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1567         client = ion_client_create(dev, debug_name);
1568         if (IS_ERR(client))
1569                 return PTR_ERR(client);
1570         file->private_data = client;
1571
1572         return 0;
1573 }
1574
1575 static const struct file_operations ion_fops = {
1576         .owner          = THIS_MODULE,
1577         .open           = ion_open,
1578         .release        = ion_release,
1579         .unlocked_ioctl = ion_ioctl,
1580         .compat_ioctl   = compat_ion_ioctl,
1581 };
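/*
 * Typical userspace flow against this interface: open the ion misc device
 * (each open file gets its own ion_client), ION_IOC_ALLOC to obtain a
 * handle, ION_IOC_SHARE or ION_IOC_MAP to turn the handle into a dma_buf fd
 * that can be mmap()ed or passed to other processes, ION_IOC_IMPORT on the
 * receiving side, and ION_IOC_FREE once the handle is no longer needed.
 */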
1582 #if 0
1583 static size_t ion_debug_heap_total(struct ion_client *client,
1584                                    unsigned int id)
1585 {
1586         size_t size = 0;
1587         struct rb_node *n;
1588
1589         mutex_lock(&client->lock);
1590         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1591                 struct ion_handle *handle = rb_entry(n,
1592                                                      struct ion_handle,
1593                                                      node);
1594                 if (handle->buffer->heap->id == id)
1595                         size += handle->buffer->size;
1596         }
1597         mutex_unlock(&client->lock);
1598         return size;
1599 }
1600 #endif
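/*
 * debugfs show routine for a single heap (ion/heaps/<name>): prints, per
 * client, every live allocation on this heap with pid/tid, size and
 * allocation time, then orphaned buffers (handle_count == 0), running totals
 * and, for deferred-free heaps, the current free-list size.
 */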
1601 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1602 {
1603         struct ion_heap *heap = s->private;
1604         struct ion_device *dev = heap->dev;
1605         struct rb_node *n;
1606         struct rb_node *r;
1607         struct tm t;
1608         size_t total_size = 0;
1609         size_t total_orphaned_size = 0;
1610
1611         seq_printf(s, "%16.s %6.s %6.s %10.s %16.s\n", "client", "pid", "tid", "size", "alloc_time");
1612         seq_printf(s, "----------------------------------------------------------\n");
1613
1614         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1615                 struct ion_client *client = rb_entry(n, struct ion_client,
1616                                                      node);
1617
1618                 mutex_lock(&client->lock);
1619                 for (r = rb_first(&client->handles); r; r = rb_next(r)) {
1620                         struct ion_handle *handle = rb_entry(r,
1621                                                                  struct ion_handle,
1622                                                                  node);
1623                         struct ion_buffer *buffer = handle->buffer;
1624
1625                         if (buffer->heap->id == heap->id) {
1626                                 if (!buffer->size)
1627                                         continue;
1628                                 time_to_tm(buffer->alloc_time.tv_sec, 0, &t);
1629                                 if (client->task) {
1630                                         char task_comm[TASK_COMM_LEN];
1631
1632                                         get_task_comm(task_comm, client->task);
1633                                         seq_printf(s, "%16.s %6u %6u %10zu %ld.%d.%d-%d:%d:%d.%ld\n",
1634                                                 task_comm, client->pid, client->tid, buffer->size,
1635                                                 t.tm_year + 1900, t.tm_mon + 1,
1636                                                 t.tm_mday, t.tm_hour, t.tm_min,
1637                                                 t.tm_sec, buffer->alloc_time.tv_usec);
1638                                 } else {
1639                                         seq_printf(s, "%16.s %6u %6u %10zu %ld.%d.%d-%d:%d:%d.%ld\n",
1640                                                 client->name, client->pid, client->tid, buffer->size,
1641                                                 t.tm_year + 1900, t.tm_mon + 1,
1642                                                 t.tm_mday, t.tm_hour, t.tm_min,
1643                                                 t.tm_sec, buffer->alloc_time.tv_usec);
1644                                 }
1645                         }
1646                 }
1647                 mutex_unlock(&client->lock);
1648         }
1649         seq_printf(s, "----------------------------------------------------------\n");
1650         seq_printf(s, "orphaned allocations (info is from last known client):"
1651                    "\n");
1652         mutex_lock(&dev->buffer_lock);
1653         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1654                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1655                                                      node);
1656                 if (buffer->heap->id != heap->id)
1657                         continue;
1658                 total_size += buffer->size;
1659                 if (!buffer->handle_count) {
1660                         time_to_tm(buffer->alloc_time.tv_sec, 0, &t);
1661                         seq_printf(s, "%16.s %6u %6u %10zu %d %d %ld.%d.%d-%d:%d:%d.%ld\n",
1662                                 buffer->task_comm, buffer->pid, buffer->tid, buffer->size,
1663                                 buffer->kmap_cnt, atomic_read(&buffer->ref.refcount),
1664                                 t.tm_year + 1900, t.tm_mon + 1,
1665                                 t.tm_mday, t.tm_hour, t.tm_min,
1666                                 t.tm_sec, buffer->alloc_time.tv_usec);
1667                         total_orphaned_size += buffer->size;
1668                 }
1669         }
1670         mutex_unlock(&dev->buffer_lock);
1671         seq_printf(s, "----------------------------------------------------------\n");
1672         seq_printf(s, "%16.s %22zu\n", "total orphaned",
1673                    total_orphaned_size);
1674         seq_printf(s, "%16.s %22zu\n", "total ", total_size);
1675         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1676                 seq_printf(s, "%16.s %22zu\n", "deferred free",
1677                                 heap->free_list_size);
1678         seq_printf(s, "----------------------------------------------------------\n");
1679
1680         if (heap->debug_show)
1681                 heap->debug_show(heap, s, unused);
1682
1683         return 0;
1684 }
1685
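/*
 * debugfs plumbing: ion_device_add_heap() stores the heap pointer in the
 * file's inode->i_private, and single_open() hands it to the show routine
 * above as s->private.
 */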
1686 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1687 {
1688         return single_open(file, ion_debug_heap_show, inode->i_private);
1689 }
1690
1691 static const struct file_operations debug_heap_fops = {
1692         .open = ion_debug_heap_open,
1693         .read = seq_read,
1694         .llseek = seq_lseek,
1695         .release = single_release,
1696 };
1697
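/*
 * Optional debug knobs, built only with DEBUG_HEAP_SHRINKER: reading
 * <heap>_shrink reports how many objects the heap's shrinker could free,
 * and writing any non-zero value forces a full shrink pass.
 */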
1698 #ifdef DEBUG_HEAP_SHRINKER
1699 static int debug_shrink_set(void *data, u64 val)
1700 {
1701         struct ion_heap *heap = data;
1702         struct shrink_control sc;
1703         int objs;
1704
1705         sc.gfp_mask = -1;
1706         sc.nr_to_scan = 0;
1707
1708         if (!val)
1709                 return 0;
1710
1711         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1712         sc.nr_to_scan = objs;
1713
1714         heap->shrinker.shrink(&heap->shrinker, &sc);
1715         return 0;
1716 }
1717
1718 static int debug_shrink_get(void *data, u64 *val)
1719 {
1720         struct ion_heap *heap = data;
1721         struct shrink_control sc;
1722         int objs;
1723
1724         sc.gfp_mask = -1;
1725         sc.nr_to_scan = 0;
1726
1727         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1728         *val = objs;
1729         return 0;
1730 }
1731
1732 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1733                         debug_shrink_set, "%llu\n");
1734 #endif
1735
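/*
 * Registers a heap with the ion device: checks the mandatory ops, starts the
 * deferred-free thread and shrinker when requested, inserts the heap into the
 * priority list (keyed on -heap->id so higher ids are tried first) and
 * exposes it under debugfs as ion/heaps/<name>.
 */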
1736 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1737 {
1738         struct dentry *debug_file;
1739
1740         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1741             !heap->ops->unmap_dma)
1742                 pr_err("%s: can not add heap with invalid ops struct.\n",
1743                        __func__);
1744
1745         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1746                 ion_heap_init_deferred_free(heap);
1747
1748         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1749                 ion_heap_init_shrinker(heap);
1750
1751         heap->dev = dev;
1752         down_write(&dev->lock);
1753         /* use negative heap->id to reverse the priority -- when traversing
1754            the list later attempt higher id numbers first */
1755         plist_node_init(&heap->node, -heap->id);
1756         plist_add(&heap->node, &dev->heaps);
1757         debug_file = debugfs_create_file(heap->name, 0664,
1758                                         dev->heaps_debug_root, heap,
1759                                         &debug_heap_fops);
1760
1761         if (!debug_file) {
1762                 char buf[256], *path;
1763                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1764                 pr_err("Failed to create heap debugfs at %s/%s\n",
1765                         path, heap->name);
1766         }
1767
1768 #ifdef DEBUG_HEAP_SHRINKER
1769         if (heap->shrinker.shrink) {
1770                 char debug_name[64];
1771
1772                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1773                 debug_file = debugfs_create_file(
1774                         debug_name, 0644, dev->heaps_debug_root, heap,
1775                         &debug_shrink_fops);
1776                 if (!debug_file) {
1777                         char buf[256], *path;
1778                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1779                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1780                                 path, debug_name);
1781                 }
1782         }
1783 #endif
1784         up_write(&dev->lock);
1785 }
1786
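/*
 * Creates and registers the /dev/ion misc device, builds the debugfs
 * hierarchy (ion/, ion/heaps/, ion/clients/) and initialises the buffer and
 * client trees; debugfs failures are reported but are not fatal.
 */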
1787 struct ion_device *ion_device_create(long (*custom_ioctl)
1788                                      (struct ion_client *client,
1789                                       unsigned int cmd,
1790                                       unsigned long arg))
1791 {
1792         struct ion_device *idev;
1793         int ret;
1794
1795         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1796         if (!idev)
1797                 return ERR_PTR(-ENOMEM);
1798
1799         idev->dev.minor = MISC_DYNAMIC_MINOR;
1800         idev->dev.name = "ion";
1801         idev->dev.fops = &ion_fops;
1802         idev->dev.parent = NULL;
1803         ret = misc_register(&idev->dev);
1804         if (ret) {
1805                 pr_err("ion: failed to register misc device.\n");
                     kfree(idev);	/* don't leak idev when misc_register() fails */
1806                 return ERR_PTR(ret);
1807         }
1808
1809         idev->debug_root = debugfs_create_dir("ion", NULL);
1810         if (!idev->debug_root) {
1811                 pr_err("ion: failed to create debugfs root directory.\n");
1812                 goto debugfs_done;
1813         }
1814         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1815         if (!idev->heaps_debug_root) {
1816                 pr_err("ion: failed to create debugfs heaps directory.\n");
1817                 goto debugfs_done;
1818         }
1819         idev->clients_debug_root = debugfs_create_dir("clients",
1820                                                 idev->debug_root);
1821         if (!idev->clients_debug_root)
1822                 pr_err("ion: failed to create debugfs clients directory.\n");
1823
1824 debugfs_done:
1825
1826         idev->custom_ioctl = custom_ioctl;
1827         idev->buffers = RB_ROOT;
1828         mutex_init(&idev->buffer_lock);
1829         init_rwsem(&idev->lock);
1830         plist_head_init(&idev->heaps);
1831         idev->clients = RB_ROOT;
1832         return idev;
1833 }
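
/*
 * How a platform ion driver in this tree would typically tie the pieces
 * together in its probe path.  A sketch only, not built with the driver;
 * ion_heap_create() is assumed to come from ion_heap.c with its usual
 * (struct ion_platform_heap *) prototype.
 */
#if 0
static struct ion_device *idev;

static int ion_example_probe(struct ion_platform_data *pdata)
{
	int i;

	idev = ion_device_create(NULL);	/* no custom ioctl */
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	for (i = 0; i < pdata->nr; i++) {
		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);

		if (IS_ERR(heap))
			continue;	/* a real driver would unwind here */
		ion_device_add_heap(idev, heap);
	}
	return 0;
}
#endif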
1834
1835 void ion_device_destroy(struct ion_device *dev)
1836 {
1837         misc_deregister(&dev->dev);
1838         debugfs_remove_recursive(dev->debug_root);
1839         /* XXX need to free the heaps and clients ? */
1840         kfree(dev);
1841 }
1842
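/*
 * Early-boot helper for carveout-style heaps: for each platform heap with a
 * non-zero size it either grabs memory from memblock (when no base address
 * was supplied) or reserves the fixed range the platform specified.
 */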
1843 void __init ion_reserve(struct ion_platform_data *data)
1844 {
1845         int i;
1846
1847         for (i = 0; i < data->nr; i++) {
1848                 if (data->heaps[i].size == 0)
1849                         continue;
1850
1851                 if (data->heaps[i].base == 0) {
1852                         phys_addr_t paddr;
1853                         paddr = memblock_alloc_base(data->heaps[i].size,
1854                                                     data->heaps[i].align,
1855                                                     MEMBLOCK_ALLOC_ANYWHERE);
1856                         if (!paddr) {
1857                                 pr_err("%s: error allocating memblock for "
1858                                        "heap %d\n",
1859                                         __func__, i);
1860                                 continue;
1861                         }
1862                         data->heaps[i].base = paddr;
1863                 } else {
1864                         int ret = memblock_reserve(data->heaps[i].base,
1865                                                data->heaps[i].size);
1866                         if (ret)
1867                                 pr_err("memblock reserve of %zx@%lx failed\n",
1868                                        data->heaps[i].size,
1869                                        data->heaps[i].base);
1870                 }
1871                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1872                         data->heaps[i].name,
1873                         data->heaps[i].base,
1874                         data->heaps[i].size);
1875         }
1876 }