gpu: remove unused ion 17/153617/1
authorSeung-Woo Kim <sw0312.kim@samsung.com>
Fri, 29 Sep 2017 02:22:35 +0000 (11:22 +0900)
committerSeung-Woo Kim <sw0312.kim@samsung.com>
Fri, 29 Sep 2017 02:23:54 +0000 (11:23 +0900)
The driver/gpu/ion is not used at all and instead, same module in
driver/staging/android/ion is used. Remove unused ion.

Change-Id: I5004efbd53f2613d86fada3b7ef812ba5177238c
Signed-off-by: Seung-Woo Kim <sw0312.kim@samsung.com>
22 files changed:
drivers/gpu/ion/Kconfig [deleted file]
drivers/gpu/ion/Makefile [deleted file]
drivers/gpu/ion/compat_ion.c [deleted file]
drivers/gpu/ion/compat_ion.h [deleted file]
drivers/gpu/ion/ion.c [deleted file]
drivers/gpu/ion/ion_carveout_heap.c [deleted file]
drivers/gpu/ion/ion_chunk_heap.c [deleted file]
drivers/gpu/ion/ion_cma_heap.c [deleted file]
drivers/gpu/ion/ion_heap.c [deleted file]
drivers/gpu/ion/ion_page_pool.c [deleted file]
drivers/gpu/ion/ion_priv.h [deleted file]
drivers/gpu/ion/ion_system_heap.c [deleted file]
drivers/gpu/ion/ion_system_mapper.c [deleted file]
drivers/gpu/ion/sprd/Makefile [deleted file]
drivers/gpu/ion/sprd/compat_sprd_ion.c [deleted file]
drivers/gpu/ion/sprd/compat_sprd_ion.h [deleted file]
drivers/gpu/ion/sprd/sprd_fence.c [deleted file]
drivers/gpu/ion/sprd/sprd_fence.h [deleted file]
drivers/gpu/ion/sprd/sprd_ion.c [deleted file]
drivers/gpu/ion/sprd/sprd_ion_cma_heap.c [deleted file]
drivers/gpu/ion/tegra/Makefile [deleted file]
drivers/gpu/ion/tegra/tegra_ion.c [deleted file]

diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
deleted file mode 100644 (file)
index 68a886c..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-menuconfig ION
-       tristate "Ion Memory Manager"
-       depends on ARM
-       select GENERIC_ALLOCATOR
-       select DMA_SHARED_BUFFER
-       help
-         Chose this option to enable the ION Memory Manager.
-
-config ION_TEGRA
-       tristate "Ion for Tegra"
-       depends on ARCH_TEGRA && ION
-       help
-         Choose this option if you wish to use ion on an nVidia Tegra.
-
-config ION_SPRD
-       tristate "Ion for SPRD"
-       depends on ION
-       help
-         Choose this option if you wish to use ion on an SPRD chip.
-
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
deleted file mode 100644 (file)
index 5a61c2f..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-obj-$(CONFIG_ION) +=   ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
-                       ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
-ifdef CONFIG_COMPAT
-obj-$(CONFIG_ION) += compat_ion.o
-endif
-obj-$(CONFIG_ION_TEGRA) += tegra/
-obj-$(CONFIG_ION_SPRD) += sprd/
diff --git a/drivers/gpu/ion/compat_ion.c b/drivers/gpu/ion/compat_ion.c
deleted file mode 100644 (file)
index 1e1bbe1..0000000
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * drivers/gpu/ion/compat_ion.c
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/compat.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-
-#include <linux/ion.h>
-#include "compat_ion.h"
-#include "sprd/compat_sprd_ion.h"
-
-/* See drivers/staging/android/uapi/ion.h for the definition of these structs */
-struct compat_ion_allocation_data {
-       compat_size_t len;
-       compat_size_t align;
-       compat_uint_t heap_id_mask;
-       compat_uint_t flags;
-       compat_int_t handle;
-};
-
-struct compat_ion_custom_data {
-       compat_uint_t cmd;
-       compat_ulong_t arg;
-};
-
-struct compat_ion_handle_data {
-       compat_int_t handle;
-};
-
-#define COMPAT_ION_IOC_ALLOC   _IOWR(ION_IOC_MAGIC, 0, \
-                                     struct compat_ion_allocation_data)
-#define COMPAT_ION_IOC_FREE    _IOWR(ION_IOC_MAGIC, 1, \
-                                     struct compat_ion_handle_data)
-#define COMPAT_ION_IOC_CUSTOM  _IOWR(ION_IOC_MAGIC, 6, \
-                                     struct compat_ion_custom_data)
-
-static int compat_get_ion_allocation_data(
-                       struct compat_ion_allocation_data __user *data32,
-                       struct ion_allocation_data __user *data)
-{
-       compat_size_t s;
-       compat_uint_t u;
-       compat_int_t i;
-       int err;
-
-       err = get_user(s, &data32->len);
-       err |= put_user(s, &data->len);
-       err |= get_user(s, &data32->align);
-       err |= put_user(s, &data->align);
-       err |= get_user(u, &data32->heap_id_mask);
-       err |= put_user(u, &data->heap_id_mask);
-       err |= get_user(u, &data32->flags);
-       err |= put_user(u, &data->flags);
-       err |= get_user(i, &data32->handle);
-       err |= put_user(i, &data->handle);
-
-       return err;
-}
-
-static int compat_get_ion_handle_data(
-                       struct compat_ion_handle_data __user *data32,
-                       struct ion_handle_data __user *data)
-{
-       compat_int_t i;
-       int err;
-
-       err = get_user(i, &data32->handle);
-       err |= put_user(i, &data->handle);
-
-       return err;
-}
-
-static int compat_put_ion_allocation_data(
-                       struct compat_ion_allocation_data __user *data32,
-                       struct ion_allocation_data __user *data)
-{
-       compat_size_t s;
-       compat_uint_t u;
-       compat_int_t i;
-       int err;
-
-       err = get_user(s, &data->len);
-       err |= put_user(s, &data32->len);
-       err |= get_user(s, &data->align);
-       err |= put_user(s, &data32->align);
-       err |= get_user(u, &data->heap_id_mask);
-       err |= put_user(u, &data32->heap_id_mask);
-       err |= get_user(u, &data->flags);
-       err |= put_user(u, &data32->flags);
-       err |= get_user(i, &data->handle);
-       err |= put_user(i, &data32->handle);
-
-       return err;
-}
-
-static int compat_get_ion_custom_data(
-                       struct compat_ion_custom_data __user *data32,
-                       struct ion_custom_data __user *data)
-{
-       compat_uint_t cmd;
-       compat_ulong_t arg;
-       int err;
-
-       err = get_user(cmd, &data32->cmd);
-       err |= put_user(cmd, &data->cmd);
-       err |= get_user(arg, &data32->arg);
-       err |= put_user(arg, &data->arg);
-
-       return err;
-};
-
-long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-       long ret;
-
-       if (!filp->f_op || !filp->f_op->unlocked_ioctl)
-               return -ENOTTY;
-       pr_debug("%s, cmd: 0x%x", __FUNCTION__, cmd);
-       switch (cmd) {
-       case COMPAT_ION_IOC_ALLOC:
-       {
-               struct compat_ion_allocation_data __user *data32;
-               struct ion_allocation_data __user *data;
-               int err;
-
-               data32 = compat_ptr(arg);
-               data = compat_alloc_user_space(sizeof(*data));
-               if (data == NULL)
-                       return -EFAULT;
-
-               err = compat_get_ion_allocation_data(data32, data);
-               if (err)
-                       return err;
-               ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
-                                                       (unsigned long)data);
-               err = compat_put_ion_allocation_data(data32, data);
-               return ret ? ret : err;
-       }
-       case COMPAT_ION_IOC_FREE:
-       {
-               struct compat_ion_handle_data __user *data32;
-               struct ion_handle_data __user *data;
-               int err;
-
-               data32 = compat_ptr(arg);
-               data = compat_alloc_user_space(sizeof(*data));
-               if (data == NULL)
-                       return -EFAULT;
-
-               err = compat_get_ion_handle_data(data32, data);
-               if (err)
-                       return err;
-
-               return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
-                                                       (unsigned long)data);
-       }
-       case COMPAT_ION_IOC_CUSTOM: {
-               struct compat_ion_custom_data __user *data32;
-               struct ion_custom_data __user *data;
-               int err;
-
-               data32 = compat_ptr(arg);
-               data = compat_alloc_user_space(sizeof(*data));
-               if (data == NULL)
-                       return -EFAULT;
-
-               err = compat_get_ion_custom_data(data32, data);
-               if (err)
-                       return err;
-               if (compat_sprd_ion_ioctl)
-                       return compat_sprd_ion_ioctl(filp, data->cmd, data->arg);
-               else
-                       return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
-                                                               (unsigned long)data);
-       }
-       case ION_IOC_SHARE:
-       case ION_IOC_MAP:
-       case ION_IOC_IMPORT:
-       case ION_IOC_SYNC:
-       case ION_IOC_INVALIDATE:
-               return filp->f_op->unlocked_ioctl(filp, cmd,
-                                               (unsigned long)compat_ptr(arg));
-       default:
-               return -ENOIOCTLCMD;
-       }
-}
diff --git a/drivers/gpu/ion/compat_ion.h b/drivers/gpu/ion/compat_ion.h
deleted file mode 100644 (file)
index 3a9c8c0..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
-
- * drivers/gpu/ion/compat_ion.h
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_COMPAT_ION_H
-#define _LINUX_COMPAT_ION_H
-
-#if IS_ENABLED(CONFIG_COMPAT)
-
-long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-
-#else
-
-#define compat_ion_ioctl  NULL
-
-#endif /* CONFIG_COMPAT */
-#endif /* _LINUX_COMPAT_ION_H */
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
deleted file mode 100644 (file)
index 718477d..0000000
+++ /dev/null
@@ -1,1852 +0,0 @@
-/*
-
- * drivers/gpu/ion/ion.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/device.h>
-#include <linux/file.h>
-#include <linux/freezer.h>
-#include <linux/fs.h>
-#include <linux/anon_inodes.h>
-#include <linux/ion.h>
-#include <linux/kthread.h>
-#include <linux/list.h>
-#include <linux/memblock.h>
-#include <linux/miscdevice.h>
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/mm_types.h>
-#include <linux/rbtree.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/debugfs.h>
-#include <linux/dma-buf.h>
-#include <linux/idr.h>
-#include <linux/time.h>
-
-#include "ion_priv.h"
-#include "compat_ion.h"
-
-/**
- * struct ion_device - the metadata of the ion device node
- * @dev:               the actual misc device
- * @buffers:           an rb tree of all the existing buffers
- * @buffer_lock:       lock protecting the tree of buffers
- * @lock:              rwsem protecting the tree of heaps and clients
- * @heaps:             list of all the heaps in the system
- * @user_clients:      list of all the clients created from userspace
- */
-struct ion_device {
-       struct miscdevice dev;
-       struct rb_root buffers;
-       struct mutex buffer_lock;
-       struct rw_semaphore lock;
-       struct plist_head heaps;
-       long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
-                             unsigned long arg);
-       struct rb_root clients;
-       struct dentry *debug_root;
-};
-
-/**
- * struct ion_client - a process/hw block local address space
- * @node:              node in the tree of all clients
- * @dev:               backpointer to ion device
- * @handles:           an rb tree of all the handles in this client
- * @idr:               an idr space for allocating handle ids
- * @lock:              lock protecting the tree of handles
- * @name:              used for debugging
- * @task:              used for debugging
- *
- * A client represents a list of buffers this client may access.
- * The mutex stored here is used to protect both handles tree
- * as well as the handles themselves, and should be held while modifying either.
- */
-struct ion_client {
-       struct rb_node node;
-       struct ion_device *dev;
-       struct rb_root handles;
-       struct idr idr;
-       struct mutex lock;
-       const char *name;
-       struct task_struct *task;
-       pid_t pid;
-       pid_t tid;
-       struct dentry *debug_root;
-};
-
-/**
- * ion_handle - a client local reference to a buffer
- * @ref:               reference count
- * @client:            back pointer to the client the buffer resides in
- * @buffer:            pointer to the buffer
- * @node:              node in the client's handle rbtree
- * @kmap_cnt:          count of times this client has mapped to kernel
- * @id:                        client-unique id allocated by client->idr
- *
- * Modifications to node, map_cnt or mapping should be protected by the
- * lock in the client.  Other fields are never changed after initialization.
- */
-struct ion_handle {
-       struct kref ref;
-       struct ion_client *client;
-       struct ion_buffer *buffer;
-       struct rb_node node;
-       unsigned int kmap_cnt;
-       int id;
-};
-
-static int ion_debug_heap_show_err(struct ion_heap *heap);
-
-
-
-bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
-{
-       return ((buffer->flags & ION_FLAG_CACHED) &&
-               !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
-}
-
-bool ion_buffer_cached(struct ion_buffer *buffer)
-{
-       return !!(buffer->flags & ION_FLAG_CACHED);
-}
-
-static inline struct page *ion_buffer_page(struct page *page)
-{
-       return (struct page *)((unsigned long)page & ~(1UL));
-}
-
-static inline bool ion_buffer_page_is_dirty(struct page *page)
-{
-       return !!((unsigned long)page & 1UL);
-}
-
-static inline void ion_buffer_page_dirty(struct page **page)
-{
-       *page = (struct page *)((unsigned long)(*page) | 1UL);
-}
-
-static inline void ion_buffer_page_clean(struct page **page)
-{
-       *page = (struct page *)((unsigned long)(*page) & ~(1UL));
-}
-
-/* this function should only be called while dev->lock is held */
-static void ion_buffer_add(struct ion_device *dev,
-                          struct ion_buffer *buffer)
-{
-       struct rb_node **p = &dev->buffers.rb_node;
-       struct rb_node *parent = NULL;
-       struct ion_buffer *entry;
-
-       while (*p) {
-               parent = *p;
-               entry = rb_entry(parent, struct ion_buffer, node);
-
-               if (buffer < entry) {
-                       p = &(*p)->rb_left;
-               } else if (buffer > entry) {
-                       p = &(*p)->rb_right;
-               } else {
-                       pr_err("%s: buffer already found.", __func__);
-                       BUG();
-               }
-       }
-
-       rb_link_node(&buffer->node, parent, p);
-       rb_insert_color(&buffer->node, &dev->buffers);
-}
-
-/* this function should only be called while dev->lock is held */
-static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
-                                    struct ion_device *dev,
-                                    unsigned long len,
-                                    unsigned long align,
-                                    unsigned long flags)
-{
-       struct ion_buffer *buffer;
-       struct sg_table *table;
-       struct scatterlist *sg;
-       struct timeval time;
-       int i, ret;
-
-       buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
-       if (!buffer)
-               return ERR_PTR(-ENOMEM);
-
-       buffer->heap = heap;
-       buffer->flags = flags;
-       kref_init(&buffer->ref);
-
-       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
-               bool cached = ion_buffer_cached(buffer);
-               ion_heap_freelist_drain(heap, cached, len);
-       }
-       ret = heap->ops->allocate(heap, buffer, len, align, flags);
-       if (ret) {
-               if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
-                       goto err2;
-
-               ion_heap_freelist_drain(heap, -1, 0);
-               ret = heap->ops->allocate(heap, buffer, len, align,
-                                         flags);
-               if (ret)
-                       goto err2;
-       }
-
-       buffer->dev = dev;
-       buffer->size = len;
-
-       table = heap->ops->map_dma(heap, buffer);
-       if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
-               table = ERR_PTR(-EINVAL);
-       if (IS_ERR(table)) {
-               pr_err("%s: table is error and table is %p!\n",__func__,table);
-               heap->ops->free(buffer);
-               kfree(buffer);
-               return ERR_PTR(PTR_ERR(table));
-       }
-       buffer->sg_table = table;
-       if (ion_buffer_fault_user_mappings(buffer)) {
-               int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
-               struct scatterlist *sg;
-               int i, j, k = 0;
-
-               buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
-               if (!buffer->pages) {
-                       ret = -ENOMEM;
-                       goto err1;
-               }
-
-               for_each_sg(table->sgl, sg, table->nents, i) {
-                       struct page *page = sg_page(sg);
-
-                       for (j = 0; j < sg_dma_len(sg) / PAGE_SIZE; j++)
-                               buffer->pages[k++] = page++;
-               }
-
-       }
-
-       buffer->dev = dev;
-       buffer->size = len;
-       INIT_LIST_HEAD(&buffer->vmas);
-       mutex_init(&buffer->lock);
-       /* this will set up dma addresses for the sglist -- it is not
-          technically correct as per the dma api -- a specific
-          device isn't really taking ownership here.  However, in practice on
-          our systems the only dma_address space is physical addresses.
-          Additionally, we can't afford the overhead of invalidating every
-          allocation via dma_map_sg. The implicit contract here is that
-          memory comming from the heaps is ready for dma, ie if it has a
-          cached mapping that mapping has been invalidated */
-       for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
-               sg_dma_address(sg) = sg_phys(sg);
-       mutex_lock(&dev->buffer_lock);
-       ion_buffer_add(dev, buffer);
-       mutex_unlock(&dev->buffer_lock);
-
-       do_gettimeofday(&time);
-       buffer->alloc_time = time;
-       return buffer;
-
-
-err1:
-       if (buffer->pages)
-               vfree(buffer->pages);
-err2:
-       kfree(buffer);
-       return ERR_PTR(ret);
-}
-
-void ion_buffer_destroy(struct ion_buffer *buffer)
-{
-       if (WARN_ON(buffer->kmap_cnt > 0))
-               buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-       buffer->heap->ops->unmap_dma(buffer->heap, buffer);
-       buffer->heap->ops->free(buffer);
-       if (buffer->pages)
-               vfree(buffer->pages);
-       kfree(buffer);
-}
-
-static void _ion_buffer_destroy(struct kref *kref)
-{
-       struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
-       struct ion_heap *heap = buffer->heap;
-       struct ion_device *dev = buffer->dev;
-
-#if defined(CONFIG_SPRD_IOMMU)
-       if(buffer->iomap_cnt[IOMMU_GSP]>0)
-       {
-               buffer->iomap_cnt[IOMMU_GSP] = 0;
-               sprd_iova_unmap(IOMMU_GSP,buffer->iova[IOMMU_GSP],buffer->size);
-               sprd_iova_free(IOMMU_GSP,buffer->iova[IOMMU_GSP],buffer->size);
-       }
-
-       if(buffer->iomap_cnt[IOMMU_MM]>0)
-       {
-               buffer->iomap_cnt[IOMMU_MM] = 0;
-               sprd_iova_unmap(IOMMU_MM,buffer->iova[IOMMU_MM],buffer->size);
-               sprd_iova_free(IOMMU_MM,buffer->iova[IOMMU_MM],buffer->size);
-       }
-#endif
-
-       mutex_lock(&dev->buffer_lock);
-       rb_erase(&buffer->node, &dev->buffers);
-       mutex_unlock(&dev->buffer_lock);
-
-       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
-               ion_heap_freelist_add(heap, buffer);
-       else
-               ion_buffer_destroy(buffer);
-}
-
-static void ion_buffer_get(struct ion_buffer *buffer)
-{
-       kref_get(&buffer->ref);
-}
-
-static int ion_buffer_put(struct ion_buffer *buffer)
-{
-       return kref_put(&buffer->ref, _ion_buffer_destroy);
-}
-
-static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
-{
-       mutex_lock(&buffer->lock);
-       buffer->handle_count++;
-       mutex_unlock(&buffer->lock);
-}
-
-static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
-{
-       /*
-        * when a buffer is removed from a handle, if it is not in
-        * any other handles, copy the taskcomm and the pid of the
-        * process it's being removed from into the buffer.  At this
-        * point there will be no way to track what processes this buffer is
-        * being used by, it only exists as a dma_buf file descriptor.
-        * The taskcomm and pid can provide a debug hint as to where this fd
-        * is in the system
-        */
-       mutex_lock(&buffer->lock);
-       buffer->handle_count--;
-       BUG_ON(buffer->handle_count < 0);
-       if (!buffer->handle_count) {
-               struct task_struct *task;
-
-               task = current->group_leader;
-               get_task_comm(buffer->task_comm, task);
-               buffer->pid = task_pid_nr(task);
-               buffer->tid = task_pid_nr(current);
-       }
-       mutex_unlock(&buffer->lock);
-}
-
-static struct ion_handle *ion_handle_create(struct ion_client *client,
-                                    struct ion_buffer *buffer)
-{
-       struct ion_handle *handle;
-
-       handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
-       if (!handle)
-               return ERR_PTR(-ENOMEM);
-       kref_init(&handle->ref);
-       RB_CLEAR_NODE(&handle->node);
-       handle->client = client;
-       ion_buffer_get(buffer);
-       ion_buffer_add_to_handle(buffer);
-       handle->buffer = buffer;
-
-       return handle;
-}
-
-static void ion_handle_kmap_put(struct ion_handle *);
-
-static void ion_handle_destroy(struct kref *kref)
-{
-       struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
-       struct ion_client *client = handle->client;
-       struct ion_buffer *buffer = handle->buffer;
-
-       mutex_lock(&buffer->lock);
-       while (handle->kmap_cnt)
-               ion_handle_kmap_put(handle);
-       mutex_unlock(&buffer->lock);
-
-       idr_remove(&client->idr, handle->id);
-       if (!RB_EMPTY_NODE(&handle->node))
-               rb_erase(&handle->node, &client->handles);
-
-       ion_buffer_remove_from_handle(buffer);
-       ion_buffer_put(buffer);
-
-       kfree(handle);
-}
-
-
-struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
-{
-       return handle->buffer;
-}
-
-static void ion_handle_get(struct ion_handle *handle)
-{
-       kref_get(&handle->ref);
-}
-
-static int ion_handle_put(struct ion_handle *handle)
-{
-       struct ion_client *client = handle->client;
-       int ret;
-
-       mutex_lock(&client->lock);
-       ret = kref_put(&handle->ref, ion_handle_destroy);
-       mutex_unlock(&client->lock);
-
-       return ret;
-}
-
-static struct ion_handle *ion_handle_lookup(struct ion_client *client,
-                                           struct ion_buffer *buffer)
-{
-       struct rb_node *n = client->handles.rb_node;
-
-       while (n) {
-               struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
-               if (buffer < entry->buffer)
-                       n = n->rb_left;
-               else if (buffer > entry->buffer)
-                       n = n->rb_right;
-               else
-                       return entry;
-       }
-       return ERR_PTR(-EINVAL);
-}
-
-static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
-                                               int id)
-{
-       struct ion_handle *handle;
-
-       mutex_lock(&client->lock);
-       handle = idr_find(&client->idr, id);
-       if (handle)
-               ion_handle_get(handle);
-       mutex_unlock(&client->lock);
-
-       return handle ? handle : ERR_PTR(-EINVAL);
-}
-
-static bool ion_handle_validate(struct ion_client *client,
-                               struct ion_handle *handle)
-{
-       WARN_ON(!mutex_is_locked(&client->lock));
-       return (idr_find(&client->idr, handle->id) == handle);
-}
-
-static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
-{
-       int id;
-       struct rb_node **p = &client->handles.rb_node;
-       struct rb_node *parent = NULL;
-       struct ion_handle *entry;
-
-       id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
-       if (id < 0)
-               return id;
-
-       handle->id = id;
-
-       while (*p) {
-               parent = *p;
-               entry = rb_entry(parent, struct ion_handle, node);
-
-               if (handle->buffer < entry->buffer)
-                       p = &(*p)->rb_left;
-               else if (handle->buffer > entry->buffer)
-                       p = &(*p)->rb_right;
-               else
-                       WARN(1, "%s: buffer already found.", __func__);
-       }
-
-       rb_link_node(&handle->node, parent, p);
-       rb_insert_color(&handle->node, &client->handles);
-
-       return 0;
-}
-
-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
-                            size_t align, unsigned int heap_id_mask,
-                            unsigned int flags)
-{
-       struct ion_handle *handle;
-       struct ion_device *dev = client->dev;
-       struct ion_buffer *buffer = NULL;
-       struct ion_heap *heap;
-       int ret;
-
-       pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
-                len, align, heap_id_mask, flags);
-       /*
-        * traverse the list of heaps available in this system in priority
-        * order.  If the heap type is supported by the client, and matches the
-        * request of the caller allocate from it.  Repeat until allocate has
-        * succeeded or all heaps have been tried
-        */
-       if (WARN_ON(!len))
-               return ERR_PTR(-EINVAL);
-
-       len = PAGE_ALIGN(len);
-
-       down_read(&dev->lock);
-       plist_for_each_entry(heap, &dev->heaps, node) {
-               /* if the caller didn't specify this heap id */
-               if (!((1 << heap->id) & heap_id_mask))
-                       continue;
-               buffer = ion_buffer_create(heap, dev, len, align, flags);
-               if (!IS_ERR(buffer))
-                       break;
-       }
-       up_read(&dev->lock);
-
-       if (buffer == NULL)
-       {
-               pr_err("%s: buffer is NULL!\n",__func__);
-               ion_debug_heap_show_err(heap);
-               return ERR_PTR(-ENODEV);
-       }
-
-       if (IS_ERR(buffer))
-       {
-               pr_err("%s: ion alloc buffer is error! and the buffer is %p\n",__func__,buffer);
-               return ERR_PTR(PTR_ERR(buffer));
-       }
-
-       handle = ion_handle_create(client, buffer);
-
-       /*
-        * ion_buffer_create will create a buffer with a ref_cnt of 1,
-        * and ion_handle_create will take a second reference, drop one here
-        */
-       ion_buffer_put(buffer);
-
-       if (IS_ERR(handle))
-       {
-               pr_err("%s: handle is error! and the handle is %p\n",__func__,handle);
-               ion_debug_heap_show_err(heap);
-               return handle;
-       }
-
-       mutex_lock(&client->lock);
-       ret = ion_handle_add(client, handle);
-       mutex_unlock(&client->lock);
-       if (ret) {
-               ion_handle_put(handle);
-               handle = ERR_PTR(ret);
-       }
-
-       return handle;
-}
-EXPORT_SYMBOL(ion_alloc);
-
-void ion_free(struct ion_client *client, struct ion_handle *handle)
-{
-       bool valid_handle;
-
-       BUG_ON(client != handle->client);
-
-       mutex_lock(&client->lock);
-       valid_handle = ion_handle_validate(client, handle);
-
-       if (!valid_handle) {
-               WARN(1, "%s: invalid handle passed to free.\n", __func__);
-               mutex_unlock(&client->lock);
-               return;
-       }
-       mutex_unlock(&client->lock);
-       ion_handle_put(handle);
-}
-EXPORT_SYMBOL(ion_free);
-
-int ion_phys(struct ion_client *client, struct ion_handle *handle,
-            ion_phys_addr_t *addr, size_t *len)
-{
-       struct ion_buffer *buffer;
-       int ret;
-
-       mutex_lock(&client->lock);
-       if (!ion_handle_validate(client, handle)) {
-               mutex_unlock(&client->lock);
-               return -EINVAL;
-       }
-
-       buffer = handle->buffer;
-
-       if (!buffer->heap->ops->phys) {
-               pr_err("%s: ion_phys is not implemented by this heap.\n",
-                      __func__);
-               mutex_unlock(&client->lock);
-               return -ENODEV;
-       }
-       mutex_unlock(&client->lock);
-       ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
-       return ret;
-}
-EXPORT_SYMBOL(ion_phys);
-
-int ion_is_phys(struct ion_client *client, struct ion_handle *handle)
-{
-       struct ion_buffer *buffer;
-       int ret = 0;
-
-       mutex_lock(&client->lock);
-       if (!ion_handle_validate(client, handle)) {
-               mutex_unlock(&client->lock);
-               return -EINVAL;
-       }
-
-       buffer = handle->buffer;
-
-       if (!buffer->heap->ops->phys)
-               ret = -1;
-
-       mutex_unlock(&client->lock);
-
-       return ret;
-}
-EXPORT_SYMBOL(ion_is_phys);
-
-static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
-{
-       void *vaddr;
-
-       if (buffer->kmap_cnt) {
-               buffer->kmap_cnt++;
-               return buffer->vaddr;
-       }
-       vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
-       if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
-               return ERR_PTR(-EINVAL);
-       if (IS_ERR(vaddr))
-       {
-               pr_err("%s: vaddr is error and vaddr is %p!\n",__func__,vaddr);
-               return vaddr;
-       }
-       buffer->vaddr = vaddr;
-       buffer->kmap_cnt++;
-       return vaddr;
-}
-
-static void *ion_handle_kmap_get(struct ion_handle *handle)
-{
-       struct ion_buffer *buffer = handle->buffer;
-       void *vaddr;
-
-       if (handle->kmap_cnt) {
-               handle->kmap_cnt++;
-               return buffer->vaddr;
-       }
-       vaddr = ion_buffer_kmap_get(buffer);
-       if (IS_ERR(vaddr))
-       {
-               pr_err("%s: vaddr is error! and vaddr is %p\n",__func__,vaddr);
-               return vaddr;
-       }
-       handle->kmap_cnt++;
-       return vaddr;
-}
-
-static void ion_buffer_kmap_put(struct ion_buffer *buffer)
-{
-       buffer->kmap_cnt--;
-       if (!buffer->kmap_cnt) {
-               buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-               buffer->vaddr = NULL;
-       }
-}
-
-static void ion_handle_kmap_put(struct ion_handle *handle)
-{
-       struct ion_buffer *buffer = handle->buffer;
-
-       handle->kmap_cnt--;
-       if (!handle->kmap_cnt)
-               ion_buffer_kmap_put(buffer);
-}
-
-int ion_map_iommu(struct ion_client *client, struct ion_handle *handle, int domain_no, unsigned long *ptr_iova)
-{
-       struct ion_buffer *buffer;
-
-       mutex_lock(&client->lock);
-       if (!ion_handle_validate(client, handle)) {
-               pr_err("%s: invalid handle passed to map_kernel.\n",
-                      __func__);
-               mutex_unlock(&client->lock);
-               return -EINVAL;
-       }
-
-       buffer = handle->buffer;
-
-       if (!handle->buffer->heap->ops->map_iommu) {
-               pr_err("%s: map_kernel is not implemented by this heap.\n",
-                      __func__);
-               mutex_unlock(&client->lock);
-               return -ENODEV;
-       }
-
-       mutex_lock(&buffer->lock);
-       handle->buffer->heap->ops->map_iommu(buffer,domain_no,ptr_iova);
-       mutex_unlock(&buffer->lock);
-       mutex_unlock(&client->lock);
-       return 0;
-}
-EXPORT_SYMBOL(ion_map_iommu);
-
-int ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle, int domain_no)
-{
-       struct ion_buffer *buffer;
-
-       mutex_lock(&client->lock);
-       if (!ion_handle_validate(client, handle)) {
-               pr_err("%s: invalid handle passed to map_kernel.\n",
-                      __func__);
-               mutex_unlock(&client->lock);
-               return -EINVAL;
-       }
-
-       buffer = handle->buffer;
-
-       if (!handle->buffer->heap->ops->map_iommu) {
-               pr_err("%s: map_kernel is not implemented by this heap.\n",
-                      __func__);
-               mutex_unlock(&client->lock);
-               return -ENODEV;
-       }
-
-       mutex_lock(&buffer->lock);
-       handle->buffer->heap->ops->unmap_iommu(buffer,domain_no);
-       mutex_unlock(&buffer->lock);
-       mutex_unlock(&client->lock);
-       return 0;
-}
-EXPORT_SYMBOL(ion_unmap_iommu);
-
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
-{
-       struct ion_buffer *buffer;
-       void *vaddr;
-
-       mutex_lock(&client->lock);
-       if (!ion_handle_validate(client, handle)) {
-               pr_err("%s: invalid handle passed to map_kernel.\n",
-                      __func__);
-               mutex_unlock(&client->lock);
-               return ERR_PTR(-EINVAL);
-       }
-
-       buffer = handle->buffer;
-
-       if (!handle->buffer->heap->ops->map_kernel) {
-               pr_err("%s: map_kernel is not implemented by this heap.\n",
-                      __func__);
-               mutex_unlock(&client->lock);
-               return ERR_PTR(-ENODEV);
-       }
-
-       mutex_lock(&buffer->lock);
-       vaddr = ion_handle_kmap_get(handle);
-       mutex_unlock(&buffer->lock);
-       mutex_unlock(&client->lock);
-       return vaddr;
-}
-EXPORT_SYMBOL(ion_map_kernel);
-
-void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
-{
-       struct ion_buffer *buffer;
-
-       mutex_lock(&client->lock);
-       buffer = handle->buffer;
-       mutex_lock(&buffer->lock);
-       ion_handle_kmap_put(handle);
-       mutex_unlock(&buffer->lock);
-       mutex_unlock(&client->lock);
-}
-EXPORT_SYMBOL(ion_unmap_kernel);
-
-static int ion_debug_client_show(struct seq_file *s, void *unused)
-{
-       struct ion_client *client = s->private;
-       struct rb_node *n;
-       size_t sizes[ION_NUM_HEAP_IDS] = {0};
-       const char *names[ION_NUM_HEAP_IDS] = {0};
-       int i;
-
-       mutex_lock(&client->lock);
-       for (n = rb_first(&client->handles); n; n = rb_next(n)) {
-               struct ion_handle *handle = rb_entry(n, struct ion_handle,
-                                                    node);
-               unsigned int id = handle->buffer->heap->id;
-
-               if (!names[id])
-                       names[id] = handle->buffer->heap->name;
-               sizes[id] += handle->buffer->size;
-       }
-       mutex_unlock(&client->lock);
-
-       seq_printf(s, "%32.32s: %32.32s\n", "heap_name", "size_in_bytes");
-       for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
-               if (!names[i])
-                       continue;
-               seq_printf(s, "%32.32s: %32u\n", names[i], sizes[i]);
-       }
-       return 0;
-}
-
-static int ion_debug_client_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ion_debug_client_show, inode->i_private);
-}
-
-static const struct file_operations debug_client_fops = {
-       .open = ion_debug_client_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-struct ion_client *ion_client_create(struct ion_device *dev,
-                                    const char *name)
-{
-       struct ion_client *client;
-       struct task_struct *task;
-       struct rb_node **p;
-       struct rb_node *parent = NULL;
-       struct ion_client *entry;
-       char debug_name[64];
-       pid_t pid;
-       pid_t tid;
-
-       get_task_struct(current->group_leader);
-       task_lock(current->group_leader);
-       pid = task_pid_nr(current->group_leader);
-       tid = task_pid_nr(current);
-       /* don't bother to store task struct for kernel threads,
-          they can't be killed anyway */
-       if (current->group_leader->flags & PF_KTHREAD) {
-               put_task_struct(current->group_leader);
-               task = NULL;
-       } else {
-               task = current->group_leader;
-       }
-       task_unlock(current->group_leader);
-
-       client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
-       if (!client) {
-               if (task)
-                       put_task_struct(current->group_leader);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       client->dev = dev;
-       client->handles = RB_ROOT;
-       idr_init(&client->idr);
-       mutex_init(&client->lock);
-       client->name = name;
-       client->task = task;
-       client->pid = pid;
-       client->tid = tid;
-
-       down_write(&dev->lock);
-       p = &dev->clients.rb_node;
-       while (*p) {
-               parent = *p;
-               entry = rb_entry(parent, struct ion_client, node);
-
-               if (client < entry)
-                       p = &(*p)->rb_left;
-               else if (client > entry)
-                       p = &(*p)->rb_right;
-       }
-       rb_link_node(&client->node, parent, p);
-       rb_insert_color(&client->node, &dev->clients);
-
-       snprintf(debug_name, 64, "%u", client->pid);
-       client->debug_root = debugfs_create_file(debug_name, 0664,
-                                                dev->debug_root, client,
-                                                &debug_client_fops);
-       up_write(&dev->lock);
-
-       return client;
-}
-EXPORT_SYMBOL(ion_client_create);
-
-void ion_client_destroy(struct ion_client *client)
-{
-       struct ion_device *dev = client->dev;
-       struct rb_node *n;
-
-       pr_debug("%s: %d\n", __func__, __LINE__);
-       while ((n = rb_first(&client->handles))) {
-               struct ion_handle *handle = rb_entry(n, struct ion_handle,
-                                                    node);
-               ion_handle_destroy(&handle->ref);
-       }
-
-       idr_destroy(&client->idr);
-
-       down_write(&dev->lock);
-       if (client->task)
-               put_task_struct(client->task);
-       rb_erase(&client->node, &dev->clients);
-       debugfs_remove_recursive(client->debug_root);
-       up_write(&dev->lock);
-
-       kfree(client);
-}
-EXPORT_SYMBOL(ion_client_destroy);
-
-int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
-                       unsigned long *size, unsigned int *heap_id)
-{
-       struct ion_buffer *buffer;
-       struct ion_heap *heap;
-
-       mutex_lock(&client->lock);
-       if (!ion_handle_validate(client, handle)) {
-               pr_err("%s: invalid handle passed to %s.\n",
-                               __func__, __func__);
-               mutex_unlock(&client->lock);
-               return -EINVAL;
-       }
-       buffer = handle->buffer;
-       mutex_lock(&buffer->lock);
-       heap = buffer->heap;
-       *heap_id = (1 << heap->id);
-       *size = buffer->size;
-       mutex_unlock(&buffer->lock);
-       mutex_unlock(&client->lock);
-
-       return 0;
-}
-EXPORT_SYMBOL(ion_handle_get_size);
-
-struct sg_table *ion_sg_table(struct ion_client *client,
-                             struct ion_handle *handle)
-{
-       struct ion_buffer *buffer;
-       struct sg_table *table;
-
-       mutex_lock(&client->lock);
-       if (!ion_handle_validate(client, handle)) {
-               pr_err("%s: invalid handle passed to map_dma.\n",
-                      __func__);
-               mutex_unlock(&client->lock);
-               return ERR_PTR(-EINVAL);
-       }
-       buffer = handle->buffer;
-       table = buffer->sg_table;
-       mutex_unlock(&client->lock);
-       return table;
-}
-EXPORT_SYMBOL(ion_sg_table);
-
-static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
-                                      struct device *dev,
-                                      enum dma_data_direction direction);
-
-static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
-                                       enum dma_data_direction direction)
-{
-       struct dma_buf *dmabuf = attachment->dmabuf;
-       struct ion_buffer *buffer = dmabuf->priv;
-
-       ion_buffer_sync_for_device(buffer, attachment->dev, direction);
-       return buffer->sg_table;
-}
-
-static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
-                             struct sg_table *table,
-                             enum dma_data_direction direction)
-{
-}
-
-struct ion_vma_list {
-       struct list_head list;
-       struct vm_area_struct *vma;
-};
-
-static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
-                                      struct device *dev,
-                                      enum dma_data_direction dir)
-{
-       struct ion_vma_list *vma_list;
-       int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
-       int i;
-
-       pr_debug("%s: syncing for device %s\n", __func__,
-                dev ? dev_name(dev) : "null");
-
-       if (!ion_buffer_fault_user_mappings(buffer))
-               return;
-
-       mutex_lock(&buffer->lock);
-       for (i = 0; i < pages; i++) {
-               struct page *page = buffer->pages[i];
-               if (ion_buffer_page_is_dirty(page))
-               {
-#ifdef CONFIG_64BIT
-       dma_sync_single_for_device(NULL, (dma_addr_t)page_to_phys(page),
-               PAGE_SIZE, dir);
-#else
-       arm_dma_ops.sync_single_for_device(NULL,
-               pfn_to_dma(NULL, page_to_pfn(page)),
-               PAGE_SIZE, dir);
-#endif
-               }
-               ion_buffer_page_clean(buffer->pages + i);
-       }
-       list_for_each_entry(vma_list, &buffer->vmas, list) {
-               struct vm_area_struct *vma = vma_list->vma;
-
-               zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
-                              NULL);
-       }
-       mutex_unlock(&buffer->lock);
-}
-
-int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-       struct ion_buffer *buffer = vma->vm_private_data;
-       unsigned long pfn;
-       int ret;
-
-       mutex_lock(&buffer->lock);
-       ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
-       BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
-
-       pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
-       ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
-       mutex_unlock(&buffer->lock);
-       if (ret)
-               return VM_FAULT_ERROR;
-
-       return VM_FAULT_NOPAGE;
-}
-
-static void ion_vm_open(struct vm_area_struct *vma)
-{
-       struct ion_buffer *buffer = vma->vm_private_data;
-       struct ion_vma_list *vma_list;
-
-       vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
-       if (!vma_list)
-               return;
-       vma_list->vma = vma;
-       mutex_lock(&buffer->lock);
-       list_add(&vma_list->list, &buffer->vmas);
-       mutex_unlock(&buffer->lock);
-       pr_debug("%s: adding %p\n", __func__, vma);
-}
-
-static void ion_vm_close(struct vm_area_struct *vma)
-{
-       struct ion_buffer *buffer = vma->vm_private_data;
-       struct ion_vma_list *vma_list, *tmp;
-
-       pr_debug("%s\n", __func__);
-       mutex_lock(&buffer->lock);
-       list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
-               if (vma_list->vma != vma)
-                       continue;
-               list_del(&vma_list->list);
-               kfree(vma_list);
-               pr_debug("%s: deleting %p\n", __func__, vma);
-               break;
-       }
-       mutex_unlock(&buffer->lock);
-}
-
-struct vm_operations_struct ion_vma_ops = {
-       .open = ion_vm_open,
-       .close = ion_vm_close,
-       .fault = ion_vm_fault,
-};
-
-static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
-       struct ion_buffer *buffer = dmabuf->priv;
-       int ret = 0;
-
-       if (!buffer->heap->ops->map_user) {
-               pr_err("%s: this heap does not define a method for mapping "
-                      "to userspace\n", __func__);
-               return -EINVAL;
-       }
-
-       if (ion_buffer_fault_user_mappings(buffer)) {
-               vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
-                                                       VM_DONTDUMP;
-               vma->vm_private_data = buffer;
-               vma->vm_ops = &ion_vma_ops;
-               ion_vm_open(vma);
-               return 0;
-       }
-
-       if (!(buffer->flags & ION_FLAG_CACHED))
-               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
-       mutex_lock(&buffer->lock);
-       /* now map it to userspace */
-       ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
-       mutex_unlock(&buffer->lock);
-
-       if (ret)
-               pr_err("%s: failure mapping buffer to userspace\n",
-                      __func__);
-
-       return ret;
-}
-
-static void ion_dma_buf_release(struct dma_buf *dmabuf)
-{
-       struct ion_buffer *buffer = dmabuf->priv;
-       ion_buffer_put(buffer);
-}
-
-static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
-{
-       struct ion_buffer *buffer = dmabuf->priv;
-       return buffer->vaddr + offset * PAGE_SIZE;
-}
-
-static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
-                              void *ptr)
-{
-       return;
-}
-
-static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
-                                       size_t len,
-                                       enum dma_data_direction direction)
-{
-       struct ion_buffer *buffer = dmabuf->priv;
-       void *vaddr;
-
-       if (!buffer->heap->ops->map_kernel) {
-               pr_err("%s: map kernel is not implemented by this heap.\n",
-                      __func__);
-               return -ENODEV;
-       }
-
-       mutex_lock(&buffer->lock);
-       vaddr = ion_buffer_kmap_get(buffer);
-       mutex_unlock(&buffer->lock);
-       if (IS_ERR(vaddr))
-       {
-               pr_err("%s: vaddr is error and vaddr is %p!\n",__func__,vaddr);
-               return PTR_ERR(vaddr);
-       }
-       return 0;
-}
-
-static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
-                                      size_t len,
-                                      enum dma_data_direction direction)
-{
-       struct ion_buffer *buffer = dmabuf->priv;
-
-       mutex_lock(&buffer->lock);
-       ion_buffer_kmap_put(buffer);
-       mutex_unlock(&buffer->lock);
-}
-
-struct dma_buf_ops dma_buf_ops = {
-       .map_dma_buf = ion_map_dma_buf,
-       .unmap_dma_buf = ion_unmap_dma_buf,
-       .mmap = ion_mmap,
-       .release = ion_dma_buf_release,
-       .begin_cpu_access = ion_dma_buf_begin_cpu_access,
-       .end_cpu_access = ion_dma_buf_end_cpu_access,
-       .kmap_atomic = ion_dma_buf_kmap,
-       .kunmap_atomic = ion_dma_buf_kunmap,
-       .kmap = ion_dma_buf_kmap,
-       .kunmap = ion_dma_buf_kunmap,
-};
-
-struct dma_buf *ion_share_dma_buf(struct ion_client *client,
-                                               struct ion_handle *handle)
-{
-       struct ion_buffer *buffer;
-       struct dma_buf *dmabuf;
-       bool valid_handle;
-
-       mutex_lock(&client->lock);
-       valid_handle = ion_handle_validate(client, handle);
-       if (!valid_handle) {
-               WARN(1, "%s: invalid handle passed to share.\n", __func__);
-               mutex_unlock(&client->lock);
-               return ERR_PTR(-EINVAL);
-       }
-
-       buffer = handle->buffer;
-       ion_buffer_get(buffer);
-       mutex_unlock(&client->lock);
-       dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
-       if (IS_ERR(dmabuf)) {
-               pr_err("%s: dmabuf export is error and dmabuf is %p!\n",__func__,dmabuf);
-               ion_buffer_put(buffer);
-               return dmabuf;
-       }
-
-       return dmabuf;
-}
-EXPORT_SYMBOL(ion_share_dma_buf);
-
-int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
-{
-       struct dma_buf *dmabuf;
-       int fd;
-
-       dmabuf = ion_share_dma_buf(client, handle);
-       if (IS_ERR(dmabuf))
-       {
-               pr_err("%s: dmabuf is error and dmabuf is %p!\n",__func__,dmabuf);
-               return PTR_ERR(dmabuf);
-       }
-
-       fd = dma_buf_fd(dmabuf, O_CLOEXEC);
-       if (fd < 0)
-       {
-               pr_err("%s: dmabuf fd is error!\n",__func__);
-               dma_buf_put(dmabuf);
-       }
-
-       return fd;
-}
-EXPORT_SYMBOL(ion_share_dma_buf_fd);
-
-struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
-{
-       struct dma_buf *dmabuf;
-       struct ion_buffer *buffer;
-       struct ion_handle *handle;
-       int ret;
-
-       dmabuf = dma_buf_get(fd);
-       if (IS_ERR(dmabuf)) {
-               pr_err("ion_import_dma_buf() dmabuf=0x%lx, fd:%d, dma_buf_get error!\n", (unsigned long)dmabuf, fd);
-               return ERR_PTR(PTR_ERR(dmabuf));
-       }
-       /* if this memory came from ion */
-
-       if (dmabuf->ops != &dma_buf_ops) {
-               pr_err("%s: can not import dmabuf from another exporter\n",
-                      __func__);
-               dma_buf_put(dmabuf);
-               return ERR_PTR(-EINVAL);
-       }
-       buffer = dmabuf->priv;
-
-       mutex_lock(&client->lock);
-       /* if a handle exists for this buffer just take a reference to it */
-       handle = ion_handle_lookup(client, buffer);
-       if (!IS_ERR(handle)) {
-               ion_handle_get(handle);
-               mutex_unlock(&client->lock);
-               goto end;
-       }
-       mutex_unlock(&client->lock);
-       handle = ion_handle_create(client, buffer);
-       if (IS_ERR(handle)) {
-               pr_err("ion_import_dma_buf() handle=0x%lx ion_handle_create error!\n", (unsigned long)handle);
-               goto end;
-       }
-       mutex_lock(&client->lock);
-       ret = ion_handle_add(client, handle);
-       mutex_unlock(&client->lock);
-       if (ret) {
-               pr_err("ion_import_dma_buf() ion_handle_add error %d!\n", ret);
-               ion_handle_put(handle);
-               handle = ERR_PTR(ret);
-       }
-end:
-       dma_buf_put(dmabuf);
-       return handle;
-}
-EXPORT_SYMBOL(ion_import_dma_buf);
-
-static int ion_invalidate_for_cpu(struct ion_client *client, int fd)
-{
-       struct dma_buf *dmabuf;
-       struct ion_buffer *buffer;
-
-       dmabuf = dma_buf_get(fd);
-       if (IS_ERR(dmabuf))
-       {
-               pr_err("%s: dmabuf is error and dmabuf is %p!\n",__func__,dmabuf);
-               return PTR_ERR(dmabuf);
-       }
-
-       /* if this memory came from ion */
-       if (dmabuf->ops != &dma_buf_ops) {
-               pr_err("%s: can not sync dmabuf from another exporter\n",
-                      __func__);
-               dma_buf_put(dmabuf);
-               return -EINVAL;
-       }
-       buffer = dmabuf->priv;
-
-       dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
-                              buffer->sg_table->nents, DMA_FROM_DEVICE);
-       dma_buf_put(dmabuf);
-       return 0;
-}
-
-static int ion_sync_for_device(struct ion_client *client, int fd)
-{
-       struct dma_buf *dmabuf;
-       struct ion_buffer *buffer;
-
-       dmabuf = dma_buf_get(fd);
-       if (IS_ERR(dmabuf))
-       {
-               pr_err("%s: the dmabuf is err dmabuf is %p\n",__func__,dmabuf);
-               return PTR_ERR(dmabuf);
-       }
-
-       /* if this memory came from ion */
-       if (dmabuf->ops != &dma_buf_ops) {
-               pr_err("%s: can not sync dmabuf from another exporter\n",
-                      __func__);
-               dma_buf_put(dmabuf);
-               return -EINVAL;
-       }
-       buffer = dmabuf->priv;
-
-       dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
-                              buffer->sg_table->nents, DMA_BIDIRECTIONAL);
-       dma_buf_put(dmabuf);
-       return 0;
-}
-
-/* fix up the cases where the ioctl direction bits are incorrect */
-static unsigned int ion_ioctl_dir(unsigned int cmd)
-{
-       switch (cmd) {
-       case ION_IOC_SYNC:
-       case ION_IOC_FREE:
-       case ION_IOC_CUSTOM:
-               return _IOC_WRITE;
-       default:
-               return _IOC_DIR(cmd);
-       }
-}
-
-static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-       struct ion_client *client = filp->private_data;
-       struct ion_device *dev = client->dev;
-       struct ion_handle *cleanup_handle = NULL;
-       long ret = 0;
-       unsigned int dir;
-
-       union {
-               struct ion_fd_data fd;
-               struct ion_allocation_data allocation;
-               struct ion_handle_data handle;
-               struct ion_custom_data custom;
-       } data;
-
-       dir = ion_ioctl_dir(cmd);
-
-       pr_info("%s:cmd[0x%x]\n", __func__, cmd);
-
-       if (_IOC_SIZE(cmd) > sizeof(data)) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (dir & _IOC_WRITE)
-               if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) {
-                       ret = -EFAULT;
-                       goto out;
-               }
-
-       switch (cmd) {
-       case ION_IOC_ALLOC:
-       {
-               struct ion_handle *handle;
-
-               handle = ion_alloc(client, data.allocation.len,
-                                               data.allocation.align,
-                                               data.allocation.heap_id_mask,
-                                               data.allocation.flags);
-               if (IS_ERR(handle)) {
-                       pr_err("%s: ion alloc error! and handle is %p\n",__func__,handle);
-                       ret = PTR_ERR(handle);
-                       goto out;
-               }
-
-               data.allocation.handle = handle->id;
-
-               cleanup_handle = handle;
-               break;
-       }
-       case ION_IOC_FREE:
-       {
-               struct ion_handle *handle;
-
-               handle = ion_handle_get_by_id(client, data.handle.handle);
-               if (IS_ERR(handle)) {
-                       pr_err("%s: ion free error!\n",__func__);
-                       ret = PTR_ERR(handle);
-                       goto out;
-               }
-               ion_free(client, handle);
-               ion_handle_put(handle);
-               break;
-       }
-       case ION_IOC_SHARE:
-       case ION_IOC_MAP:
-       {
-               struct ion_handle *handle;
-
-               handle = ion_handle_get_by_id(client, data.handle.handle);
-               if (IS_ERR(handle)) {
-                       pr_err("%s: ion map handle error!\n",__func__);
-                       ret = PTR_ERR(handle);
-                       goto out;
-               }
-               data.fd.fd = ion_share_dma_buf_fd(client, handle);
-               ion_handle_put(handle);
-               if (data.fd.fd < 0) {
-                       pr_err("%s: ion map data.fd error!\n",__func__);
-                       ret = data.fd.fd;
-               }
-               break;
-       }
-       case ION_IOC_IMPORT:
-       {
-               struct ion_handle *handle;
-               handle = ion_import_dma_buf(client, data.fd.fd);
-               if (IS_ERR(handle)) {
-                       pr_err("%s: ion import error! and handle is %p\n",__func__,handle);
-                       ret = PTR_ERR(handle);
-               } else {
-                       data.handle.handle = handle->id;
-               }
-               break;
-       }
-       case ION_IOC_INVALIDATE:
-       {
-               ret = ion_invalidate_for_cpu(client, data.fd.fd);
-               break;
-       }
-       case ION_IOC_SYNC:
-       {
-               ret = ion_sync_for_device(client, data.fd.fd);
-               break;
-       }
-       case ION_IOC_CUSTOM:
-       {
-               if (!dev->custom_ioctl) {
-                       ret = -ENOTTY;
-                       goto out;
-               }
-               ret = dev->custom_ioctl(client, data.custom.cmd,
-                                               data.custom.arg);
-               break;
-       }
-       default:
-               ret = -ENOTTY;
-               goto out;
-       }
-
-       if (dir & _IOC_READ) {
-               if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
-                       if (cleanup_handle)
-                               ion_free(client, cleanup_handle);
-                       ret = -EFAULT;
-                       goto out;
-               }
-       }
-
-out:
-       pr_info("%s:cmd[0x%x]ret[%d]\n", __func__, cmd, ret);
-       return ret;
-}
-
-static int ion_release(struct inode *inode, struct file *file)
-{
-       struct ion_client *client = file->private_data;
-
-       pr_debug("%s: %d\n", __func__, __LINE__);
-       ion_client_destroy(client);
-       return 0;
-}
-
-static int ion_open(struct inode *inode, struct file *file)
-{
-       struct miscdevice *miscdev = file->private_data;
-       struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
-       struct ion_client *client;
-
-       pr_debug("%s: %d\n", __func__, __LINE__);
-       client = ion_client_create(dev, "user");
-       if (IS_ERR(client))
-       {
-               pr_err("%s: client is error and client is %p!\n",__func__,client);
-               return PTR_ERR(client);
-       }
-       file->private_data = client;
-
-       return 0;
-}
-
-static const struct file_operations ion_fops = {
-       .owner          = THIS_MODULE,
-       .open           = ion_open,
-       .release        = ion_release,
-       .unlocked_ioctl = ion_ioctl,
-       .compat_ioctl   = compat_ion_ioctl,
-};
-
-static size_t ion_debug_heap_total(struct ion_client *client,
-                                  unsigned int id)
-{
-       size_t size = 0;
-       struct rb_node *n;
-
-       mutex_lock(&client->lock);
-       for (n = rb_first(&client->handles); n; n = rb_next(n)) {
-               struct ion_handle *handle = rb_entry(n,
-                                                    struct ion_handle,
-                                                    node);
-               if (handle->buffer->heap->id == id)
-                       size += handle->buffer->size;
-       }
-       mutex_unlock(&client->lock);
-       return size;
-}
-
-static int ion_debug_heap_show_err(struct ion_heap *heap)
-{
-       //struct ion_heap *heap = s->private;
-       struct ion_device *dev = heap->dev;
-       struct rb_node *n;
-       size_t total_size = 0;
-       size_t total_orphaned_size = 0;
-
-       pr_err("%8.s %8.s %8.s\n", "client", "pid", "size");
-       pr_err("----------------------------------------------------\n");
-
-       for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
-               struct ion_client *client = rb_entry(n, struct ion_client,
-                                                    node);
-               size_t size = ion_debug_heap_total(client, heap->id);
-               if (!size)
-                       continue;
-               if (client->task) {
-                       char task_comm[TASK_COMM_LEN];
-
-                       get_task_comm(task_comm, client->task);
-                       pr_err("client:%s pid:%u size:%u\n", task_comm,
-                                  client->pid, size);
-               } else {
-                       pr_err("client:%s pid:%u size:%u\n", client->name,
-                                  client->pid, size);
-               }
-       }
-       pr_err("----------------------------------------------------\n");
-       pr_err("orphaned allocations (info is from last known client):"
-                  "\n");
-       mutex_lock(&dev->buffer_lock);
-       for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
-               struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
-                                                    node);
-               if (buffer->heap->id != heap->id)
-                       continue;
-               total_size += buffer->size;
-               if (!buffer->handle_count) {
-                       pr_err("client:%s pid:%u  size:%u  kmap_cnt:%d  refcount:%d\n", buffer->task_comm,
-                                  buffer->pid, buffer->size, buffer->kmap_cnt,
-                                  atomic_read(&buffer->ref.refcount));
-                       total_orphaned_size += buffer->size;
-               }
-       }
-       mutex_unlock(&dev->buffer_lock);
-       pr_err("----------------------------------------------------\n");
-       pr_err("%s    %u\n", "total orphaned",
-                  total_orphaned_size);
-       pr_err("%s    %u\n", "total ", total_size);
-       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
-               pr_err("%s    %u\n", "deferred free",
-                               heap->free_list_size);
-       pr_err( "----------------------------------------------------\n");
-
-
-       return 0;
-}
-
-
-static int ion_debug_heap_show(struct seq_file *s, void *unused)
-{
-       struct ion_heap *heap = s->private;
-       struct ion_device *dev = heap->dev;
-       struct rb_node *n;
-       struct rb_node *r;
-       struct tm t;
-       size_t total_size = 0;
-       size_t total_orphaned_size = 0;
-
-       seq_printf(s, "%8.s %8.s %8.s %8.s\n", "client", "pid", "size", "time");
-       seq_printf(s, "----------------------------------------------------\n");
-
-       for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
-               struct ion_client *client = rb_entry(n, struct ion_client,
-                                                    node);
-
-               mutex_lock(&client->lock);
-               for (r = rb_first(&client->handles); r; r = rb_next(r)) {
-                       struct ion_handle *handle = rb_entry(r,
-                                                                struct ion_handle,
-                                                                node);
-                       struct ion_buffer *buffer = handle->buffer;
-
-                       if (buffer->heap->id == heap->id) {
-                               if (!buffer->size)
-                                       continue;
-                               time_to_tm(buffer->alloc_time.tv_sec, 0, &t);
-                               if (client->task) {
-                                       char task_comm[TASK_COMM_LEN];
-
-                                       get_task_comm(task_comm, client->task);
-                                       seq_printf(s, "client:%s pid:%3u tid:%4u size:%8u %ld.%d.%d-%d:%d:%d.%d\n",
-                                               task_comm, client->pid, client->tid, buffer->size,
-                                               t.tm_year + 1900, t.tm_mon + 1,
-                                               t.tm_mday, t.tm_hour, t.tm_min,
-                                               t.tm_sec, buffer->alloc_time.tv_usec);
-                               } else {
-                                       seq_printf(s, "client:%s pid:%3u tid:%4u size:%8u %ld.%d.%d-%d:%d:%d.%d\n",
-                                               client->name, client->pid, client->tid, buffer->size,
-                                               t.tm_year + 1900, t.tm_mon + 1,
-                                               t.tm_mday, t.tm_hour, t.tm_min,
-                                               t.tm_sec, buffer->alloc_time.tv_usec);
-                               }
-                       }
-               }
-               mutex_unlock(&client->lock);
-       }
-       seq_printf(s, "----------------------------------------------------\n");
-       seq_printf(s, "orphaned allocations (info is from last known client):"
-                  "\n");
-       mutex_lock(&dev->buffer_lock);
-       for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
-               struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
-                                                    node);
-               if (buffer->heap->id != heap->id)
-                       continue;
-               total_size += buffer->size;
-               if (!buffer->handle_count) {
-                       time_to_tm(buffer->alloc_time.tv_sec, 0, &t);
-                       seq_printf(s, "client:%s pid:%3u tid:%4u size:%8u %d %d %ld.%d.%d-%d:%d:%d.%d\n",
-                               buffer->task_comm, buffer->pid, buffer->tid, buffer->size,
-                               buffer->kmap_cnt, atomic_read(&buffer->ref.refcount),
-                               t.tm_year + 1900, t.tm_mon + 1,
-                               t.tm_mday, t.tm_hour, t.tm_min,
-                               t.tm_sec, buffer->alloc_time.tv_usec);
-                       total_orphaned_size += buffer->size;
-               }
-       }
-       mutex_unlock(&dev->buffer_lock);
-       seq_printf(s, "----------------------------------------------------\n");
-       seq_printf(s, "%s    %u\n", "total orphaned",
-                  total_orphaned_size);
-       seq_printf(s, "%s    %u\n", "total ", total_size);
-       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
-               seq_printf(s, "%s    %u\n", "deferred free",
-                               heap->free_list_size);
-       if(heap->type == ION_HEAP_TYPE_SYSTEM)
-               seq_printf(s, "%s        %u\n", "deferred catched", ion_system_heap_debug_defer_catched(heap));
-
-       seq_printf(s, "----------------------------------------------------\n");
-
-       if (heap->debug_show)
-               heap->debug_show(heap, s, unused);
-
-       return 0;
-}
-
-static int ion_debug_heap_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ion_debug_heap_show, inode->i_private);
-}
-
-static const struct file_operations debug_heap_fops = {
-       .open = ion_debug_heap_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-#ifdef DEBUG_HEAP_SHRINKER
-static int debug_shrink_set(void *data, u64 val)
-{
-        struct ion_heap *heap = data;
-        struct shrink_control sc;
-        int objs;
-
-        sc.gfp_mask = -1;
-        sc.nr_to_scan = 0;
-
-        if (!val)
-                return 0;
-
-        objs = heap->shrinker.shrink(&heap->shrinker, &sc);
-        sc.nr_to_scan = objs;
-
-        heap->shrinker.shrink(&heap->shrinker, &sc);
-        return 0;
-}
-
-static int debug_shrink_get(void *data, u64 *val)
-{
-        struct ion_heap *heap = data;
-        struct shrink_control sc;
-        int objs;
-
-        sc.gfp_mask = -1;
-        sc.nr_to_scan = 0;
-
-        objs = heap->shrinker.shrink(&heap->shrinker, &sc);
-        *val = objs;
-        return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
-                        debug_shrink_set, "%llu\n");
-#endif
-
-void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
-{
-       if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
-           !heap->ops->unmap_dma)
-               pr_err("%s: can not add heap with invalid ops struct.\n",
-                      __func__);
-
-       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
-               ion_heap_init_deferred_free(heap);
-
-       heap->dev = dev;
-       down_write(&dev->lock);
-       /* use negative heap->id to reverse the priority -- when traversing
-          the list later attempt higher id numbers first */
-       plist_node_init(&heap->node, -heap->id);
-       plist_add(&heap->node, &dev->heaps);
-       debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
-                           &debug_heap_fops);
-#ifdef DEBUG_HEAP_SHRINKER
-       if (heap->shrinker.shrink) {
-               char debug_name[64];
-
-               snprintf(debug_name, 64, "%s_shrink", heap->name);
-               debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
-                                   &debug_shrink_fops);
-       }
-#endif
-       up_write(&dev->lock);
-}
-
-struct ion_device *ion_device_create(long (*custom_ioctl)
-                                    (struct ion_client *client,
-                                     unsigned int cmd,
-                                     unsigned long arg))
-{
-       struct ion_device *idev;
-       int ret;
-
-       idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
-       if (!idev)
-               return ERR_PTR(-ENOMEM);
-
-       idev->dev.minor = MISC_DYNAMIC_MINOR;
-       idev->dev.name = "ion";
-       idev->dev.fops = &ion_fops;
-       idev->dev.parent = NULL;
-       ret = misc_register(&idev->dev);
-       if (ret) {
-               pr_err("ion: failed to register misc device.\n");
-               return ERR_PTR(ret);
-       }
-
-       idev->debug_root = debugfs_create_dir("ion", NULL);
-       if (!idev->debug_root)
-               pr_err("ion: failed to create debug files.\n");
-
-       idev->custom_ioctl = custom_ioctl;
-       idev->buffers = RB_ROOT;
-       mutex_init(&idev->buffer_lock);
-       init_rwsem(&idev->lock);
-       plist_head_init(&idev->heaps);
-       idev->clients = RB_ROOT;
-       return idev;
-}
-
-void ion_device_destroy(struct ion_device *dev)
-{
-       misc_deregister(&dev->dev);
-       /* XXX need to free the heaps and clients ? */
-       kfree(dev);
-}
-
-void __init ion_reserve(struct ion_platform_data *data)
-{
-       int i;
-
-       for (i = 0; i < data->nr; i++) {
-               if (data->heaps[i].size == 0)
-                       continue;
-
-               if (data->heaps[i].base == 0) {
-                       phys_addr_t paddr;
-                       paddr = memblock_alloc_base(data->heaps[i].size,
-                                                   data->heaps[i].align,
-                                                   MEMBLOCK_ALLOC_ANYWHERE);
-                       if (!paddr) {
-                               pr_err("%s: error allocating memblock for "
-                                      "heap %d\n",
-                                       __func__, i);
-                               continue;
-                       }
-                       data->heaps[i].base = paddr;
-               } else {
-                       int ret = memblock_reserve(data->heaps[i].base,
-                                              data->heaps[i].size);
-                       if (ret)
-                               pr_err("memblock reserve of %zx@%lx failed\n",
-                                      data->heaps[i].size,
-                                      data->heaps[i].base);
-               }
-               pr_info("%s: %s reserved base %lx size %zu\n", __func__,
-                       data->heaps[i].name,
-                       data->heaps[i].base,
-                       data->heaps[i].size);
-       }
-}
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
deleted file mode 100644 (file)
index 3f046b9..0000000
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * drivers/gpu/ion/ion_carveout_heap.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/spinlock.h>
-
-#include <linux/err.h>
-#include <linux/genalloc.h>
-#include <linux/io.h>
-#include <linux/ion.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "ion_priv.h"
-
-#ifdef CONFIG_ARCH_SCX35L64
-#include <asm/io.h>
-#else
-#include <asm/mach/map.h>
-#endif
-
-struct ion_carveout_heap {
-       struct ion_heap heap;
-       struct gen_pool *pool;
-       ion_phys_addr_t base;
-};
-
-ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
-                                     unsigned long size,
-                                     unsigned long align)
-{
-       struct ion_carveout_heap *carveout_heap =
-               container_of(heap, struct ion_carveout_heap, heap);
-       unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);
-
-       if (!offset)
-               return ION_CARVEOUT_ALLOCATE_FAIL;
-
-       return offset;
-}
-
-void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
-                      unsigned long size)
-{
-       struct ion_carveout_heap *carveout_heap =
-               container_of(heap, struct ion_carveout_heap, heap);
-
-       if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
-               return;
-       gen_pool_free(carveout_heap->pool, addr, size);
-}
-
-static int ion_carveout_heap_phys(struct ion_heap *heap,
-                                 struct ion_buffer *buffer,
-                                 ion_phys_addr_t *addr, size_t *len)
-{
-       *addr = buffer->priv_phys;
-       *len = buffer->size;
-       return 0;
-}
-
-static int ion_carveout_heap_allocate(struct ion_heap *heap,
-                                     struct ion_buffer *buffer,
-                                     unsigned long size, unsigned long align,
-                                     unsigned long flags)
-{
-       buffer->priv_phys = ion_carveout_allocate(heap, size, align);
-       return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
-}
-
-static void ion_carveout_heap_free(struct ion_buffer *buffer)
-{
-       struct ion_heap *heap = buffer->heap;
-
-       ion_carveout_free(heap, buffer->priv_phys, buffer->size);
-       buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
-}
-
-struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
-                                             struct ion_buffer *buffer)
-{
-       struct sg_table *table;
-       int ret;
-
-       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!table)
-               return ERR_PTR(-ENOMEM);
-       ret = sg_alloc_table(table, 1, GFP_KERNEL);
-       if (ret) {
-               kfree(table);
-               return ERR_PTR(ret);
-       }
-       sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
-                   0);
-       return table;
-}
-
-void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
-                                struct ion_buffer *buffer)
-{
-       sg_free_table(buffer->sg_table);
-       kfree(buffer->sg_table);
-       buffer->sg_table=NULL;
-}
-
-void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
-                                  struct ion_buffer *buffer)
-{
-       void *ret;
-#ifndef CONFIG_ARCH_SCX35L64
-       pgprot_t mtype = MT_MEMORY_NONCACHED;
-#else
-       pgprot_t mtype = 11;
-#endif
-       if (buffer->flags & ION_FLAG_CACHED)
-#ifndef CONFIG_ARCH_SCX35L64
-               mtype = MT_MEMORY;
-#else
-               mtype = 9;
-#endif
-
-#ifndef CONFIG_ARCH_SCX35L64
-       ret = __arm_ioremap(buffer->priv_phys, buffer->size,
-                             mtype);
-#else
-        ret = __ioremap(buffer->priv_phys, buffer->size, mtype);
-#endif
-
-       if (ret == NULL)
-               return ERR_PTR(-ENOMEM);
-
-       return ret;
-}
-
-void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
-                                   struct ion_buffer *buffer)
-{
-#ifndef CONFIG_ARCH_SCX35L64
-       __arm_iounmap(buffer->vaddr);
-#else
-      __iounmap(buffer->vaddr);
-#endif
-       buffer->vaddr = NULL;
-       return;
-}
-
-int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-                              struct vm_area_struct *vma)
-{
-       return remap_pfn_range(vma, vma->vm_start,
-                              __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
-                              vma->vm_end - vma->vm_start,
-                              pgprot_noncached(vma->vm_page_prot));
-}
-
-#if defined(CONFIG_SPRD_IOMMU)
-int ion_carveout_heap_map_iommu(struct ion_buffer *buffer, int domain_num, unsigned long *ptr_iova)
-{
-       int ret=0;
-       if(0==buffer->iomap_cnt[domain_num])
-       {
-               buffer->iova[domain_num]=sprd_iova_alloc(domain_num,buffer->size);
-               ret = sprd_iova_map(domain_num,buffer->iova[domain_num],
-                               buffer->size, buffer->sg_table);
-       }
-       *ptr_iova=buffer->iova[domain_num];
-       buffer->iomap_cnt[domain_num]++;
-       return ret;
-}
-int ion_carveout_heap_unmap_iommu(struct ion_buffer *buffer, int domain_num)
-{
-       int ret=0;
-       buffer->iomap_cnt[domain_num]--;
-       if(0==buffer->iomap_cnt[domain_num])
-       {
-               ret=sprd_iova_unmap(domain_num,buffer->iova[domain_num],
-                               buffer->size);
-               sprd_iova_free(domain_num,buffer->iova[domain_num],buffer->size);
-               buffer->iova[domain_num]=0;
-       }
-       return ret;
-}
-#endif
-
-static struct ion_heap_ops carveout_heap_ops = {
-       .allocate = ion_carveout_heap_allocate,
-       .free = ion_carveout_heap_free,
-       .phys = ion_carveout_heap_phys,
-       .map_dma = ion_carveout_heap_map_dma,
-       .unmap_dma = ion_carveout_heap_unmap_dma,
-       .map_user = ion_carveout_heap_map_user,
-       .map_kernel = ion_carveout_heap_map_kernel,
-       .unmap_kernel = ion_carveout_heap_unmap_kernel,
-#if defined(CONFIG_SPRD_IOMMU)
-       .map_iommu = ion_carveout_heap_map_iommu,
-       .unmap_iommu = ion_carveout_heap_unmap_iommu,
-#endif
-};
-
-struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
-{
-       struct ion_carveout_heap *carveout_heap;
-
-       carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
-       if (!carveout_heap)
-               return ERR_PTR(-ENOMEM);
-
-       carveout_heap->pool = gen_pool_create(12, -1);
-       if (!carveout_heap->pool) {
-               kfree(carveout_heap);
-               return ERR_PTR(-ENOMEM);
-       }
-       carveout_heap->base = heap_data->base;
-       gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
-                    -1);
-       carveout_heap->heap.ops = &carveout_heap_ops;
-       carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
-
-       return &carveout_heap->heap;
-}
-
-void ion_carveout_heap_destroy(struct ion_heap *heap)
-{
-       struct ion_carveout_heap *carveout_heap =
-            container_of(heap, struct  ion_carveout_heap, heap);
-
-       gen_pool_destroy(carveout_heap->pool);
-       kfree(carveout_heap);
-       carveout_heap = NULL;
-}
diff --git a/drivers/gpu/ion/ion_chunk_heap.c b/drivers/gpu/ion/ion_chunk_heap.c
deleted file mode 100644 (file)
index 5d51b60..0000000
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * drivers/gpu/ion/ion_chunk_heap.c
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-//#include <linux/spinlock.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/genalloc.h>
-#include <linux/io.h>
-#include <linux/ion.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "ion_priv.h"
-
-#ifndef CONFIG_ARCH_SCX35L64
-#include <asm/mach/map.h>
-#endif
-
-struct ion_chunk_heap {
-       struct ion_heap heap;
-       struct gen_pool *pool;
-       ion_phys_addr_t base;
-       unsigned long chunk_size;
-       unsigned long size;
-       unsigned long allocated;
-};
-
-static int ion_chunk_heap_allocate(struct ion_heap *heap,
-                                     struct ion_buffer *buffer,
-                                     unsigned long size, unsigned long align,
-                                     unsigned long flags)
-{
-       struct ion_chunk_heap *chunk_heap =
-               container_of(heap, struct ion_chunk_heap, heap);
-       struct sg_table *table;
-       struct scatterlist *sg;
-       int ret, i;
-       unsigned long num_chunks;
-       unsigned long allocated_size;
-
-       if (ion_buffer_fault_user_mappings(buffer))
-               return -ENOMEM;
-
-       allocated_size = ALIGN(size, chunk_heap->chunk_size);
-       num_chunks = allocated_size / chunk_heap->chunk_size;
-
-       if (allocated_size > chunk_heap->size - chunk_heap->allocated)
-               return -ENOMEM;
-
-       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!table)
-               return -ENOMEM;
-       ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
-       if (ret) {
-               kfree(table);
-               return ret;
-       }
-
-       sg = table->sgl;
-       for (i = 0; i < num_chunks; i++) {
-               unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
-                                                    chunk_heap->chunk_size);
-               if (!paddr)
-                       goto err;
-               sg_set_page(sg, phys_to_page(paddr), chunk_heap->chunk_size, 0);
-               sg = sg_next(sg);
-       }
-
-       buffer->priv_virt = table;
-       chunk_heap->allocated += allocated_size;
-       return 0;
-err:
-       sg = table->sgl;
-       for (i -= 1; i >= 0; i--) {
-               gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
-                             sg_dma_len(sg));
-               sg = sg_next(sg);
-       }
-       sg_free_table(table);
-       kfree(table);
-       return -ENOMEM;
-}
-
-static void ion_chunk_heap_free(struct ion_buffer *buffer)
-{
-       struct ion_heap *heap = buffer->heap;
-       struct ion_chunk_heap *chunk_heap =
-               container_of(heap, struct ion_chunk_heap, heap);
-       struct sg_table *table = buffer->priv_virt;
-       struct scatterlist *sg;
-       int i;
-       unsigned long allocated_size;
-
-       allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);
-
-       ion_heap_buffer_zero(buffer);
-
-       for_each_sg(table->sgl, sg, table->nents, i) {
-               if (ion_buffer_cached(buffer))
-#ifdef CONFIG_64BIT
-       dma_sync_single_for_device(NULL,
-               (dma_addr_t)page_to_phys(sg_page(sg)),
-               sg_dma_len(sg), DMA_BIDIRECTIONAL);
-#else
-       arm_dma_ops.sync_single_for_device(NULL,
-               pfn_to_dma(NULL, page_to_pfn(sg_page(sg))),
-               sg_dma_len(sg), DMA_BIDIRECTIONAL);
-
-#endif
-               gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
-                             sg_dma_len(sg));
-       }
-       chunk_heap->allocated -= allocated_size;
-       sg_free_table(table);
-       kfree(table);
-}
-
-struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
-                                        struct ion_buffer *buffer)
-{
-       return buffer->priv_virt;
-}
-
-void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
-                              struct ion_buffer *buffer)
-{
-       return;
-}
-
-static struct ion_heap_ops chunk_heap_ops = {
-       .allocate = ion_chunk_heap_allocate,
-       .free = ion_chunk_heap_free,
-       .map_dma = ion_chunk_heap_map_dma,
-       .unmap_dma = ion_chunk_heap_unmap_dma,
-       .map_user = ion_heap_map_user,
-       .map_kernel = ion_heap_map_kernel,
-       .unmap_kernel = ion_heap_unmap_kernel,
-};
-
-struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
-{
-       struct ion_chunk_heap *chunk_heap;
-       struct vm_struct *vm_struct;
-       pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
-       int i, ret;
-
-
-       chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
-       if (!chunk_heap)
-               return ERR_PTR(-ENOMEM);
-
-       chunk_heap->chunk_size = (unsigned long)heap_data->priv;
-       chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
-                                          PAGE_SHIFT, -1);
-       if (!chunk_heap->pool) {
-               ret = -ENOMEM;
-               goto error_gen_pool_create;
-       }
-       chunk_heap->base = heap_data->base;
-       chunk_heap->size = heap_data->size;
-       chunk_heap->allocated = 0;
-
-       vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
-       if (!vm_struct) {
-               ret = -ENOMEM;
-               goto error;
-       }
-       for (i = 0; i < chunk_heap->size; i += PAGE_SIZE) {
-               struct page *page = phys_to_page(chunk_heap->base + i);
-               struct page **pages = &page;
-
-               ret = map_vm_area(vm_struct, pgprot, &pages);
-               if (ret)
-                       goto error_map_vm_area;
-               memset(vm_struct->addr, 0, PAGE_SIZE);
-               unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
-       }
-       free_vm_area(vm_struct);
-
-#ifdef CONFIG_64BIT
-       dma_sync_single_for_device(NULL,
-               (dma_addr_t)page_to_phys(phys_to_page(heap_data->base)),
-               heap_data->size, DMA_BIDIRECTIONAL);
-#else
-       arm_dma_ops.sync_single_for_device(NULL,
-               pfn_to_dma(NULL, page_to_pfn(phys_to_page(heap_data->base))),
-               heap_data->size, DMA_BIDIRECTIONAL);
-#endif
-
-       gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
-       chunk_heap->heap.ops = &chunk_heap_ops;
-       chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
-       chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
-       pr_info("%s: base %lu size %u align %ld\n", __func__, chunk_heap->base,
-               heap_data->size, heap_data->align);
-
-       return &chunk_heap->heap;
-
-error_map_vm_area:
-       free_vm_area(vm_struct);
-error:
-       gen_pool_destroy(chunk_heap->pool);
-error_gen_pool_create:
-       kfree(chunk_heap);
-       return ERR_PTR(ret);
-}
-
-void ion_chunk_heap_destroy(struct ion_heap *heap)
-{
-       struct ion_chunk_heap *chunk_heap =
-            container_of(heap, struct  ion_chunk_heap, heap);
-
-       gen_pool_destroy(chunk_heap->pool);
-       kfree(chunk_heap);
-       chunk_heap = NULL;
-}
diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/gpu/ion/ion_cma_heap.c
deleted file mode 100644 (file)
index ae135cc..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * drivers/gpu/ion/ion_cma_heap.c
- *
- * Copyright (C) Linaro 2012
- * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/device.h>
-#include <linux/ion.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/dma-mapping.h>
-
-/* for ion_heap_ops structure */
-#include "ion_priv.h"
-
-#define ION_CMA_ALLOCATE_FAILED -1
-#define ION_IS_CACHED(__flags)  ((__flags) & ION_FLAG_CACHED)
-
-struct ion_cma_heap {
-       struct ion_heap heap;
-       struct device *dev;
-};
-
-#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
-
-struct ion_cma_buffer_info {
-       void *cpu_addr;
-       dma_addr_t handle;
-       struct sg_table *table;
-       bool is_cached;
-};
-
-/*
- * Create scatter-list for the already allocated DMA buffer.
- * This function could be replaced by dma_common_get_sgtable
- * as soon as it will avalaible.
- */
-int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
-                       void *cpu_addr, dma_addr_t handle, size_t size)
-{
-       struct page *page = virt_to_page(cpu_addr);
-       int ret;
-
-       ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
-       if (unlikely(ret))
-               return ret;
-
-       sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
-       return 0;
-}
-
-/* ION CMA heap operations functions */
-static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
-                           unsigned long len, unsigned long align,
-                           unsigned long flags)
-{
-       struct ion_cma_heap *cma_heap = to_cma_heap(heap);
-       struct device *dev = cma_heap->dev;
-       struct ion_cma_buffer_info *info;
-
-       dev_dbg(dev, "Request buffer allocation len %ld\n", len);
-
-       info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
-       if (!info) {
-               dev_err(dev, "Can't allocate buffer info\n");
-               return ION_CMA_ALLOCATE_FAILED;
-       }
-
-       if (!ION_IS_CACHED(flags))
-               info->cpu_addr = dma_alloc_writecombine(dev, len,
-                               &(info->handle), GFP_KERNEL);
-       else
-               info->cpu_addr = dma_alloc_nonconsistent(dev, len,
-                               &(info->handle), GFP_KERNEL);
-
-       if (!info->cpu_addr) {
-               dev_err(dev, "Fail to allocate buffer\n");
-               goto err;
-       }
-
-       info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!info->table) {
-               dev_err(dev, "Fail to allocate sg table\n");
-               goto free_mem;
-       }
-
-       info->is_cached = ION_IS_CACHED(flags);
-
-       if (ion_cma_get_sgtable
-                       (dev, info->table, info->cpu_addr, info->handle, len))
-               goto free_table;
-       /* keep this for memory release */
-       buffer->priv_virt = info;
-       dev_dbg(dev, "Allocate buffer %p\n", buffer);
-       return 0;
-
-free_table:
-       kfree(info->table);
-free_mem:
-       dma_free_coherent(dev, len, info->cpu_addr, info->handle);
-err:
-       kfree(info);
-       return ION_CMA_ALLOCATE_FAILED;
-}
-
-static void ion_cma_free(struct ion_buffer *buffer)
-{
-       struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
-       struct device *dev = cma_heap->dev;
-       struct ion_cma_buffer_info *info = buffer->priv_virt;
-
-       dev_dbg(dev, "Release buffer %p\n", buffer);
-       /* release memory */
-       dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
-       /* release sg table */
-       sg_free_table(info->table);
-       kfree(info->table);
-       kfree(info);
-}
-
-/* return physical address in addr */
-static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
-                       ion_phys_addr_t *addr, size_t *len)
-{
-       struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
-       struct device *dev = cma_heap->dev;
-       struct ion_cma_buffer_info *info = buffer->priv_virt;
-
-       dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
-               info->handle);
-
-       *addr = info->handle;
-       *len = buffer->size;
-
-       return 0;
-}
-
-struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
-                                        struct ion_buffer *buffer)
-{
-       struct ion_cma_buffer_info *info = buffer->priv_virt;
-
-       return info->table;
-}
-
-void ion_cma_heap_unmap_dma(struct ion_heap *heap,
-                              struct ion_buffer *buffer)
-{
-       return;
-}
-
-static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
-                       struct vm_area_struct *vma)
-{
-       struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
-       struct device *dev = cma_heap->dev;
-       struct ion_cma_buffer_info *info = buffer->priv_virt;
-
-       if (!info->is_cached)
-               return dma_mmap_writecombine(dev, vma, info->cpu_addr,
-                               info->handle, buffer->size);
-       else
-               return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
-                               info->handle, buffer->size);
-}
-
-void *ion_cma_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
-{
-       struct ion_cma_buffer_info *info = buffer->priv_virt;
-       /* kernel memory mapping has been done at allocation time */
-       return info->cpu_addr;
-}
-
-static void ion_cma_unmap_kernel(struct ion_heap *heap,
-                                       struct ion_buffer *buffer)
-{
-}
-static struct ion_heap_ops ion_cma_ops = {
-       .allocate = ion_cma_allocate,
-       .free = ion_cma_free,
-       .map_dma = ion_cma_heap_map_dma,
-       .unmap_dma = ion_cma_heap_unmap_dma,
-       .phys = ion_cma_phys,
-       .map_user = ion_cma_mmap,
-       .map_kernel = ion_cma_map_kernel,
-       .unmap_kernel = ion_cma_unmap_kernel,
-};
-
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data, struct device *dev)
-{
-       struct ion_cma_heap *cma_heap;
-       uint64_t sprd_dmamask = DMA_BIT_MASK(32);
-       cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
-
-       if (!cma_heap)
-               return ERR_PTR(-ENOMEM);
-
-       cma_heap->heap.ops = &ion_cma_ops;
-       /* get device from private heaps data, later it will be
-        * used to make the link with reserved CMA memory */
-       dev->dma_mask = &sprd_dmamask;
-       dev->coherent_dma_mask = DMA_BIT_MASK(32);
-       cma_heap->dev = dev;
-       cma_heap->heap.type = ION_HEAP_TYPE_DMA;
-       return &cma_heap->heap;
-}
-
-void ion_cma_heap_destroy(struct ion_heap *heap)
-{
-       struct ion_cma_heap *cma_heap = to_cma_heap(heap);
-
-       kfree(cma_heap);
-}
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
deleted file mode 100644 (file)
index 58b1217..0000000
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * drivers/gpu/ion/ion_heap.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/freezer.h>
-#include <linux/ion.h>
-#include <linux/kthread.h>
-#include <linux/mm.h>
-#include <linux/rtmutex.h>
-#include <linux/sched.h>
-#include <linux/scatterlist.h>
-#include <linux/vmalloc.h>
-#include "ion_priv.h"
-
-void *ion_heap_map_kernel(struct ion_heap *heap,
-                         struct ion_buffer *buffer)
-{
-       struct scatterlist *sg;
-       int i, j;
-       void *vaddr;
-       pgprot_t pgprot;
-       struct sg_table *table = buffer->sg_table;
-       int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
-       struct page **pages = vmalloc(sizeof(struct page *) * npages);
-       struct page **tmp = pages;
-
-       if (!pages)
-               return 0;
-
-       if (buffer->flags & ION_FLAG_CACHED)
-               pgprot = PAGE_KERNEL;
-       else
-               pgprot = pgprot_writecombine(PAGE_KERNEL);
-
-       for_each_sg(table->sgl, sg, table->nents, i) {
-               int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
-               struct page *page = sg_page(sg);
-               BUG_ON(i >= npages);
-               for (j = 0; j < npages_this_entry; j++) {
-                       *(tmp++) = page++;
-               }
-       }
-       vaddr = vmap(pages, npages, VM_MAP, pgprot);
-       vfree(pages);
-
-       if (vaddr == NULL)
-               return ERR_PTR(-ENOMEM);
-
-       return vaddr;
-}
-
-void ion_heap_unmap_kernel(struct ion_heap *heap,
-                          struct ion_buffer *buffer)
-{
-       vunmap(buffer->vaddr);
-}
-
-int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-                     struct vm_area_struct *vma)
-{
-       struct sg_table *table = buffer->sg_table;
-       unsigned long addr = vma->vm_start;
-       unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
-       struct scatterlist *sg;
-       int i;
-
-       for_each_sg(table->sgl, sg, table->nents, i) {
-               struct page *page = sg_page(sg);
-               unsigned long remainder = vma->vm_end - addr;
-               unsigned long len = sg_dma_len(sg);
-
-               if (offset >= sg_dma_len(sg)) {
-                       offset -= sg_dma_len(sg);
-                       continue;
-               } else if (offset) {
-                       page += offset / PAGE_SIZE;
-                       len = sg_dma_len(sg) - offset;
-                       offset = 0;
-               }
-               len = min(len, remainder);
-               remap_pfn_range(vma, addr, page_to_pfn(page), len,
-                               vma->vm_page_prot);
-               addr += len;
-               if (addr >= vma->vm_end)
-                       return 0;
-       }
-       return 0;
-}
-
-int ion_heap_buffer_zero(struct ion_buffer *buffer)
-{
-       struct sg_table *table = buffer->sg_table;
-       pgprot_t pgprot;
-       struct scatterlist *sg;
-       struct vm_struct *vm_struct;
-       int i, j, ret = 0;
-
-       if (buffer->flags & ION_FLAG_CACHED)
-               pgprot = PAGE_KERNEL;
-       else
-               pgprot = pgprot_writecombine(PAGE_KERNEL);
-
-       vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
-       if (!vm_struct)
-               return -ENOMEM;
-
-       for_each_sg(table->sgl, sg, table->nents, i) {
-               struct page *page = sg_page(sg);
-               unsigned long len = sg_dma_len(sg);
-
-               for (j = 0; j < len / PAGE_SIZE; j++) {
-                       struct page *sub_page = page + j;
-                       struct page **pages = &sub_page;
-                       ret = map_vm_area(vm_struct, pgprot, &pages);
-                       if (ret)
-                               goto end;
-                       memset(vm_struct->addr, 0, PAGE_SIZE);
-                       unmap_kernel_range((unsigned long)vm_struct->addr,
-                                          PAGE_SIZE);
-               }
-       }
-end:
-       free_vm_area(vm_struct);
-       return ret;
-}
-
-struct page *ion_heap_alloc_pages(struct ion_buffer *buffer, gfp_t gfp_flags,
-                                 unsigned int order)
-{
-       struct page *page = alloc_pages(gfp_flags, order);
-
-       if (!page)
-               return page;
-
-       if (ion_buffer_fault_user_mappings(buffer))
-               split_page(page, order);
-
-       return page;
-}
-
-void ion_heap_free_pages(struct ion_buffer *buffer, struct page *page,
-                        unsigned int order)
-{
-       int i;
-
-       if (!ion_buffer_fault_user_mappings(buffer)) {
-               __free_pages(page, order);
-               return;
-       }
-       for (i = 0; i < (1 << order); i++)
-               __free_page(page + i);
-}
-
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer * buffer)
-{
-       spin_lock(&heap->free_lock);
-       list_add(&buffer->list, &heap->free_list);
-       heap->free_list_size += buffer->size;
-       spin_unlock(&heap->free_lock);
-       wake_up(&heap->waitqueue);
-}
-
-size_t ion_heap_freelist_size(struct ion_heap *heap)
-{
-       size_t size;
-
-       spin_lock(&heap->free_lock);
-       size = heap->free_list_size;
-       spin_unlock(&heap->free_lock);
-
-       return size;
-}
-
-static size_t _ion_heap_freelist_drain(struct ion_heap *heap, int cached, size_t size,
-                        bool skip_pools)
-{
-       struct ion_buffer *buffer, *tmp;
-       size_t total_drained = 0;
-       struct list_head free_list;
-
-       if (ion_heap_freelist_size(heap) == 0)
-               return 0;
-
-       spin_lock(&heap->free_lock);
-       if (size == 0)
-               size = heap->free_list_size;
-
-        INIT_LIST_HEAD(&free_list);
-
-        list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
-            if (total_drained >= size)
-                break;
-            if (!(cached < 0 || cached == ion_buffer_cached(buffer)))
-                continue;
-            list_del(&buffer->list);
-            list_add(&buffer->list, &free_list);
-            heap->free_list_size -= buffer->size;
-            if (skip_pools)
-                buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
-            total_drained += buffer->size;
-        }
-        spin_unlock(&heap->free_lock);
-
-        list_for_each_entry_safe(buffer, tmp, &free_list, list) {
-            list_del(&buffer->list);
-            ion_buffer_destroy(buffer);
-        }
-
-       return total_drained;
-}
-
-size_t ion_heap_freelist_drain(struct ion_heap *heap, int cached, size_t size)
-{
-       return _ion_heap_freelist_drain(heap, cached, size, false);
-}
-
-size_t ion_heap_freelist_shrink(struct ion_heap *heap, int cached, size_t size)
-{
-       return _ion_heap_freelist_drain(heap, cached, size, true);
-}
-
-
-int ion_heap_deferred_free(void *data)
-{
-       struct ion_heap *heap = data;
-
-       while (true) {
-               struct ion_buffer *buffer;
-
-               wait_event_freezable(heap->waitqueue,
-                                    ion_heap_freelist_size(heap) > 0);
-
-               spin_lock(&heap->free_lock);
-               if (list_empty(&heap->free_list)) {
-                       /*
-                        *  Sprd Change
-                        *  Add a protect to avoid the thread waked up allways
-                        *  when free_list_size is overwrited by abnormal operation.
-                        * */
-                       if (heap->free_list_size > 0)
-                       {
-                           printk(KERN_INFO "ion buffer free_list_size:%u is in abnormal state, so do reset\n",
-                                           (unsigned int)heap->free_list_size);
-                           heap->free_list_size = 0;
-                       }
-                       spin_unlock(&heap->free_lock);
-                       continue;
-               }
-               buffer = list_first_entry(&heap->free_list, struct ion_buffer,
-                                         list);
-               list_del(&buffer->list);
-               heap->free_list_size -= buffer->size;
-               spin_unlock(&heap->free_lock);
-               ion_buffer_destroy(buffer);
-       }
-
-       return 0;
-}
-
-int ion_heap_init_deferred_free(struct ion_heap *heap)
-{
-       struct sched_param param = { .sched_priority = 0 };
-
-       INIT_LIST_HEAD(&heap->free_list);
-       heap->free_list_size = 0;
-       spin_lock_init(&heap->free_lock);
-       init_waitqueue_head(&heap->waitqueue);
-       heap->task = kthread_run(ion_heap_deferred_free, heap,
-                                "%s", heap->name);
-       sched_setscheduler(heap->task, SCHED_IDLE, &param);
-       if (IS_ERR(heap->task)) {
-               pr_err("%s: creating thread for deferred free failed\n",
-                      __func__);
-               return PTR_RET(heap->task);
-       }
-       return 0;
-}
-
-struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
-{
-       struct ion_heap *heap = NULL;
-
-       switch (heap_data->type) {
-       case ION_HEAP_TYPE_SYSTEM_CONTIG:
-               heap = ion_system_contig_heap_create(heap_data);
-               break;
-       case ION_HEAP_TYPE_SYSTEM:
-               heap = ion_system_heap_create(heap_data);
-               break;
-       case ION_HEAP_TYPE_CARVEOUT:
-               heap = ion_carveout_heap_create(heap_data);
-               break;
-       case ION_HEAP_TYPE_CHUNK:
-               heap = ion_chunk_heap_create(heap_data);
-               break;
-       case ION_HEAP_TYPE_DMA:
-               heap = ion_cma_heap_create(heap_data, NULL);
-               break;
-       default:
-               pr_err("%s: Invalid heap type %d\n", __func__,
-                      heap_data->type);
-               return ERR_PTR(-EINVAL);
-       }
-
-       if (IS_ERR_OR_NULL(heap)) {
-               pr_err("%s: error creating heap %s type %d base %lu size %u\n",
-                      __func__, heap_data->name, heap_data->type,
-                      heap_data->base, heap_data->size);
-               return ERR_PTR(-EINVAL);
-       }
-
-       heap->name = heap_data->name;
-       heap->id = heap_data->id;
-       return heap;
-}
-
-void ion_heap_destroy(struct ion_heap *heap)
-{
-       if (!heap)
-               return;
-
-       switch (heap->type) {
-       case ION_HEAP_TYPE_SYSTEM_CONTIG:
-               ion_system_contig_heap_destroy(heap);
-               break;
-       case ION_HEAP_TYPE_SYSTEM:
-               ion_system_heap_destroy(heap);
-               break;
-       case ION_HEAP_TYPE_CARVEOUT:
-               ion_carveout_heap_destroy(heap);
-               break;
-       case ION_HEAP_TYPE_CHUNK:
-               ion_chunk_heap_destroy(heap);
-               break;
-       case ION_HEAP_TYPE_DMA:
-               ion_cma_heap_destroy(heap);
-               break;
-       default:
-               pr_err("%s: Invalid heap type %d\n", __func__,
-                      heap->type);
-       }
-}
diff --git a/drivers/gpu/ion/ion_page_pool.c b/drivers/gpu/ion/ion_page_pool.c
deleted file mode 100644 (file)
index 8e308fe..0000000
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * drivers/gpu/ion/ion_mem_pool.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/debugfs.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "ion_priv.h"
-
-struct ion_page_pool_item {
-       struct page *page;
-       struct list_head list;
-};
-
-static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
-{
-       struct page *page = alloc_pages(pool->gfp_mask, pool->order);
-
-       if (!page)
-               return NULL;
-       /* this is only being used to flush the page for dma,
-          this api is not really suitable for calling from a driver
-          but no better way to flush a page for dma exist at this time */
-#ifdef CONFIG_64BIT
-       dma_sync_single_for_device(NULL, (dma_addr_t)page_to_phys(page),
-               PAGE_SIZE << pool->order,
-               DMA_BIDIRECTIONAL);
-#else
-       arm_dma_ops.sync_single_for_device(NULL,
-               pfn_to_dma(NULL, page_to_pfn(page)),
-               PAGE_SIZE << pool->order,
-               DMA_BIDIRECTIONAL);
-#endif
-
-       return page;
-}
-
-static void ion_page_pool_free_pages(struct ion_page_pool *pool,
-                                    struct page *page)
-{
-       __free_pages(page, pool->order);
-}
-
-static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
-{
-       struct ion_page_pool_item *item;
-
-       item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
-       if (!item)
-               return -ENOMEM;
-
-       mutex_lock(&pool->mutex);
-       item->page = page;
-       if (PageHighMem(page)) {
-               list_add_tail(&item->list, &pool->high_items);
-               pool->high_count++;
-       } else {
-               list_add_tail(&item->list, &pool->low_items);
-               pool->low_count++;
-       }
-       mutex_unlock(&pool->mutex);
-       return 0;
-}
-
-static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
-{
-       struct ion_page_pool_item *item;
-       struct page *page;
-
-       if (high) {
-               BUG_ON(!pool->high_count);
-               item = list_first_entry(&pool->high_items,
-                                       struct ion_page_pool_item, list);
-               pool->high_count--;
-       } else {
-               BUG_ON(!pool->low_count);
-               item = list_first_entry(&pool->low_items,
-                                       struct ion_page_pool_item, list);
-               pool->low_count--;
-       }
-
-       list_del(&item->list);
-       page = item->page;
-       kfree(item);
-       return page;
-}
-
-void *ion_page_pool_alloc(struct ion_page_pool *pool)
-{
-       struct page *page = NULL;
-
-       BUG_ON(!pool);
-
-       mutex_lock(&pool->mutex);
-       if (pool->high_count)
-               page = ion_page_pool_remove(pool, true);
-       else if (pool->low_count)
-               page = ion_page_pool_remove(pool, false);
-       mutex_unlock(&pool->mutex);
-
-       if (!page)
-               page = ion_page_pool_alloc_pages(pool);
-
-       return page;
-}
-
-void ion_page_pool_free(struct ion_page_pool *pool, struct page* page)
-{
-       int ret;
-
-       ret = ion_page_pool_add(pool, page);
-       if (ret)
-               ion_page_pool_free_pages(pool, page);
-}
-
-static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
-{
-       int total = 0;
-
-       total += high ? (pool->high_count + pool->low_count) *
-               (1 << pool->order) :
-                       pool->low_count * (1 << pool->order);
-       return total;
-}
-
-int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
-                               int nr_to_scan)
-{
-       int nr_freed = 0;
-       int i;
-       bool high;
-
-       high = gfp_mask & __GFP_HIGHMEM;
-
-       if (nr_to_scan == 0)
-               return ion_page_pool_total(pool, high);
-
-       for (i = 0; i < nr_to_scan; i++) {
-               struct page *page;
-
-               mutex_lock(&pool->mutex);
-               if (high && pool->high_count) {
-                       page = ion_page_pool_remove(pool, true);
-               } else if (pool->low_count) {
-                       page = ion_page_pool_remove(pool, false);
-               } else {
-                       mutex_unlock(&pool->mutex);
-                       break;
-               }
-               mutex_unlock(&pool->mutex);
-               ion_page_pool_free_pages(pool, page);
-               nr_freed += (1 << pool->order);
-       }
-
-       return nr_freed;
-}
-
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
-{
-       struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
-                                            GFP_KERNEL);
-       if (!pool)
-               return NULL;
-       pool->high_count = 0;
-       pool->low_count = 0;
-       INIT_LIST_HEAD(&pool->low_items);
-       INIT_LIST_HEAD(&pool->high_items);
-       pool->gfp_mask = gfp_mask;
-       pool->order = order;
-       mutex_init(&pool->mutex);
-       plist_node_init(&pool->list, order);
-
-       return pool;
-}
-
-void ion_page_pool_destroy(struct ion_page_pool *pool)
-{
-       kfree(pool);
-}
-
-static int __init ion_page_pool_init(void)
-{
-       return 0;
-}
-
-static void __exit ion_page_pool_exit(void)
-{
-}
-
-module_init(ion_page_pool_init);
-module_exit(ion_page_pool_exit);
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
deleted file mode 100644 (file)
index fe53a0b..0000000
+++ /dev/null
@@ -1,406 +0,0 @@
-/*
- * drivers/gpu/ion/ion_priv.h
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _ION_PRIV_H
-#define _ION_PRIV_H
-
-#include <linux/ion.h>
-#include <linux/kref.h>
-#include <linux/mm_types.h>
-#include <linux/mutex.h>
-#include <linux/rbtree.h>
-#include <linux/sched.h>
-#include <linux/shrinker.h>
-#include <linux/types.h>
-#include <linux/sprd_iommu.h>
-
-struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
-
-/**
- * struct ion_buffer - metadata for a particular buffer
- * @ref:               refernce count
- * @node:              node in the ion_device buffers tree
- * @dev:               back pointer to the ion_device
- * @heap:              back pointer to the heap the buffer came from
- * @flags:             buffer specific flags
- * @private_flags:     internal buffer specific flags
- * @size:              size of the buffer
- * @priv_virt:         private data to the buffer representable as
- *                     a void *
- * @priv_phys:         private data to the buffer representable as
- *                     an ion_phys_addr_t (and someday a phys_addr_t)
- * @lock:              protects the buffers cnt fields
- * @kmap_cnt:          number of times the buffer is mapped to the kernel
- * @vaddr:             the kenrel mapping if kmap_cnt is not zero
- * @dmap_cnt:          number of times the buffer is mapped for dma
- * @sg_table:          the sg table for the buffer if dmap_cnt is not zero
- * @pages:             flat array of pages in the buffer -- used by fault
- *                     handler and only valid for buffers that are faulted in
- * @vmas:              list of vma's mapping this buffer
- * @handle_count:      count of handles referencing this buffer
- * @task_comm:         taskcomm of last client to reference this buffer in a
- *                     handle, used for debugging
- * @pid:               pid of last client to reference this buffer in a
- *                     handle, used for debugging
-*/
-struct ion_buffer {
-       struct kref ref;
-       union {
-               struct rb_node node;
-               struct list_head list;
-       };
-       struct ion_device *dev;
-       struct ion_heap *heap;
-       unsigned long flags;
-       unsigned long private_flags;
-       size_t size;
-       union {
-               void *priv_virt;
-               ion_phys_addr_t priv_phys;
-       };
-       struct mutex lock;
-       int kmap_cnt;
-       void *vaddr;
-       int dmap_cnt;
-       struct sg_table *sg_table;
-       struct page **pages;
-       int iomap_cnt[IOMMU_MAX];
-       unsigned long iova[IOMMU_MAX];
-       struct list_head vmas;
-       /* used to track orphaned buffers */
-       int handle_count;
-       char task_comm[TASK_COMM_LEN];
-       pid_t pid;
-       pid_t tid;
-       struct timeval alloc_time;
-};
-void ion_buffer_destroy(struct ion_buffer *buffer);
-
-/**
- * struct ion_heap_ops - ops to operate on a given heap
- * @allocate:          allocate memory
- * @free:              free memory
- * @phys               get physical address of a buffer (only define on
- *                     physically contiguous heaps)
- * @map_dma            map the memory for dma to a scatterlist
- * @unmap_dma          unmap the memory for dma
- * @map_kernel         map memory to the kernel
- * @unmap_kernel       unmap memory to the kernel
- * @map_user           map memory to userspace
- *
- * allocate, phys, and map_user return 0 on success, -errno on error.
- * map_dma and map_kernel return pointer on success, ERR_PTR on error.
- */
-struct ion_heap_ops {
-       int (*allocate) (struct ion_heap *heap,
-                        struct ion_buffer *buffer, unsigned long len,
-                        unsigned long align, unsigned long flags);
-       void (*free) (struct ion_buffer *buffer);
-       int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
-                    ion_phys_addr_t *addr, size_t *len);
-       struct sg_table *(*map_dma) (struct ion_heap *heap,
-                                       struct ion_buffer *buffer);
-       void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
-       void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
-       void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
-       int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
-                        struct vm_area_struct *vma);
-       int (*map_iommu)(struct ion_buffer *buffer, int domain_num, unsigned long *ptr_iova);
-       int (*unmap_iommu)(struct ion_buffer *buffer, int domain_num);
-};
-
-/**
- * heap flags - flags between the heaps and core ion code
- */
-#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
-
-/**
- * private flags - flags internal to ion
- */
-/*
- * Buffer is being freed from a shrinker function. Skip any possible
- * heap-specific caching mechanism (e.g. page pools). Guarantees that
- * any buffer storage that came from the system allocator will be
- * returned to the system allocator.
- */
-#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
-
-/**
- * struct ion_heap - represents a heap in the system
- * @node:              rb node to put the heap on the device's tree of heaps
- * @dev:               back pointer to the ion_device
- * @type:              type of heap
- * @ops:               ops struct as above
- * @flags:             flags
- * @id:                        id of heap, also indicates priority of this heap when
- *                     allocating.  These are specified by platform data and
- *                     MUST be unique
- * @name:              used for debugging
- * @priv:              private heap data
- * @shrinker:          a shrinker for the heap, if the heap caches system
- *                     memory, it must define a shrinker to return it on low
- *                     memory conditions, this includes system memory cached
- *                     in the deferred free lists for heaps that support it
- * @free_list:         free list head if deferred free is used
- * @free_list_size     size of the deferred free list in bytes
- * @lock:              protects the free list
- * @waitqueue:         queue to wait on from deferred free thread
- * @task:              task struct of deferred free thread
- * @debug_show:                called when heap debug file is read to add any
- *                     heap specific debug info to output
- *
- * Represents a pool of memory from which buffers can be made.  In some
- * systems the only heap is regular system memory allocated via vmalloc.
- * On others, some blocks might require large physically contiguous buffers
- * that are allocated from a specially reserved heap.
- */
-struct ion_heap {
-       struct plist_node node;
-       struct ion_device *dev;
-       enum ion_heap_type type;
-       struct ion_heap_ops *ops;
-       unsigned long flags;
-       unsigned int id;
-       const char *name;
-       void *priv;
-       struct shrinker shrinker;
-       struct list_head free_list;
-       size_t free_list_size;
-       spinlock_t free_lock;
-       wait_queue_head_t waitqueue;
-       struct task_struct *task;
-       int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
-};
-
-/**
- * ion_buffer_cached - this ion buffer is cached
- * @buffer:            buffer
- *
- * indicates whether this ion buffer is cached
- */
-bool ion_buffer_cached(struct ion_buffer *buffer);
-
-/**
- * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
- * @buffer:            buffer
- *
- * indicates whether userspace mappings of this buffer will be faulted
- * in, this can affect how buffers are allocated from the heap.
- */
-bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
-
-/**
- * ion_device_create - allocates and returns an ion device
- * @custom_ioctl:      arch specific ioctl function if applicable
- *
- * returns a valid device or -PTR_ERR
- */
-struct ion_device *ion_device_create(long (*custom_ioctl)
-                                    (struct ion_client *client,
-                                     unsigned int cmd,
-                                     unsigned long arg));
-
-/**
- * ion_device_destroy - free and device and it's resource
- * @dev:               the device
- */
-void ion_device_destroy(struct ion_device *dev);
-
-/**
- * ion_device_add_heap - adds a heap to the ion device
- * @dev:               the device
- * @heap:              the heap to add
- */
-void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
-
-/**
- * some helpers for common operations on buffers using the sg_table
- * and vaddr fields
- */
-void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
-void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
-int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
-                       struct vm_area_struct *);
-int ion_heap_buffer_zero(struct ion_buffer *buffer);
-
-/**
- * ion_heap_alloc_pages - allocate pages from alloc_pages
- * @buffer:            the buffer to allocate for, used to extract the flags
- * @gfp_flags:         the gfp_t for the allocation
- * @order:             the order of the allocatoin
- *
- * This funciton allocations from alloc pages and also does any other
- * necessary operations based on the buffer->flags.  For buffers which
- * will be faulted in the pages are split using split_page
- */
-struct page *ion_heap_alloc_pages(struct ion_buffer *buffer, gfp_t gfp_flags,
-                                 unsigned int order);
-
-/**
- * ion_heap_init_deferred_free -- initialize deferred free functionality
- * @heap:              the heap
- *
- * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
- * be called to setup deferred frees. Calls to free the buffer will
- * return immediately and the actual free will occur some time later
- */
-int ion_heap_init_deferred_free(struct ion_heap *heap);
-
-/**
- * ion_heap_freelist_add - add a buffer to the deferred free list
- * @heap:              the heap
- * @buffer:            the buffer
- *
- * Adds an item to the deferred freelist.
- */
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
-
-/**
- * ion_heap_freelist_drain - drain the deferred free list to
- * the reserved list
- * @heap:              the heap
- * @cached:            only drain the cached buffer list
- * @size:              ammount of memory to drain in bytes
- *
- * Drains the indicated amount of memory from the deferred freelist immediately.
- * Returns the total amount freed.  The total freed may be higher depending
- * on the size of the items in the list, or lower if there is insufficient
- * total memory on the freelist.
- */
-size_t ion_heap_freelist_drain(struct ion_heap *heap, int cached, size_t size);
-
-/**
- * ion_heap_freelist_shrink - drain the deferred free
- *                             list, skipping any heap-specific
- *                             pooling or caching mechanisms
- *
- * @heap:              the heap
- * @size:              amount of memory to drain in bytes
- *
- * Drains the indicated amount of memory from the deferred freelist immediately.
- * Returns the total amount freed.  The total freed may be higher depending
- * on the size of the items in the list, or lower if there is insufficient
- * total memory on the freelist.
- *
- * Unlike with @ion_heap_freelist_drain, don't put any pages back into
- * page pools or otherwise cache the pages. Everything must be
- * genuinely free'd back to the system. If you're free'ing from a
- * shrinker you probably want to use this. Note that this relies on
- * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
- * flag.
- */
-size_t ion_heap_freelist_shrink(struct ion_heap *heap, int cached, size_t size);
-
-/**
- * ion_heap_freelist_size - returns the size of the freelist in bytes
- * @heap:              the heap
- */
-size_t ion_heap_freelist_size(struct ion_heap *heap);
-
-
-/**
- * functions for creating and destroying the built in ion heaps.
- * architectures can add their own custom architecture specific
- * heaps as appropriate.
- */
-
-int ion_system_heap_debug_defer_catched(struct ion_heap *heap);
-struct ion_heap *ion_heap_create(struct ion_platform_heap *);
-void ion_heap_destroy(struct ion_heap *);
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
-void ion_system_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
-void ion_system_contig_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
-void ion_carveout_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
-void ion_chunk_heap_destroy(struct ion_heap *);
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *, struct device *);
-void ion_cma_heap_destroy(struct ion_heap *);
-
-/**
- * kernel api to allocate/free from carveout -- used when carveout is
- * used to back an architecture specific custom heap
- */
-ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
-                                     unsigned long align);
-void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
-                      unsigned long size);
-/**
- * The carveout heap returns physical addresses, since 0 may be a valid
- * physical address, this is used to indicate allocation failed
- */
-#define ION_CARVEOUT_ALLOCATE_FAIL -1
-
-/**
- * functions for creating and destroying a heap pool -- allows you
- * to keep a pool of pre allocated memory to use from your heap.  Keeping
- * a pool of memory that is ready for dma, ie any cached mapping have been
- * invalidated from the cache, provides a significant peformance benefit on
- * many systems */
-
-/**
- * struct ion_page_pool - pagepool struct
- * @high_count:                number of highmem items in the pool
- * @low_count:         number of lowmem items in the pool
- * @high_items:                list of highmem items
- * @low_items:         list of lowmem items
- * @shrinker:          a shrinker for the items
- * @mutex:             lock protecting this struct and especially the count
- *                     item list
- * @alloc:             function to be used to allocate pageory when the pool
- *                     is empty
- * @free:              function to be used to free pageory back to the system
- *                     when the shrinker fires
- * @gfp_mask:          gfp_mask to use from alloc
- * @order:             order of pages in the pool
- * @list:              plist node for list of pools
- *
- * Allows you to keep a pool of pre allocated pages to use from your heap.
- * Keeping a pool of pages that is ready for dma, ie any cached mapping have
- * been invalidated from the cache, provides a significant peformance benefit
- * on many systems
- */
-struct ion_page_pool {
-       int high_count;
-       int low_count;
-       struct list_head high_items;
-       struct list_head low_items;
-       struct mutex mutex;
-       gfp_t gfp_mask;
-       unsigned int order;
-       struct plist_node list;
-};
-
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
-void ion_page_pool_destroy(struct ion_page_pool *);
-void *ion_page_pool_alloc(struct ion_page_pool *);
-void ion_page_pool_free(struct ion_page_pool *, struct page *);
-
-/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
- * @pool:              the pool
- * @gfp_mask:          the memory type to reclaim
- * @nr_to_scan:                number of items to shrink in pages
- *
- * returns the number of items freed in pages
- */
-int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
-                         int nr_to_scan);
-
-#endif /* _ION_PRIV_H */
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
deleted file mode 100644 (file)
index 20f9857..0000000
+++ /dev/null
@@ -1,704 +0,0 @@
-/*
- * drivers/gpu/ion/ion_system_heap.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <asm/page.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/highmem.h>
-#include <linux/ion.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/list_sort.h>
-#include "ion_priv.h"
-
-static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
-                                           __GFP_NOWARN | __GFP_NORETRY |
-                                           __GFP_NOMEMALLOC | __GFP_NO_KSWAPD) &
-                                          ~__GFP_WAIT;
-static unsigned int low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO |
-                                        __GFP_NOWARN);
-#define HIGH_PAGE_ORDER         (4)
-static const unsigned int orders[] = {8, 4, 0};
-static const int num_orders = ARRAY_SIZE(orders);
-static int order_to_index(unsigned int order)
-{
-       int i;
-       for (i = 0; i < num_orders; i++)
-               if (order == orders[i])
-                       return i;
-       BUG();
-       return -1;
-}
-
-static unsigned int order_to_size(int order)
-{
-       return PAGE_SIZE << order;
-}
-
-struct ion_system_heap {
-       struct ion_heap heap;
-       struct ion_page_pool **pools;
-       struct list_head deferred;
-       struct rt_mutex lock;
-};
-
-struct page_info {
-       struct page *page;
-       unsigned int order;
-       struct list_head list;
-       bool split_pages;
-};
-
-static struct page *alloc_buffer_page(struct ion_system_heap *heap,
-                                     struct ion_buffer *buffer,
-                                     unsigned long order)
-{
-       bool cached = ion_buffer_cached(buffer);
-       struct ion_page_pool *pool = heap->pools[order_to_index(order)];
-       struct page *page;
-
-       if (!cached) {
-               page = ion_page_pool_alloc(pool);
-       } else {
-               gfp_t gfp_flags = low_order_gfp_flags;
-
-               if (order > HIGH_PAGE_ORDER)
-                       gfp_flags = high_order_gfp_flags;
-               page = ion_heap_alloc_pages(buffer, gfp_flags, order);
-               if (!page)
-                       return 0;
-#ifdef CONFIG_64BIT
-       dma_sync_single_for_device(NULL, (dma_addr_t)page_to_phys(page),
-               PAGE_SIZE << order, DMA_BIDIRECTIONAL);
-#else
-       arm_dma_ops.sync_single_for_device(NULL,
-               pfn_to_dma(NULL, page_to_pfn(page)),
-               PAGE_SIZE << order, DMA_BIDIRECTIONAL);
-#endif
-
-       }
-       if (!page)
-               return 0;
-
-       return page;
-}
-
-static void free_buffer_page(struct ion_system_heap *heap,
-                            struct ion_buffer *buffer, struct page *page,
-                            unsigned int order)
-{
-       bool cached = ion_buffer_cached(buffer);
-       bool split_pages = ion_buffer_fault_user_mappings(buffer);
-       int i;
-
-       if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
-               struct ion_page_pool *pool = heap->pools[order_to_index(order)];
-               ion_page_pool_free(pool, page);
-       } else if (split_pages) {
-               for (i = 0; i < (1 << order); i++)
-                       __free_page(page + i);
-       } else {
-               __free_pages(page, order);
-       }
-}
-
-static struct page_info *alloc_deferred_page(struct ion_system_heap *heap,
-                                            bool split_pages,
-                                            unsigned int order)
-{
-       struct page_info *found = NULL;
-       struct page_info *info;
-
-       rt_mutex_lock(&heap->lock);
-       if (!(list_empty(&heap->deferred))) {
-               struct page_info *saved = NULL;
-               list_for_each_entry(info, &heap->deferred, list) {
-                       if (info->order == order) {
-                               if (split_pages == info->split_pages) {
-                                       found = info;
-                                       list_del(&found->list);
-                                       break;
-                               }
-                               if (!saved && split_pages && !info->split_pages)
-                                       saved = info;
-                       }
-                       if (info->order < order) {
-                               if (saved) {
-                                       split_page(saved->page, saved->order);
-                                       saved->split_pages = 1;
-                                       found = saved;
-                                       list_del(&found->list);
-                               }
-                               break;
-                       }
-               }
-       }
-       rt_mutex_unlock(&heap->lock);
-
-       /* either low_order_gfp_flags or high_order_gfp_flags has __GFP_ZERO */
-       if (found) {
-               int i;
-
-               for (i = 0; i < (1 << found->order); i++)
-                       clear_highpage(found->page + i);
-#ifdef CONFIG_64BIT
-               dma_sync_single_for_device(NULL, (dma_addr_t)page_to_phys(found->page),
-                       PAGE_SIZE << found->order, DMA_BIDIRECTIONAL);
-#else
-               arm_dma_ops.sync_single_for_device(NULL,
-                       pfn_to_dma(NULL, page_to_pfn(found->page)),
-                       PAGE_SIZE << found->order, DMA_BIDIRECTIONAL);
-#endif
-       }
-       return found;
-}
-
-static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
-                                                struct ion_buffer *buffer,
-                                                unsigned long size,
-                                                unsigned int max_order)
-{
-       struct page *page;
-       struct page_info *info;
-       int i;
-       bool cached = ion_buffer_cached(buffer);
-       bool split_pages = ion_buffer_fault_user_mappings(buffer);
-
-       for (i = 0; i < num_orders; i++) {
-               if (size < order_to_size(orders[i]))
-                       continue;
-               if (max_order < orders[i])
-                       continue;
-
-               info = NULL;
-               if (cached)
-                       info = alloc_deferred_page(heap, split_pages,
-                                                  orders[i]);
-               if (!info) {
-                       page = alloc_buffer_page(heap, buffer, orders[i]);
-                       if (!page)
-                               continue;
-
-                       info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
-                       /*
-                         Android CL CID : 26180 => Galaxy-Core-Prime IN INDIA
-                       */
-                       if (!info)
-                               return -ENOMEM;
-                       info->page = page;
-                       info->order = orders[i];
-                       info->split_pages = split_pages;
-               }
-               return info;
-       }
-       return NULL;
-}
-
-static int deferred_freepages_cmp(void *priv, struct list_head *a,
-                                 struct list_head *b)
-{
-       struct page_info *infoa, *infob;
-
-       infoa = list_entry(a, struct page_info, list);
-       infob = list_entry(b, struct page_info, list);
-
-       if (infoa->order > infob->order)
-               return 1;
-       if (infoa->order < infob->order)
-               return -1;
-       return 0;
-}
-
-struct ion_system_buffer_info {
-       struct list_head pages;
-       struct sg_table table;
-};
-
-static int ion_system_heap_allocate(struct ion_heap *heap,
-                                    struct ion_buffer *buffer,
-                                    unsigned long size, unsigned long align,
-                                    unsigned long flags)
-{
-       struct ion_system_heap *sys_heap = container_of(heap,
-                                                       struct ion_system_heap,
-                                                       heap);
-       struct ion_system_buffer_info *priv;
-       struct scatterlist *sg;
-       int ret;
-       struct page_info *info, *tmp_info;
-       int i = 0;
-       long size_remaining = PAGE_ALIGN(size);
-       unsigned int max_order = orders[0];
-       struct timeval val_start;
-       struct timeval val_end;
-       uint64_t time_start;
-       uint64_t time_end;
-
-       priv = kmalloc(sizeof(struct ion_system_buffer_info), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&priv->pages);
-
-       do_gettimeofday(&val_start);
-       time_start = val_start.tv_sec * 1000000 + val_start.tv_usec;
-       while (size_remaining > 0) {
-               info = alloc_largest_available(sys_heap, buffer, size_remaining, max_order);
-               if (!info)
-                       goto err;
-               list_add_tail(&info->list, &priv->pages);
-               size_remaining -= (1 << info->order) * PAGE_SIZE;
-               max_order = info->order;
-               i++;
-       }
-       do_gettimeofday(&val_end);
-       time_end = val_end.tv_sec * 1000000 + val_end.tv_usec;
-       pr_debug("%s, size:%8ld, time:%8lld us\n", __func__, size,
-               time_end - time_start);
-
-       ret = sg_alloc_table(&priv->table, i, GFP_KERNEL);
-       if (ret)
-               goto err;
-
-       sg = priv->table.sgl;
-       list_for_each_entry(info, &priv->pages, list) {
-               struct page *page = info->page;
-               sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
-               sg = sg_next(sg);
-       }
-
-       buffer->priv_virt = priv;
-       return 0;
-err:
-       if (ion_buffer_cached(buffer)) {
-               rt_mutex_lock(&sys_heap->lock);
-               list_splice(&priv->pages, &sys_heap->deferred);
-               list_sort(NULL, &sys_heap->deferred, deferred_freepages_cmp);
-               rt_mutex_unlock(&sys_heap->lock);
-       } else {
-               list_for_each_entry_safe(info, tmp_info, &priv->pages, list) {
-                       free_buffer_page(sys_heap, buffer, info->page, info->order);
-                       kfree(info);
-               }
-       }
-       kfree(priv);
-       return -ENOMEM;
-}
-
-#define BAD_PAGE_WORKAROUND
-void ion_system_heap_free(struct ion_buffer *buffer)
-{
-       struct ion_heap *heap = buffer->heap;
-       struct ion_system_heap *sys_heap = container_of(heap,
-                                                       struct ion_system_heap,
-                                                       heap);
-       struct sg_table *table = buffer->sg_table;
-       bool cached = ion_buffer_cached(buffer);
-       struct scatterlist *sg;
-       int i;
-       struct page_info *info, *tmp_info;
-       struct ion_system_buffer_info *priv = buffer->priv_virt;
-#ifdef BAD_PAGE_WORKAROUND
-       unsigned int last_length = 0;
-       struct page *wrong_page = NULL;
-       static int sg_err_count = 0;
-#endif
-
-       /* uncached pages come from the page pools, zero them before returning
-          for security purposes (other allocations are zerod at alloc time */
-       if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
-               ion_heap_buffer_zero(buffer);
-
-#ifndef BAD_PAGE_WORKAROUND    /* FIXME: this is a temp workaround */
-       for_each_sg(table->sgl, sg, table->nents, i)
-               free_buffer_page(sys_heap, buffer, sg_page(sg),
-                               get_order(sg_dma_len(sg)));
-#else
-       /* we assume there's only one wrong sg node in the list, with 0 length,
-        * and we assume its order is the smaller one of its two neighbours'
-        */
-       for_each_sg(table->sgl, sg, table->nents, i) {
-               if (sg_dma_len(sg) == 0) {
-                       wrong_page = sg_page(sg);
-                       sg_err_count++;
-                       printk(KERN_ERR \
-                         "ION_SYSTEM_HEAP: found 0 length sg (%d)!!!\n",\
-                          sg_err_count);
-                       continue;
-               }
-               if (wrong_page != NULL) {
-                       free_buffer_page(sys_heap, buffer, wrong_page,
-                               get_order((min(sg_dma_len(sg), last_length))));
-                       wrong_page = NULL;
-               }
-               last_length = sg_dma_len(sg);
-               free_buffer_page(sys_heap, buffer, sg_page(sg),
-                               get_order(sg_dma_len(sg)));
-       }
-       if (wrong_page != NULL) {
-               free_buffer_page(sys_heap, buffer, wrong_page,
-                       get_order(last_length));
-       }
-#endif
-       } else {
-               rt_mutex_lock(&sys_heap->lock);
-               list_splice_init(&priv->pages, &sys_heap->deferred);
-               list_sort(NULL, &sys_heap->deferred, deferred_freepages_cmp);
-               rt_mutex_unlock(&sys_heap->lock);
-       }
-
-       sg_free_table(table);
-       if (!list_empty(&priv->pages)) {
-               list_for_each_entry_safe(info, tmp_info, &priv->pages, list) {
-                       list_del(&info->list);
-                       kfree(info);
-               }
-       }
-       kfree(priv);
-}
-
-struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
-                                        struct ion_buffer *buffer)
-{
-       return &(((struct ion_system_buffer_info *) buffer->priv_virt)->table);
-}
-
-void ion_system_heap_unmap_dma(struct ion_heap *heap,
-                              struct ion_buffer *buffer)
-{
-       return;
-}
-
-#if defined(CONFIG_SPRD_IOMMU)
-int ion_system_heap_map_iommu(struct ion_buffer *buffer, int domain_num, unsigned long *ptr_iova)
-{
-       int ret=0;
-       if (0 == buffer->iomap_cnt[domain_num]) {
-               buffer->iova[domain_num] = sprd_iova_alloc(domain_num,buffer->size);
-               ret = sprd_iova_map(domain_num, buffer->iova[domain_num],
-                               buffer->size, buffer->sg_table);
-       }
-       *ptr_iova=buffer->iova[domain_num];
-       buffer->iomap_cnt[domain_num]++;
-       return ret;
-}
-int ion_system_heap_unmap_iommu(struct ion_buffer *buffer, int domain_num)
-{
-       int ret=0;
-       if (buffer->iomap_cnt[domain_num] > 0) {
-               buffer->iomap_cnt[domain_num]--;
-               if (0 == buffer->iomap_cnt[domain_num]) {
-                       ret = sprd_iova_unmap(domain_num, buffer->iova[domain_num],
-                                       buffer->size);
-                       sprd_iova_free(domain_num, buffer->iova[domain_num], buffer->size);
-                       buffer->iova[domain_num] = 0;
-               }
-       }
-       return ret;
-}
-#endif
-
-static struct ion_heap_ops system_heap_ops = {
-       .allocate = ion_system_heap_allocate,
-       .free = ion_system_heap_free,
-       .map_dma = ion_system_heap_map_dma,
-       .unmap_dma = ion_system_heap_unmap_dma,
-       .map_kernel = ion_heap_map_kernel,
-       .unmap_kernel = ion_heap_unmap_kernel,
-       .map_user = ion_heap_map_user,
-#if defined(CONFIG_SPRD_IOMMU)
-       .map_iommu = ion_system_heap_map_iommu,
-       .unmap_iommu = ion_system_heap_unmap_iommu,
-#endif
-};
-
-static int ion_deferred_list_shrink(struct ion_system_heap *heap,
-                                   gfp_t gfp_mask,
-                                   int nr_to_scan)
-{
-       int nr_freed = 0;
-       int i, j;
-       struct page_info *info;
-
-       rt_mutex_lock(&heap->lock);
-       if (nr_to_scan == 0) {
-               list_for_each_entry(info, &heap->deferred, list)
-                       nr_freed += 1 << info->order;
-       } else {
-               for (i = 0; i < nr_to_scan; i++) {
-                       struct list_head *last = &heap->deferred;
-
-                       if (list_empty(last))
-                               break;
-                       last = last->prev;
-                       list_del(last);
-                       info = list_entry(last, struct page_info, list);
-                       nr_freed += 1 << info->order;
-
-                       if (info->split_pages) {
-                               for (j = 0; j < (1 << info->order); j++)
-                                       __free_page(info->page + j);
-                       } else {
-                               __free_pages(info->page, info->order);
-                       }
-                       kfree(info);
-               }
-       }
-       rt_mutex_unlock(&heap->lock);
-
-       return nr_freed;
-}
-
-static int ion_system_heap_shrink(struct shrinker *shrinker,
-                                 struct shrink_control *sc) {
-
-       struct ion_heap *heap = container_of(shrinker, struct ion_heap,
-                                            shrinker);
-       struct ion_system_heap *sys_heap = container_of(heap,
-                                                       struct ion_system_heap,
-                                                       heap);
-       int nr_total = 0;
-       int nr_freed = 0;
-       int i, nr_to_scan = sc->nr_to_scan;
-
-       if (nr_to_scan == 0)
-               goto end;
-
-       /* shrink the free list first, no point in zeroing the memory if
-          we're just going to reclaim it */
-       nr_freed += ion_heap_freelist_shrink(heap, -1, nr_to_scan * PAGE_SIZE) /
-               PAGE_SIZE;
-
-       if (nr_freed >= nr_to_scan)
-               goto end;
-
-       for (i = 0; i < num_orders; i++) {
-               struct ion_page_pool *pool = sys_heap->pools[i];
-
-               int freed = ion_page_pool_shrink(pool, sc->gfp_mask,
-                                                nr_to_scan);
-               nr_freed += freed;
-               nr_to_scan -= freed;
-               if (nr_to_scan <= 0)
-                       break;
-       }
-       if (nr_to_scan > 0)
-               nr_freed += ion_deferred_list_shrink(sys_heap, sc->gfp_mask,
-                                                    nr_to_scan);
-
-end:
-       /* total number of items is whatever the page pools are holding
-          plus whatever's in the freelist */
-       for (i = 0; i < num_orders; i++) {
-               struct ion_page_pool *pool = sys_heap->pools[i];
-               nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
-       }
-       nr_total += ion_deferred_list_shrink(sys_heap, sc->gfp_mask, 0);
-       nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
-       return nr_total;
-
-}
-
-static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
-                                     void *unused)
-{
-
-       struct ion_system_heap *sys_heap = container_of(heap,
-                                                       struct ion_system_heap,
-                                                       heap);
-       int i;
-       for (i = 0; i < num_orders; i++) {
-               struct ion_page_pool *pool = sys_heap->pools[i];
-               seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
-                          pool->high_count, pool->order,
-                          (1 << pool->order) * PAGE_SIZE * pool->high_count);
-               seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
-                          pool->low_count, pool->order,
-                          (1 << pool->order) * PAGE_SIZE * pool->low_count);
-       }
-       return 0;
-}
-
-int ion_system_heap_debug_defer_catched(struct ion_heap *heap)
-{
-       struct ion_system_heap *sys_heap = container_of(heap,
-                       struct ion_system_heap,
-                       heap);
-       struct page_info *info = NULL;
-       size_t total_deferred_size = 0;
-
-       rt_mutex_lock(&sys_heap->lock);
-       if (!(list_empty(&sys_heap->deferred))) {
-               list_for_each_entry(info, &sys_heap->deferred, list)
-                       total_deferred_size += (1 << info->order) * PAGE_SIZE;
-       }
-       rt_mutex_unlock(&sys_heap->lock);
-
-       return total_deferred_size;
-}
-
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
-{
-       struct ion_system_heap *heap;
-       int i;
-
-       heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
-       if (!heap)
-               return ERR_PTR(-ENOMEM);
-       heap->heap.ops = &system_heap_ops;
-       heap->heap.type = ION_HEAP_TYPE_SYSTEM;
-       heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
-       heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
-                             GFP_KERNEL);
-       if (!heap->pools)
-               goto err_alloc_pools;
-       for (i = 0; i < num_orders; i++) {
-               struct ion_page_pool *pool;
-               gfp_t gfp_flags = low_order_gfp_flags;
-
-               if (orders[i] > HIGH_PAGE_ORDER)
-                       gfp_flags = high_order_gfp_flags;
-               pool = ion_page_pool_create(gfp_flags, orders[i]);
-               if (!pool)
-                       goto err_create_pool;
-               heap->pools[i] = pool;
-       }
-
-       INIT_LIST_HEAD(&heap->deferred);
-       rt_mutex_init(&heap->lock);
-       heap->heap.shrinker.shrink = ion_system_heap_shrink;
-       heap->heap.shrinker.seeks = DEFAULT_SEEKS;
-       heap->heap.shrinker.batch = 0;
-       register_shrinker(&heap->heap.shrinker);
-       heap->heap.debug_show = ion_system_heap_debug_show;
-       return &heap->heap;
-err_create_pool:
-       for (i = 0; i < num_orders; i++)
-               if (heap->pools[i])
-                       ion_page_pool_destroy(heap->pools[i]);
-       kfree(heap->pools);
-err_alloc_pools:
-       kfree(heap);
-       return ERR_PTR(-ENOMEM);
-}
-
-void ion_system_heap_destroy(struct ion_heap *heap)
-{
-       struct ion_system_heap *sys_heap = container_of(heap,
-                                                       struct ion_system_heap,
-                                                       heap);
-       int i;
-
-       for (i = 0; i < num_orders; i++)
-               ion_page_pool_destroy(sys_heap->pools[i]);
-       kfree(sys_heap->pools);
-       kfree(sys_heap);
-}
-
-static int ion_system_contig_heap_allocate(struct ion_heap *heap,
-                                          struct ion_buffer *buffer,
-                                          unsigned long len,
-                                          unsigned long align,
-                                          unsigned long flags)
-{
-       buffer->priv_virt = kzalloc(len, GFP_KERNEL);
-       if (!buffer->priv_virt)
-               return -ENOMEM;
-       return 0;
-}
-
-void ion_system_contig_heap_free(struct ion_buffer *buffer)
-{
-       kfree(buffer->priv_virt);
-}
-
-static int ion_system_contig_heap_phys(struct ion_heap *heap,
-                                      struct ion_buffer *buffer,
-                                      ion_phys_addr_t *addr, size_t *len)
-{
-       *addr = virt_to_phys(buffer->priv_virt);
-       *len = buffer->size;
-       return 0;
-}
-
-struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
-                                               struct ion_buffer *buffer)
-{
-       struct sg_table *table;
-       int ret;
-
-       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!table)
-               return ERR_PTR(-ENOMEM);
-       ret = sg_alloc_table(table, 1, GFP_KERNEL);
-       if (ret) {
-               kfree(table);
-               return ERR_PTR(ret);
-       }
-       sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
-                   0);
-       return table;
-}
-
-void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
-                                     struct ion_buffer *buffer)
-{
-       sg_free_table(buffer->sg_table);
-       kfree(buffer->sg_table);
-}
-
-int ion_system_contig_heap_map_user(struct ion_heap *heap,
-                                   struct ion_buffer *buffer,
-                                   struct vm_area_struct *vma)
-{
-       unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
-       return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
-                              vma->vm_end - vma->vm_start,
-                              vma->vm_page_prot);
-
-}
-
-static struct ion_heap_ops kmalloc_ops = {
-       .allocate = ion_system_contig_heap_allocate,
-       .free = ion_system_contig_heap_free,
-       .phys = ion_system_contig_heap_phys,
-       .map_dma = ion_system_contig_heap_map_dma,
-       .unmap_dma = ion_system_contig_heap_unmap_dma,
-       .map_kernel = ion_heap_map_kernel,
-       .unmap_kernel = ion_heap_unmap_kernel,
-       .map_user = ion_system_contig_heap_map_user,
-};
-
-struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
-{
-       struct ion_heap *heap;
-
-       heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
-       if (!heap)
-               return ERR_PTR(-ENOMEM);
-       heap->ops = &kmalloc_ops;
-       heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
-       return heap;
-}
-
-void ion_system_contig_heap_destroy(struct ion_heap *heap)
-{
-       kfree(heap);
-}
-
diff --git a/drivers/gpu/ion/ion_system_mapper.c b/drivers/gpu/ion/ion_system_mapper.c
deleted file mode 100644 (file)
index 692458e..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * drivers/gpu/ion/ion_system_mapper.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/ion.h>
-#include <linux/memory.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "ion_priv.h"
-/*
- * This mapper is valid for any heap that allocates memory that already has
- * a kernel mapping, this includes vmalloc'd memory, kmalloc'd memory,
- * pages obtained via io_remap, etc.
- */
-static void *ion_kernel_mapper_map(struct ion_mapper *mapper,
-                                  struct ion_buffer *buffer,
-                                  struct ion_mapping **mapping)
-{
-       if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
-               pr_err("%s: attempting to map an unsupported heap\n", __func__);
-               return ERR_PTR(-EINVAL);
-       }
-       /* XXX REVISIT ME!!! */
-       *((unsigned long *)mapping) = (unsigned long)buffer->priv;
-       return buffer->priv;
-}
-
-static void ion_kernel_mapper_unmap(struct ion_mapper *mapper,
-                                   struct ion_buffer *buffer,
-                                   struct ion_mapping *mapping)
-{
-       if (!((1 << buffer->heap->type) & mapper->heap_mask))
-               pr_err("%s: attempting to unmap an unsupported heap\n",
-                      __func__);
-}
-
-static void *ion_kernel_mapper_map_kernel(struct ion_mapper *mapper,
-                                       struct ion_buffer *buffer,
-                                       struct ion_mapping *mapping)
-{
-       if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
-               pr_err("%s: attempting to unmap an unsupported heap\n",
-                      __func__);
-               return ERR_PTR(-EINVAL);
-       }
-       return buffer->priv;
-}
-
-static int ion_kernel_mapper_map_user(struct ion_mapper *mapper,
-                                     struct ion_buffer *buffer,
-                                     struct vm_area_struct *vma,
-                                     struct ion_mapping *mapping)
-{
-       int ret;
-
-       switch (buffer->heap->type) {
-       case ION_HEAP_KMALLOC:
-       {
-               unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv));
-               ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
-                                     vma->vm_end - vma->vm_start,
-                                     vma->vm_page_prot);
-               break;
-       }
-       case ION_HEAP_VMALLOC:
-               ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff);
-               break;
-       default:
-               pr_err("%s: attempting to map unsupported heap to userspace\n",
-                      __func__);
-               return -EINVAL;
-       }
-
-       return ret;
-}
-
-static struct ion_mapper_ops ops = {
-       .map = ion_kernel_mapper_map,
-       .map_kernel = ion_kernel_mapper_map_kernel,
-       .map_user = ion_kernel_mapper_map_user,
-       .unmap = ion_kernel_mapper_unmap,
-};
-
-struct ion_mapper *ion_system_mapper_create(void)
-{
-       struct ion_mapper *mapper;
-       mapper = kzalloc(sizeof(struct ion_mapper), GFP_KERNEL);
-       if (!mapper)
-               return ERR_PTR(-ENOMEM);
-       mapper->type = ION_SYSTEM_MAPPER;
-       mapper->ops = &ops;
-       mapper->heap_mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC);
-       return mapper;
-}
-
-void ion_system_mapper_destroy(struct ion_mapper *mapper)
-{
-       kfree(mapper);
-}
-
diff --git a/drivers/gpu/ion/sprd/Makefile b/drivers/gpu/ion/sprd/Makefile
deleted file mode 100644 (file)
index 1a880fd..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-y += sprd_ion.o sprd_fence.o
-
-ifdef CONFIG_COMPAT
-obj-$(CONFIG_ION) += compat_sprd_ion.o
-endif
diff --git a/drivers/gpu/ion/sprd/compat_sprd_ion.c b/drivers/gpu/ion/sprd/compat_sprd_ion.c
deleted file mode 100644 (file)
index 3714159..0000000
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- * drivers/gpu/ion/compat_ion.c
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/compat.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-
-#include <linux/ion.h>
-#include <video/ion_sprd.h>
-#include "sprd_fence.h"
-#include "compat_sprd_ion.h"
-
-/* See kernel/include/video/ion_sprd.h for the definition of these structs */
-struct compat_ion_phys_data {
-       compat_int_t fd_buffer;
-       compat_ulong_t phys;
-       compat_size_t size;
-};
-
-struct compat_ion_msync_data {
-       compat_int_t fd_buffer;
-       compat_uptr_t vaddr;
-       compat_uptr_t paddr;
-       compat_size_t size;
-};
-
-struct compat_ion_mmu_data {
-       compat_int_t master_id;
-       compat_int_t fd_buffer;
-       compat_ulong_t iova_addr;
-       compat_size_t iova_size;
-};
-
-static int compat_get_ion_phys_data(
-                       struct compat_ion_phys_data __user *data32,
-                       struct ion_phys_data __user *data)
-{
-       compat_int_t i;
-       int err;
-
-       err = get_user(i, &data32->fd_buffer);
-       err |= put_user(i, &data->fd_buffer);
-
-       return err;
-};
-
-static int compat_put_ion_phys_data(
-                       struct compat_ion_phys_data __user *data32,
-                       struct ion_phys_data __user *data)
-{
-       compat_ulong_t ul;
-       compat_size_t s;
-       int err;
-
-       err = get_user(ul, &data->phys);
-       err |= put_user(ul, &data32->phys);
-       err |= get_user(s, &data->size);
-       err |= put_user(s, &data32->size);
-
-       return err;
-};
-
-static int compat_get_ion_msync_data(
-                       struct compat_ion_msync_data __user *data32,
-                       struct ion_msync_data __user *data)
-{
-       compat_int_t i;
-       compat_uptr_t up;
-       compat_size_t s;
-       int err;
-
-       err = get_user(i, &data32->fd_buffer);
-       err |= put_user(i, &data->fd_buffer);
-       err |= get_user(up, &data32->vaddr);
-       err |= put_user(up, &data->vaddr);
-       err |= get_user(up, &data32->paddr);
-       err |= put_user(up, &data->paddr);
-       err |= get_user(s, &data32->size);
-       err |= put_user(s, &data->size);
-
-       return err;
-};
-
-static int compat_get_ion_mmu_data(
-                       struct compat_ion_mmu_data __user *data32,
-                       struct ion_mmu_data __user *data)
-{
-       compat_int_t i;
-       compat_ulong_t ul;
-       compat_size_t s;
-       int err;
-
-       err = get_user(i, &data32->master_id);
-       err |= put_user(i, &data->master_id);
-       err |= get_user(i, &data32->fd_buffer);
-       err |= put_user(i, &data->fd_buffer);
-       err |= get_user(ul, &data32->iova_addr);
-       err |= put_user(ul, &data->iova_addr);
-       err |= get_user(s, &data32->iova_size);
-       err |= put_user(s, &data->iova_size);
-
-       return err;
-};
-
-static int compat_put_ion_mmu_data(
-                       struct compat_ion_mmu_data __user *data32,
-                       struct ion_mmu_data __user *data)
-{
-       compat_int_t i;
-       compat_ulong_t ul;
-       compat_size_t s;
-       int err;
-
-       err = get_user(i, &data->master_id);
-       err |= put_user(i, &data32->master_id);
-       err |= get_user(i, &data->fd_buffer);
-       err |= put_user(i, &data32->fd_buffer);
-       err |= get_user(ul, &data->iova_addr);
-       err |= put_user(ul, &data32->iova_addr);
-       err |= get_user(s, &data->iova_size);
-       err |= put_user(s, &data32->iova_size);
-
-       return err;
-};
-
-long compat_sprd_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-#if 0
-       struct ion_client *client = filp->private_data;
-       struct ion_device *dev = client->dev;
-
-       if (!filp->f_op || !dev->custom_ioctl)
-               return -ENOTTY;
-#endif
-       long ret;
-
-       pr_debug("%s, cmd: %u", __FUNCTION__, cmd);
-       switch (cmd) {
-       case ION_SPRD_CUSTOM_PHYS:
-       {
-               struct compat_ion_phys_data __user *data32;
-               struct ion_phys_data __user *data;
-               int err;
-
-               data32 = compat_ptr(arg);
-               data = compat_alloc_user_space(sizeof(*data));
-               if (data == NULL)
-                       return -EFAULT;
-
-               err = compat_get_ion_phys_data(data32, data);
-               if (err)
-                       return err;
-#if 0
-               ret = dev->custom_ioctl(client, ION_SPRD_CUSTOM_PHYS,
-                                               (unsigned long)data);
-#else
-               ret = sprd_ion_ioctl(filp, ION_SPRD_CUSTOM_PHYS, (unsigned long)data);
-#endif
-               err = compat_put_ion_phys_data(data32, data);
-               return ret ? ret : err;
-       }
-       case ION_SPRD_CUSTOM_MSYNC:
-       {
-               struct compat_ion_msync_data __user *data32;
-               struct ion_msync_data __user *data;
-               int err;
-       
-               data32 = compat_ptr(arg);
-               data = compat_alloc_user_space(sizeof(*data));
-               if (data == NULL)
-                       return -EFAULT;
-       
-               err = compat_get_ion_msync_data(data32, data);
-               if (err)
-                       return err;
-#if 0
-               ret = dev->custom_ioctl(client, ION_SPRD_CUSTOM_MSYNC,
-                                               (unsigned long)data);
-#else
-               ret = sprd_ion_ioctl(filp, ION_SPRD_CUSTOM_MSYNC, (unsigned long)data);
-#endif
-               return ret;
-       }
-#if defined(CONFIG_SPRD_IOMMU)
-       case ION_SPRD_CUSTOM_GSP_MAP:
-       {
-               struct compat_ion_mmu_data __user *data32;
-               struct ion_mmu_data __user *data;
-               int err;
-
-               data32 = compat_ptr(arg);
-               data = compat_alloc_user_space(sizeof(*data));
-               if (data == NULL)
-                       return -EFAULT;
-
-               err = compat_get_ion_mmu_data(data32, data);
-               if (err)
-                       return err;
-#if 0
-               ret = dev->custom_ioctl(client, ION_SPRD_CUSTOM_GSP_MAP,
-                                               (unsigned long)data);
-#else
-               ret = sprd_ion_ioctl(filp, ION_SPRD_CUSTOM_GSP_MAP, (unsigned long)data);
-#endif
-               err = compat_put_ion_mmu_data(data32, data);
-               return ret ? ret : err;
-       }
-       case ION_SPRD_CUSTOM_GSP_UNMAP:
-       {
-               struct compat_ion_mmu_data __user *data32;
-               struct ion_mmu_data __user *data;
-               int err;
-
-               data32 = compat_ptr(arg);
-               data = compat_alloc_user_space(sizeof(*data));
-               if (data == NULL)
-                       return -EFAULT;
-
-               err = compat_get_ion_mmu_data(data32, data);
-               if (err)
-                       return err;
-#if 0
-               ret = dev->custom_ioctl(client, ION_SPRD_CUSTOM_GSP_UNMAP,
-                                               (unsigned long)data);
-#else
-               ret = sprd_ion_ioctl(filp, ION_SPRD_CUSTOM_GSP_UNMAP, (unsigned long)data);
-#endif
-               err = compat_put_ion_mmu_data(data32, data);
-               return ret ? ret : err;
-       }
-       case ION_SPRD_CUSTOM_MM_MAP:
-       {
-               struct compat_ion_mmu_data __user *data32;
-               struct ion_mmu_data __user *data;
-               int err;
-
-               data32 = compat_ptr(arg);
-               data = compat_alloc_user_space(sizeof(*data));
-               if (data == NULL)
-                       return -EFAULT;
-
-               err = compat_get_ion_mmu_data(data32, data);
-               if (err)
-                       return err;
-#if 0
-               ret = dev->custom_ioctl(client, ION_SPRD_CUSTOM_MM_MAP,
-                                               (unsigned long)data);
-#else
-               ret = sprd_ion_ioctl(filp, ION_SPRD_CUSTOM_MM_MAP, (unsigned long)data);
-#endif
-               err = compat_put_ion_mmu_data(data32, data);
-               return ret ? ret : err;
-       }
-       case ION_SPRD_CUSTOM_MM_UNMAP:
-       {
-               struct compat_ion_mmu_data __user *data32;
-               struct ion_mmu_data __user *data;
-               int err;
-
-               data32 = compat_ptr(arg);
-               data = compat_alloc_user_space(sizeof(*data));
-               if (data == NULL)
-                       return -EFAULT;
-
-               err = compat_get_ion_mmu_data(data32, data);
-               if (err)
-                       return err;
-#if 0
-               ret = dev->custom_ioctl(client, ION_SPRD_CUSTOM_MM_UNMAP,
-                                               (unsigned long)data);
-#else
-               ret = sprd_ion_ioctl(filp, ION_SPRD_CUSTOM_MM_UNMAP, (unsigned long)data);
-#endif
-               err = compat_put_ion_mmu_data(data32, data);
-               return ret ? ret : err;
-       }
-#endif
-       case ION_SPRD_CUSTOM_FENCE_CREATE:
-       {
-               int ret = -1;
-               struct ion_fence_data data;
-
-               if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
-                       pr_err("FENCE_CREATE user data is err\n");
-                       return -EFAULT;
-               }
-
-               ret = sprd_fence_build(&data);
-               if (ret != 0) {
-                       pr_err("sprd_fence_build failed\n");
-                       return -EFAULT;
-               }
-
-               if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
-                       sprd_fence_destroy(&data);
-                       pr_err("copy_to_user fence failed\n");
-                       return -EFAULT;
-               }
-
-               break;
-    }
-       case ION_SPRD_CUSTOM_FENCE_SIGNAL:
-       {
-               struct ion_fence_data data;
-
-               if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
-                       pr_err("FENCE_CREATE user data is err\n");
-                       return -EFAULT;
-               }
-
-               sprd_fence_signal(&data);
-
-               break;
-       }
-       case ION_SPRD_CUSTOM_FENCE_DUP:
-       {
-               break;
-
-       }
-       default:
-               return -ENOIOCTLCMD;
-       }
-}
diff --git a/drivers/gpu/ion/sprd/compat_sprd_ion.h b/drivers/gpu/ion/sprd/compat_sprd_ion.h
deleted file mode 100644 (file)
index a7dc385..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
-
- * drivers/gpu/ion/compat_ion.h
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_COMPAT_SPRD_ION_H
-#define _LINUX_COMPAT_SPRD_ION_H
-
-#if IS_ENABLED(CONFIG_COMPAT)
-
-long compat_sprd_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-
-#else
-
-#define compat_sprd_ion_ioctl  NULL
-
-#endif /* CONFIG_COMPAT */
-#endif /* _LINUX_COMPAT_SPRD_ION_H */
diff --git a/drivers/gpu/ion/sprd/sprd_fence.c b/drivers/gpu/ion/sprd/sprd_fence.c
deleted file mode 100644 (file)
index 0baafe8..0000000
+++ /dev/null
@@ -1,469 +0,0 @@
-/*
- * Copyright (C) 2012 Spreadtrum Communications Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include "sprd_fence.h"
-#include <linux/kernel.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-
-#define WAIT_FENCE_TIMEOUT 200
-
-/*
- *  Porting from software sync
- *  (sw_sync.h sw_sync.c)
- * */
-static int sprd_sync_cmp(u32 a, u32 b)
-{
-    if (a == b)
-        return 0;
-
-    return ((s32)a - (s32)b) < 0 ? -1 : 1;
-}
-
-struct sync_pt *sprd_sync_pt_create(struct sprd_sync_timeline *obj, u32 value)
-{
-    struct sprd_sync_pt *pt;
-
-    pt = (struct sprd_sync_pt *)
-        sync_pt_create(&obj->obj, sizeof(struct sprd_sync_pt));
-
-    pt->value = value;
-
-    return (struct sync_pt *)pt;
-}
-
-static struct sync_pt *sprd_sync_pt_dup(struct sync_pt *sync_pt)
-{
-    struct sprd_sync_pt *pt = (struct sprd_sync_pt *) sync_pt;
-    struct sprd_sync_timeline *obj =
-        (struct sprd_sync_timeline *)sync_pt->parent;
-
-    return (struct sync_pt *) sprd_sync_pt_create(obj, pt->value);
-}
-
-static int sprd_sync_pt_has_signaled(struct sync_pt *sync_pt)
-{
-    struct sprd_sync_pt *pt = (struct sprd_sync_pt *)sync_pt;
-    struct sprd_sync_timeline *obj =
-        (struct sprd_sync_timeline *)sync_pt->parent;
-
-    return sprd_sync_cmp(obj->value, pt->value) >= 0;
-}
-
-static int sprd_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
-{
-    struct sprd_sync_pt *pt_a = (struct sprd_sync_pt *)a;
-    struct sprd_sync_pt *pt_b = (struct sprd_sync_pt *)b;
-
-    return sprd_sync_cmp(pt_a->value, pt_b->value);
-}
-
-
-static int sprd_sync_fill_driver_data(struct sync_pt *sync_pt,
-                                    void *data, int size)
-{
-    struct sprd_sync_pt *pt = (struct sprd_sync_pt *)sync_pt;
-
-    if (size < sizeof(pt->value))
-        return -ENOMEM;
-
-    memcpy(data, &pt->value, sizeof(pt->value));
-
-    return sizeof(pt->value);
-}
-
-static void sprd_sync_timeline_value_str(struct sync_timeline *sync_timeline,
-                                       char *str, int size)
-{
-    struct sprd_sync_timeline *timeline =
-        (struct sprd_sync_timeline *)sync_timeline;
-    snprintf(str, size, "%d", timeline->value);
-}
-
-static void sprd_sync_pt_value_str(struct sync_pt *sync_pt,
-                                 char *str, int size)
-{
-    struct sprd_sync_pt *pt = (struct sprd_sync_pt *)sync_pt;
-    snprintf(str, size, "%d", pt->value);
-}
-
-static struct sync_timeline_ops sprd_sync_timeline_ops = {
-    .driver_name = "sprd_sync",
-    .dup = sprd_sync_pt_dup,
-    .has_signaled = sprd_sync_pt_has_signaled,
-    .compare = sprd_sync_pt_compare,
-    .fill_driver_data = sprd_sync_fill_driver_data,
-    .timeline_value_str = sprd_sync_timeline_value_str,
-    .pt_value_str = sprd_sync_pt_value_str,
-};
-
-struct sprd_sync_timeline *sprd_sync_timeline_create(const char *name)
-{
-    struct sprd_sync_timeline *obj = (struct sprd_sync_timeline *)
-        sync_timeline_create(&sprd_sync_timeline_ops,
-                             sizeof(struct sprd_sync_timeline),
-                             name);
-
-    return obj;
-}
-
-void sprd_sync_timeline_inc(struct sprd_sync_timeline *obj, u32 inc)
-{
-    obj->value += inc;
-
-    sync_timeline_signal(&obj->obj);
-}
-
-/*
- *  Porting software sync done
- * */
-
-/*
- *  user interface
- * */
-
-/*
- *  Now build two timeline,
- *  one for PrimaryDisplayDevice,
- *  the other for VirtualDisplayDevice.
- *  If we only use single timeline, the later Device will
- *  block the former Device.
- * */
-static struct sync_timeline_data sprd_timeline;
-static struct sync_timeline_data sprd_timeline_virtual;
-int sprd_create_timeline(enum SPRD_DEVICE_SYNC_TYPE type)
-{
-       if (type == SPRD_DEVICE_PRIMARY_SYNC)
-       {
-               mutex_lock(&(sprd_timeline.sync_mutex));
-               if (sprd_timeline.timeline == NULL)
-               {
-                       sprd_timeline.timeline = sprd_sync_timeline_create("sprd-timeline");
-                       if (sprd_timeline.timeline == NULL)
-                       {
-                               printk(KERN_ERR "create_timeline, cannot create time line\n");
-                               mutex_unlock(&(sprd_timeline.sync_mutex));
-                               return -ENOMEM;
-                       }
-                       else
-                       {
-                               sprd_timeline.timeline_value = 0;
-                       }
-               }
-               mutex_unlock(&(sprd_timeline.sync_mutex));
-       }
-       else if (type == SPRD_DEVICE_VIRTUAL_SYNC)
-       {
-               mutex_lock(&(sprd_timeline_virtual.sync_mutex));
-               if (sprd_timeline_virtual.timeline == NULL)
-               {
-                       sprd_timeline_virtual.timeline = sprd_sync_timeline_create("sprd-timeline-virtual");
-                       if (sprd_timeline_virtual.timeline == NULL)
-                       {
-                               printk(KERN_ERR "create_timeline, cannot create virtual time line\n");
-                               mutex_unlock(&(sprd_timeline_virtual.sync_mutex));
-                               return -ENOMEM;
-                       }
-                       else
-                       {
-                               sprd_timeline_virtual.timeline_value = 0;
-                       }
-               }
-               mutex_unlock(&(sprd_timeline_virtual.sync_mutex));
-       }
-
-    return 0;
-}
-
-int sprd_destroy_timeline(struct sync_timeline_data *parent)
-{
-       struct sprd_sync_timeline *obj = NULL;
-
-       if (parent->timeline == NULL)
-       {
-               printk(KERN_ERR "sprd_fence timeline has been released\n");
-               return 0;
-       }
-
-       mutex_lock(&(parent->sync_mutex));
-       obj = parent->timeline;
-       sync_timeline_destroy(&obj->obj);
-       mutex_unlock(&(parent->sync_mutex));
-
-       return 0;
-}
-
-int open_sprd_sync_timeline(void)
-{
-       int ret = -1;
-       static int init = 0;
-
-       if (init == 0)
-       {
-               sprd_timeline.timeline_value = -1;
-               sprd_timeline.timeline = NULL;
-               mutex_init(&(sprd_timeline.sync_mutex));
-
-               sprd_timeline_virtual.timeline_value = -1;
-               sprd_timeline_virtual.timeline = NULL;
-               mutex_init(&(sprd_timeline_virtual.sync_mutex));
-
-               init = 1;
-       }
-
-       ret = sprd_create_timeline(SPRD_DEVICE_PRIMARY_SYNC);
-       if (ret < 0)
-       {
-               return ret;
-       }
-       ret = sprd_create_timeline(SPRD_DEVICE_VIRTUAL_SYNC);
-
-       return ret;
-}
-
-int close_sprd_sync_timeline(void)
-{
-       int ret = -1;
-
-       ret = sprd_destroy_timeline(&sprd_timeline);
-
-       ret = sprd_destroy_timeline(&sprd_timeline_virtual);
-
-       return ret;
-}
-
-int sprd_fence_create(struct ion_fence_data *data, char *name)
-{
-       int fd = get_unused_fd();
-       struct sync_pt *pt;
-       struct sync_fence *fence;
-       struct sync_timeline_data *parent = NULL;
-
-       if (data == NULL || name == NULL)
-       {
-               printk(KERN_ERR "sprd_fence_create input para is NULL\n");
-               return -EFAULT;
-       }
-
-
-       if (fd < 0)
-       {
-               fd = get_unused_fd();
-               if (fd < 0)
-               {
-                       printk(KERN_ERR "sprd_sync_pt_create failed to get fd\n");
-               }
-               return -EFAULT;
-       }
-
-       if (data->device_type == SPRD_DEVICE_PRIMARY_SYNC)
-       {
-               parent = &sprd_timeline;
-       }
-       else if (data->device_type == SPRD_DEVICE_VIRTUAL_SYNC)
-       {
-               parent = &sprd_timeline_virtual;
-       }
-
-       if (parent == NULL)
-       {
-               printk(KERN_ERR "sprd_fence_create failed to get sync timeline\n");
-               return -EFAULT;
-       }
-
-       mutex_lock(&(parent->sync_mutex));
-
-       pt = sprd_sync_pt_create(parent->timeline, parent->timeline_value + data->life_value);
-       if (pt == NULL)
-       {
-               printk(KERN_ERR "sprd_sync_pt_create failed\n");
-               goto err;
-       }
-
-       fence = sync_fence_create(name, pt);
-       if (fence == NULL)
-       {
-               sync_pt_free(pt);
-               printk(KERN_ERR "sprd_create_fence failed\n");
-               goto err;
-       }
-
-       sync_fence_install(fence, fd);
-
-       mutex_unlock(&(parent->sync_mutex));
-
-       pr_debug("create a fence: %p, fd: %d, life_value: %d, name: %s\n",
-                       (void *)fence, fd, data->life_value, name);
-
-    return fd;
-
-err:
-   put_unused_fd(fd);
-   mutex_unlock(&(parent->sync_mutex));
-   return -ENOMEM;
-}
-
-int sprd_fence_destroy(struct ion_fence_data *data)
-{
-       if (data == NULL)
-       {
-               printk(KERN_ERR "sprd_fence_destroy parameters NULL\n");
-               return -EFAULT;
-       }
-
-       if (data->release_fence_fd >= 0)
-       {
-               struct sync_fence *fence = sync_fence_fdget(data->release_fence_fd);
-               if (fence == NULL)
-               {
-                       printk(KERN_ERR "sprd_fence_destroy failed fence == NULL\n");
-                       return -EFAULT;
-               }
-       }
-       else if (data->retired_fence_fd >= 0)
-       {
-               struct sync_fence *fence = sync_fence_fdget(data->retired_fence_fd);
-               if (fence == NULL)
-               {
-                       printk(KERN_ERR "sprd_fence_destroy failed fence == NULL\n");
-               }       return -EFAULT;
-       }
-
-       return 0;
-}
-int sprd_fence_build(struct ion_fence_data *data)
-{
-       if (data == NULL)
-       {
-               printk(KERN_ERR "sprd_fence_build input para is NULL\n");
-               return -EFAULT;
-       }
-
-       data->release_fence_fd = sprd_fence_create(data, "HWCRelease");
-       if (data->release_fence_fd < 0)
-       {
-               printk(KERN_ERR "sprd_fence_build failed to create release fence\n");
-               return -ENOMEM;
-       }
-
-       data->retired_fence_fd = sprd_fence_create(data, "HWCRetire");
-       if (data->retired_fence_fd < 0)
-       {
-               printk(KERN_ERR "sprd_fence_build failed to create release fence\n");
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-int sprd_fence_signal_timeline(struct sync_timeline_data *parent)
-{
-       if (parent == NULL)
-       {
-               printk(KERN_ERR "sprd_fence_signal_timeline timeline is NULL\n");
-               return -EFAULT;
-       }
-
-       mutex_lock(&(parent->sync_mutex));
-
-       if (parent->timeline)
-       {
-               sprd_sync_timeline_inc(parent->timeline, 1);
-               parent->timeline_value++;
-
-               /*
-                *  For avoiding overflow
-                * */
-               if (parent->timeline_value < 0)
-               {
-                       parent->timeline_value = 0;
-               }
-       }
-
-       mutex_unlock(&(parent->sync_mutex));
-
-       pr_debug("sprd_signal_fence value: %d\n", parent->timeline_value);
-
-       return 0;
-}
-int sprd_fence_signal(struct ion_fence_data *data)
-{
-       struct sync_timeline_data *parent = NULL;
-       enum SPRD_DEVICE_SYNC_TYPE device_type= SPRD_DEVICE_PRIMARY_SYNC;
-
-       if (data == NULL)
-       {
-               printk(KERN_ERR "sprd_fence_signal parameter is NULL\n");
-               return -EFAULT;
-       }
-
-       if (data->device_type == SPRD_DEVICE_PRIMARY_SYNC)
-       {
-               parent = &sprd_timeline;
-       }
-       else if (data->device_type == SPRD_DEVICE_VIRTUAL_SYNC)
-       {
-               parent = &sprd_timeline_virtual;
-       }
-
-       if (parent == NULL)
-       {
-               printk(KERN_ERR "sprd_fence_signal failed to get sync timeline\n");
-               return -EFAULT;
-       }
-
-       sprd_fence_signal_timeline(parent);
-       pr_debug("sprd_signal_fence device_type: %d\n", data->device_type);
-
-       return 0;
-}
-
-int sprd_fence_wait(int fence_fd)
-{
-       int ret = 0;
-
-       struct sync_fence *fence = NULL;
-
-       if (fence_fd < 0)
-       {
-               printk(KERN_ERR "sprd_wait_fence input parameters is NULL\n");
-               return -EFAULT;
-       }
-
-       fence = sync_fence_fdget(fence_fd);
-       if (fence == NULL)
-       {
-               printk(KERN_ERR "sprd_fence_wait failed fence == NULL\n");
-               return -EFAULT;
-       }
-
-       ret = sync_fence_wait(fence, WAIT_FENCE_TIMEOUT);
-       sync_fence_put(fence);
-       if (ret < 0)
-       {
-               printk(KERN_ERR "sync_fence_wait failed, ret = %x\n", ret);
-       }
-
-       pr_debug("sprd_wait_fence wait fence: %p done\n", (void *)fence);
-
-       return ret;
-}
-
-struct sync_pt *sprd_fence_dup(struct sync_pt *sync_pt)
-{
-       struct sync_pt *pt = NULL;
-
-       pt = sprd_sync_pt_dup(sync_pt);
-
-       return pt;
-}
diff --git a/drivers/gpu/ion/sprd/sprd_fence.h b/drivers/gpu/ion/sprd/sprd_fence.h
deleted file mode 100644 (file)
index 6916ccf..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2012 Spreadtrum Communications Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef _SPRD_FENCE_H_
-#define _SPRD_FENCE_H_
-
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <linux/sync.h>
-#include <video/ion_sprd.h>
-
-struct sprd_sync_timeline {
-    struct sync_timeline obj;
-    u32 value;
-};
-
-struct sprd_sync_pt {
-    struct sync_pt pt;
-    u32 value;
-};
-
-struct sprd_sync_create_fence_data {
-    __u32 value;
-    char name[32];
-    __s32 fence; /* fd of new fence */
-};
-
-struct sync_timeline_data {
-    int timeline_value;
-    struct mutex sync_mutex;
-    struct sprd_sync_timeline *timeline;
-};
-
-extern int open_sprd_sync_timeline(void);
-extern int close_sprd_sync_timeline(void);
-extern int sprd_fence_build(struct ion_fence_data *data);
-extern int sprd_fence_destroy(struct ion_fence_data *data);
-extern int sprd_fence_signal(struct ion_fence_data *data);
-extern int sprd_fence_wait(int fence_fd);
-extern struct sync_pt *sprd_fence_dup(struct sync_pt *sync_pt);
-
-#endif
diff --git a/drivers/gpu/ion/sprd/sprd_ion.c b/drivers/gpu/ion/sprd/sprd_ion.c
deleted file mode 100644 (file)
index 47c922d..0000000
+++ /dev/null
@@ -1,1155 +0,0 @@
-/*
- * Copyright (C) 2012 Spreadtrum Communications Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/export.h>
-#include <linux/err.h>
-#include <linux/ion.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <video/ion_sprd.h>
-#include "../ion_priv.h"
-
-#include <asm/cacheflush.h>
-
-#include "sprd_fence.h"
-#include <linux/dma-buf.h>
-
-struct ion_device *idev;
-int num_heaps = 0;
-struct ion_heap **heaps;
-
-#if 1
-static unsigned long user_va2pa(struct mm_struct *mm, unsigned long addr)
-{
-       pgd_t *pgd = pgd_offset(mm, addr);
-       unsigned long pa = 0;
-
-       if (!pgd_none(*pgd)) {
-               pud_t *pud = pud_offset(pgd, addr);
-               if (!pud_none(*pud)) {
-                       pmd_t *pmd = pmd_offset(pud, addr);
-                       if (!pmd_none(*pmd)) {
-                               pte_t *ptep, pte;
-
-                               ptep = pte_offset_map(pmd, addr);
-                               pte = *ptep;
-                               if (pte_present(pte))
-                                       pa = pte_val(pte) & PAGE_MASK;
-                               pte_unmap(ptep);
-                       }
-               }
-       }
-
-       return pa;
-}
-#endif
-
-struct ion_client *sprd_ion_client_create(const char *name)
-{
-       return ion_client_create(idev,name);
-}
-EXPORT_SYMBOL(sprd_ion_client_create);
-
-void sprd_ion_client_destroy(struct ion_client *client)
-{
-       ion_client_destroy(client);
-}
-EXPORT_SYMBOL(sprd_ion_client_destroy);
-
-#if defined(CONFIG_SPRD_IOMMU)
-int sprd_map_iommu(struct ion_handle *handle, int domain_num,
-                                       unsigned long *ptr_iova)
-{
-       int ret = 0;
-       struct ion_buffer *buffer = ion_handle_buffer(handle);
-
-       mutex_lock(&buffer->lock);
-       if (0 == buffer->iomap_cnt[domain_num]) {
-               buffer->iova[domain_num] = sprd_iova_alloc(domain_num,
-                                                               buffer->size);
-               ret = sprd_iova_map(domain_num, buffer->iova[domain_num],
-                                                               buffer->size, buffer->sg_table);
-       }
-       *ptr_iova = buffer->iova[domain_num];
-       buffer->iomap_cnt[domain_num]++;
-       mutex_unlock(&buffer->lock);
-
-       return ret;
-}
-EXPORT_SYMBOL(sprd_map_iommu);
-
-int sprd_unmap_iommu(struct ion_handle *handle, int domain_num)
-{
-       int ret = 0;
-       struct ion_buffer *buffer = ion_handle_buffer(handle);
-
-       mutex_lock(&buffer->lock);
-       buffer->iomap_cnt[domain_num]--;
-       if (0 == buffer->iomap_cnt[domain_num]) {
-               ret = sprd_iova_unmap(domain_num, buffer->iova[domain_num],
-                                                               buffer->size);
-               sprd_iova_free(domain_num, buffer->iova[domain_num],
-                                                               buffer->size);
-               buffer->iova[domain_num] = 0;
-       }
-       mutex_unlock(&buffer->lock);
-
-       return ret;
-}
-EXPORT_SYMBOL(sprd_unmap_iommu);
-#endif
-
-int sprd_ion_get_gsp_addr(struct ion_addr_data *data)
-{
-       int ret = 0;
-       struct dma_buf *dmabuf;
-       struct ion_buffer *buffer;
-
-       dmabuf = dma_buf_get(data->fd_buffer);
-       if (IS_ERR(dmabuf)) {
-               pr_err("sprd_ion_get_gsp_addr() dmabuf=0x%lx dma_buf_get error!\n", (unsigned long)dmabuf);
-               return -1;
-       }
-       /* if this memory came from ion */
-#if 0
-       if (dmabuf->ops != &dma_buf_ops) {
-               pr_err("%s: can not import dmabuf from another exporter\n",
-                      __func__);
-               dma_buf_put(dmabuf);
-               return ERR_PTR(-EINVAL);
-       }
-#endif
-       buffer = dmabuf->priv;
-       dma_buf_put(dmabuf);
-
-       if (ION_HEAP_TYPE_SYSTEM == buffer->heap->type) {
-#if defined(CONFIG_SPRD_IOMMU)
-               mutex_lock(&buffer->lock);
-               if(0 == buffer->iomap_cnt[IOMMU_GSP]) {
-                       buffer->iova[IOMMU_GSP] = sprd_iova_alloc(IOMMU_GSP, buffer->size);
-                       ret = sprd_iova_map(IOMMU_GSP, buffer->iova[IOMMU_GSP],
-                                       buffer->size, buffer->sg_table);
-               }
-               if (ret) {
-                       pr_err("%s, sprd_iova_map error, iova: 0x%lx, ret: %d!\n",
-                               __func__, buffer->iova[IOMMU_GSP], ret);
-                       sprd_iova_free(IOMMU_GSP, buffer->iova[IOMMU_GSP], buffer->size);
-                       buffer->iova[IOMMU_GSP] = 0;
-                       data->iova_addr = 0;
-                       data->size = 0;
-               } else {
-                       buffer->iomap_cnt[IOMMU_GSP]++;
-                       data->iova_addr = buffer->iova[IOMMU_GSP];
-                       data->size = buffer->size;
-               }
-               data->iova_enabled = true;
-               mutex_unlock(&buffer->lock);
-#else
-               ret = -1;
-#endif
-       } else {
-               if (!buffer->heap->ops->phys) {
-                       pr_err("%s: ion_phys is not implemented by this heap.\n",
-                              __func__);
-                       return -ENODEV;
-               }
-               ret = buffer->heap->ops->phys(buffer->heap, buffer, &(data->phys_addr), &(data->size));
-               data->iova_enabled = false;
-               if (ret) {
-                       pr_err("%s, get phys error %d!\n", __func__, ret);
-               }
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL(sprd_ion_get_gsp_addr);
-
-int sprd_ion_free_gsp_addr(int fd)
-{
-       int ret = 0;
-       struct dma_buf *dmabuf;
-       struct ion_buffer *buffer;
-
-       dmabuf = dma_buf_get(fd);
-       if (IS_ERR(dmabuf)) {
-               pr_err("sprd_ion_free_gsp_addr() dmabuf=0x%lx dma_buf_get error!\n", (unsigned long)dmabuf);
-               return -1;
-       }
-       /* if this memory came from ion */
-#if 0
-       if (dmabuf->ops != &dma_buf_ops) {
-               pr_err("%s: can not import dmabuf from another exporter\n",
-                      __func__);
-               dma_buf_put(dmabuf);
-               return ERR_PTR(-EINVAL);
-       }
-#endif
-       buffer = dmabuf->priv;
-       dma_buf_put(dmabuf);
-
-       if (ION_HEAP_TYPE_SYSTEM == buffer->heap->type) {
-#if defined(CONFIG_SPRD_IOMMU)
-               mutex_lock(&buffer->lock);
-               if (buffer->iomap_cnt[IOMMU_GSP] > 0) {
-                       buffer->iomap_cnt[IOMMU_GSP]--;
-                       if(0 == buffer->iomap_cnt[IOMMU_GSP]) {
-                               ret = sprd_iova_unmap(IOMMU_GSP, buffer->iova[IOMMU_GSP],
-                                               buffer->size);
-                               sprd_iova_free(IOMMU_GSP, buffer->iova[IOMMU_GSP], buffer->size);
-                               buffer->iova[IOMMU_GSP] = 0;
-                       }
-               }
-               mutex_unlock(&buffer->lock);
-#else
-               ret = -1;
-#endif
-       }
-
-       if (ret) {
-               pr_err("sprd_ion_free_gsp_addr, error %d!\n",ret);
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL(sprd_ion_free_gsp_addr);
-
-long sprd_ion_ioctl(struct file *filp, unsigned int cmd,
-                               unsigned long arg)
-{
-       struct ion_client *client = filp->private_data;
-       int ret = 0;
-
-       switch (cmd) {
-       case ION_SPRD_CUSTOM_PHYS:
-       {
-               struct ion_phys_data data;
-               struct ion_handle *handle;
-
-               if (copy_from_user(&data, (void __user *)arg,
-                               sizeof(data))) {
-                       pr_err("sprd_heap_ioctl alloc copy_from_user error!\n");
-                       return -EFAULT;
-               }
-
-               handle = ion_import_dma_buf(client, data.fd_buffer);
-
-               if (IS_ERR(handle)) {
-                       pr_err("sprd_heap_ioctl alloc handle=0x%lx error!\n", (unsigned long)handle);
-                       return PTR_ERR(handle);
-               }
-
-               ret = ion_phys(client, handle, &data.phys, &data.size);
-               ion_free(client, handle);
-
-               if (ret) {
-                       pr_err("sprd_heap_ioctl alloc ret=0x%x error!\n",ret);
-                       return ret;
-               }
-
-               if (copy_to_user((void __user *)arg,
-                               &data, sizeof(data))) {
-                       pr_err("sprd_heap_ioctl alloc copy_to_user error!\n");
-                       return -EFAULT;
-               }
-
-               pr_debug("sprd_heap_ioctl alloc paddress=0x%lx size=0x%zx\n",data.phys,data.size);
-               break;
-       }
-       case ION_SPRD_CUSTOM_MSYNC:
-       {
-               struct ion_msync_data data;
-               void *v_addr;
-
-               if (copy_from_user(&data, (void __user *)arg,
-                               sizeof(data))) {
-                       pr_err("sprd_heap_ioctl free copy_from_user error!\n");
-                       return -EFAULT;
-               }
-
-               if ((unsigned long)data.vaddr & (PAGE_SIZE - 1)) {
-                       pr_err("sprd_heap_ioctl free data.vaddr=%p error!\n",data.vaddr);
-                       return -EFAULT;
-               }
-
-               pr_debug("sprd_heap_ioctl free vaddress=%p paddress=%p size=0x%zx\n",data.vaddr,data.paddr,data.size);
-#ifdef CONFIG_ARCH_SCX35L64
-               __dma_flush_range(data.vaddr, data.vaddr + data.size);
-#else
-               dmac_flush_range(data.vaddr, data.vaddr + data.size);
-#endif
-               v_addr = data.vaddr;
-               while (v_addr < data.vaddr + data.size) {
-                       unsigned long phy_addr = user_va2pa(current->mm, (unsigned long)v_addr);
-                       if (phy_addr) {
-#ifdef CONFIG_ARCH_SCX35L64
-                               /* not implement in arm64 now */
-#else
-                               outer_flush_range(phy_addr, phy_addr + PAGE_SIZE);
-#endif
-                       }
-                       v_addr += PAGE_SIZE;
-               }
-               break;
-       }
-#if defined(CONFIG_SPRD_IOMMU)
-       case ION_SPRD_CUSTOM_GSP_MAP:
-       {
-               struct ion_mmu_data data;
-               struct ion_handle *handle;
-               struct ion_buffer *buffer;
-
-               if (copy_from_user(&data, (void __user *)arg,
-                               sizeof(data))) {
-                       pr_err("sprd_heap_ioctl gsp map copy_from_user error!\n");
-                       return -EFAULT;
-               }
-
-               handle = ion_import_dma_buf(client, data.fd_buffer);
-
-               if (IS_ERR(handle)) {
-                       pr_err("sprd_heap_ioctl gsp map handle=0x%lx error!\n", (unsigned long)handle);
-                       return PTR_ERR(handle);
-               }
-
-               buffer = ion_handle_buffer(handle);
-
-               mutex_lock(&buffer->lock);
-               if (0 == buffer->iomap_cnt[IOMMU_GSP]) {
-                       buffer->iova[IOMMU_GSP] = sprd_iova_alloc(IOMMU_GSP, buffer->size);
-                       ret = sprd_iova_map(IOMMU_GSP, buffer->iova[IOMMU_GSP],
-                                       buffer->size, buffer->sg_table);
-               }
-               if (ret) {
-                       pr_err("sprd_heap_ioctl gsp map sprd_iova_map error, iova: 0x%lx, ret: %d!\n",
-                               buffer->iova[IOMMU_GSP], ret);
-                       sprd_iova_free(IOMMU_GSP, buffer->iova[IOMMU_GSP], buffer->size);
-                       buffer->iova[IOMMU_GSP] = 0;
-                       data.iova_addr = 0;
-                       data.iova_size = 0;
-               } else {
-                       buffer->iomap_cnt[IOMMU_GSP]++;
-                       data.iova_addr = buffer->iova[IOMMU_GSP];
-                       data.iova_size = buffer->size;
-               }
-               mutex_unlock(&buffer->lock);
-               ion_free(client, handle);
-
-               if (copy_to_user((void __user *)arg,
-                               &data, sizeof(data))) {
-                       pr_err("sprd_heap_ioctl gsp map copy_to_user error!\n");
-                       sprd_iova_free(IOMMU_GSP, data.iova_addr, data.iova_size);
-                       return -EFAULT;
-               }
-               break;
-       }
-       case ION_SPRD_CUSTOM_GSP_UNMAP:
-       {
-               struct ion_mmu_data data;
-               struct ion_handle *handle;
-               struct ion_buffer *buffer;
-
-               if (copy_from_user(&data, (void __user *)arg,
-                               sizeof(data))) {
-                       pr_err("sprd_heap_ioctl gsp unmap copy_from_user error!\n");
-                       return -EFAULT;
-               }
-
-               handle = ion_import_dma_buf(client, data.fd_buffer);
-
-               if (IS_ERR(handle)) {
-                       pr_err("sprd_heap_ioctl gsp unmap handle=0x%lx ion_import_dma_buf error!\n", (unsigned long)handle);
-                       return PTR_ERR(handle);
-               }
-
-               buffer = ion_handle_buffer(handle);
-
-               mutex_lock(&buffer->lock);
-               if (buffer->iomap_cnt[IOMMU_GSP] > 0) {
-                       buffer->iomap_cnt[IOMMU_GSP]--;
-                       if (0 == buffer->iomap_cnt[IOMMU_GSP]) {
-                               ret = sprd_iova_unmap(IOMMU_GSP, buffer->iova[IOMMU_GSP],
-                                               buffer->size);
-                               sprd_iova_free(IOMMU_GSP, buffer->iova[IOMMU_GSP], buffer->size);
-                               buffer->iova[IOMMU_GSP] = 0;
-                       }
-               }
-               mutex_unlock(&buffer->lock);
-               data.iova_addr = 0;
-               data.iova_size = 0;
-               ion_free(client, handle);
-               if (ret) {
-                       pr_err("sprd_heap_ioctl gsp unmap sprd_iova_unmap error %d!\n",ret);
-                       return ret;
-               }
-
-               if (copy_to_user((void __user *)arg,
-                               &data, sizeof(data))) {
-                       pr_err("sprd_heap_ioctl gsp unmap copy_to_user error!\n");
-                       return -EFAULT;
-               }
-               break;
-       }
-       case ION_SPRD_CUSTOM_MM_MAP:
-       {
-               struct ion_mmu_data data;
-               struct ion_handle *handle;
-               struct ion_buffer *buffer;
-
-               if (copy_from_user(&data, (void __user *)arg,
-                               sizeof(data))) {
-                       pr_err("sprd_heap_ioctl mm map copy_from_user error!\n");
-                       return -EFAULT;
-               }
-
-               handle = ion_import_dma_buf(client, data.fd_buffer);
-
-               if (IS_ERR(handle)) {
-                       pr_err("sprd_heap_ioctl mm map handle=0x%lx error!\n", (unsigned long)handle);
-                       return PTR_ERR(handle);
-               }
-
-               buffer = ion_handle_buffer(handle);
-
-               mutex_lock(&buffer->lock);
-               if (0 == buffer->iomap_cnt[IOMMU_MM]) {
-                       buffer->iova[IOMMU_MM] = sprd_iova_alloc(IOMMU_MM, buffer->size);
-                       ret = sprd_iova_map(IOMMU_MM, buffer->iova[IOMMU_MM],
-                                       buffer->size, buffer->sg_table);
-               }
-               if (ret) {
-                       pr_err("sprd_heap_ioctl mm map sprd_iova_map error, iova: 0x%lx, ret: %d!\n",
-                               buffer->iova[IOMMU_MM], ret);
-                       sprd_iova_free(IOMMU_MM, buffer->iova[IOMMU_MM], buffer->size);
-                       buffer->iova[IOMMU_MM] = 0;
-                       data.iova_addr = 0;
-                       data.iova_size = 0;
-               } else {
-                       buffer->iomap_cnt[IOMMU_MM]++;
-                       data.iova_addr = buffer->iova[IOMMU_MM];
-                       data.iova_size = buffer->size;
-               }
-               mutex_unlock(&buffer->lock);
-               ion_free(client, handle);
-
-               if (copy_to_user((void __user *)arg,
-                               &data, sizeof(data))) {
-                       pr_err("sprd_heap_ioctl mm map copy_to_user error!\n");
-                       sprd_iova_free(IOMMU_MM,data.iova_addr,data.iova_size);
-                       return -EFAULT;
-               }
-
-               pr_debug("sprd_heap_ioctl mm map vaddress=0x%lx size=0x%zx\n",data.iova_addr,data.iova_size);
-               break;
-       }
-       case ION_SPRD_CUSTOM_MM_UNMAP:
-       {
-               struct ion_mmu_data data;
-               struct ion_handle *handle;
-               struct ion_buffer *buffer;
-
-               if (copy_from_user(&data, (void __user *)arg,
-                               sizeof(data))) {
-                       pr_err("sprd_heap_ioctl mm unmap copy_from_user error!\n");
-                       return -EFAULT;
-               }
-
-               pr_debug("sprd_heap_ioctl mm unmap vaddress=0x%lx size=0x%zx\n",data.iova_addr,data.iova_size);
-               handle = ion_import_dma_buf(client, data.fd_buffer);
-
-               if (IS_ERR(handle)) {
-                       pr_err("sprd_heap_ioctl mm unmap handle=0x%lx error!\n", (unsigned long)handle);
-                       return PTR_ERR(handle);
-               }
-
-               buffer = ion_handle_buffer(handle);
-
-               mutex_lock(&buffer->lock);
-               if (buffer->iomap_cnt[IOMMU_MM] > 0) {
-                       buffer->iomap_cnt[IOMMU_MM]--;
-                       if (0 == buffer->iomap_cnt[IOMMU_MM]) {
-                               ret = sprd_iova_unmap(IOMMU_MM, buffer->iova[IOMMU_MM],
-                                               buffer->size);
-                               sprd_iova_free(IOMMU_MM, buffer->iova[IOMMU_MM], buffer->size);
-                               buffer->iova[IOMMU_MM] = 0;
-                       }
-               }
-               mutex_unlock(&buffer->lock);
-               data.iova_addr = 0;
-               data.iova_size = 0;
-               ion_free(client, handle);
-               if (ret) {
-                       pr_err("sprd_heap_ioctl mm unmap ret=0x%x error!\n",ret);
-                       return ret;
-               }
-
-               if (copy_to_user((void __user *)arg,
-                               &data, sizeof(data))) {
-                       pr_err("sprd_heap_ioctl mm unmap copy_to_user error!\n");
-                       return -EFAULT;
-               }
-               break;
-       }
-#endif
-       case ION_SPRD_CUSTOM_FENCE_CREATE:
-       {
-               int ret = -1;
-               struct ion_fence_data data;
-
-               if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
-                       pr_err("FENCE_CREATE user data is err\n");
-                       return -EFAULT;
-               }
-
-               ret = sprd_fence_build(&data);
-               if (ret != 0) {
-                       pr_err("sprd_fence_build failed\n");
-                       return -EFAULT;
-               }
-
-               if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
-                       sprd_fence_destroy(&data);
-                       pr_err("copy_to_user fence failed\n");
-                       return -EFAULT;
-               }
-
-               break;
-    }
-       case ION_SPRD_CUSTOM_FENCE_SIGNAL:
-       {
-               struct ion_fence_data data;
-
-               if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
-                       pr_err("FENCE_CREATE user data is err\n");
-                       return -EFAULT;
-               }
-
-               sprd_fence_signal(&data);
-
-               break;
-       }
-       case ION_SPRD_CUSTOM_FENCE_DUP:
-       {
-               break;
-
-       }
-       default:
-               pr_err("sprd_ion Do not support cmd: %d\n", cmd);
-               return -ENOTTY;
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL(sprd_ion_ioctl);
-
-static long sprd_heap_ioctl(struct ion_client *client, unsigned int cmd,
-                               unsigned long arg)
-{
-       int ret = 0;
-
-       pr_info("%s:cmd[%d]\n", __func__, cmd);
-
-       switch (cmd) {
-       case ION_SPRD_CUSTOM_PHYS:
-       {
-               struct ion_phys_data data;
-               struct ion_handle *handle;
-
-               if (copy_from_user(&data, (void __user *)arg,
-                               sizeof(data))) {
-                       pr_err("sprd_heap_ioctl alloc copy_from_user error!\n");
-                       return -EFAULT;
-               }
-
-               handle = ion_import_dma_buf(client, data.fd_buffer);
-
-               if (IS_ERR(handle)) {
-                       pr_err("sprd_heap_ioctl alloc handle=0x%lx error!\n", (unsigned long)handle);
-                       return PTR_ERR(handle);
-               }
-
-               ret = ion_phys(client, handle, &data.phys, &data.size);
-               ion_free(client, handle);
-
-               if (ret) {
-                       pr_err("sprd_heap_ioctl alloc ret=0x%x error!\n",ret);
-                       return ret;
-               }
-
-               if (copy_to_user((void __user *)arg,
-                               &data, sizeof(data))) {
-                       pr_err("sprd_heap_ioctl alloc copy_to_user error!\n");
-                       return -EFAULT;
-               }
-               pr_info("%s:cmd[%d][0x%lx %d]\n",  __func__, cmd, data.phys,data.size);
-               break;
-       }
-       case ION_SPRD_CUSTOM_MSYNC:
-       {
-#if 0
-               struct ion_msync_data data;
-               void *kaddr;
-               void *paddr;
-               size_t size;
-               if (copy_from_user(&data, (void __user *)arg,
-                               sizeof(data))) {
-                       return -EFAULT;
-               }
-               kaddr = data.vaddr;
-               paddr = data.paddr;
-               size = data.size;
-               dmac_flush_range(kaddr, kaddr + size);
-               outer_clean_range((phys_addr_t)paddr, (phys_addr_t)(paddr + size));
-
-/*maybe open in future if support discrete page map so keep this code unremoved here*/
-#else
-               struct ion_msync_data data;
-               void *v_addr;
-
-               if (copy_from_user(&data, (void __user *)arg,
-                               sizeof(data))) {
-                       pr_err("sprd_heap_ioctl free copy_from_user error!\n");
-                       return -EFAULT;
-               }
-
-               if ((unsigned long)data.vaddr & (PAGE_SIZE - 1)) {
-                       pr_err("sprd_heap_ioctl free data.vaddr=%p error!\n",data.vaddr);
-                       return -EFAULT;
-               }
-
-               pr_debug("sprd_heap_ioctl free vaddress=%p paddress=%p size=0x%zx\n",data.vaddr,data.paddr,data.size);
-#ifdef CONFIG_ARCH_SCX35L64
-               __dma_flush_range(data.vaddr, data.vaddr + data.size);
-#else
-               dmac_flush_range(data.vaddr, data.vaddr + data.size);
-#endif
-               v_addr = data.vaddr;
-               while (v_addr < data.vaddr + data.size) {
-                       unsigned long phy_addr = user_va2pa(current->mm, (unsigned long)v_addr);
-                       if (phy_addr) {
-#ifdef CONFIG_ARCH_SCX35L64
-                               /* not implement in arm64 now */
-#else
-                               outer_flush_range(phy_addr, phy_addr + PAGE_SIZE);
-#endif
-                       }
-                       v_addr += PAGE_SIZE;
-               }
-#endif
-               break;
-       }
-#if defined(CONFIG_SPRD_IOMMU)
-       case ION_SPRD_CUSTOM_GSP_MAP:
-       {
-               struct ion_mmu_data data;
-               struct ion_handle *handle;
-               struct ion_buffer *buffer;
-
-               if (copy_from_user(&data, (void __user *)arg,
-                               sizeof(data))) {
-                       pr_err("sprd_heap_ioctl gsp map copy_from_user error!\n");
-                       return -EFAULT;
-               }
-
-               handle = ion_import_dma_buf(client, data.fd_buffer);
-
-               if (IS_ERR(handle)) {
-                       pr_err("sprd_heap_ioctl gsp map handle=0x%lx error!\n", (unsigned long)handle);
-                       return PTR_ERR(handle);
-               }
-
-               buffer = ion_handle_buffer(handle);
-
-               mutex_lock(&buffer->lock);
-               if (0 == buffer->iomap_cnt[IOMMU_GSP]) {
-                       buffer->iova[IOMMU_GSP] = sprd_iova_alloc(IOMMU_GSP, buffer->size);
-                       ret = sprd_iova_map(IOMMU_GSP, buffer->iova[IOMMU_GSP],
-                                       buffer->size, buffer->sg_table);
-               }
-               if (ret) {
-                       pr_err("sprd_heap_ioctl gsp map sprd_iova_map error, iova: 0x%lx, ret: %d!\n",
-                               buffer->iova[IOMMU_GSP], ret);
-                       sprd_iova_free(IOMMU_GSP, buffer->iova[IOMMU_GSP], buffer->size);
-                       buffer->iova[IOMMU_GSP] = 0;
-                       data.iova_addr = 0;
-                       data.iova_size = 0;
-               } else {
-                       buffer->iomap_cnt[IOMMU_GSP]++;
-                       data.iova_addr = buffer->iova[IOMMU_GSP];
-                       data.iova_size = buffer->size;
-               }
-               mutex_unlock(&buffer->lock);
-               ion_free(client, handle);
-
-               if (copy_to_user((void __user *)arg,
-                               &data, sizeof(data))) {
-                       pr_err("sprd_heap_ioctl gsp map copy_to_user error!\n");
-                       sprd_iova_free(IOMMU_GSP, data.iova_addr, data.iova_size);
-                       return -EFAULT;
-               }
-               break;
-       }
-       case ION_SPRD_CUSTOM_GSP_UNMAP:
-       {
-               struct ion_mmu_data data;
-               struct ion_handle *handle;
-               struct ion_buffer *buffer;
-
-               if (copy_from_user(&data, (void __user *)arg,
-                               sizeof(data))) {
-                       pr_err("sprd_heap_ioctl gsp unmap copy_from_user error!\n");
-                       return -EFAULT;
-               }
-
-               handle = ion_import_dma_buf(client, data.fd_buffer);
-
-               if (IS_ERR(handle)) {
-                       pr_err("sprd_heap_ioctl gsp unmap handle=0x%lx ion_import_dma_buf error!\n", (unsigned long)handle);
-                       return PTR_ERR(handle);
-               }
-
-               buffer = ion_handle_buffer(handle);
-
-               mutex_lock(&buffer->lock);
-               if (buffer->iomap_cnt[IOMMU_GSP] > 0) {
-                       buffer->iomap_cnt[IOMMU_GSP]--;
-                       if (0 == buffer->iomap_cnt[IOMMU_GSP]) {
-                               ret = sprd_iova_unmap(IOMMU_GSP, buffer->iova[IOMMU_GSP],
-                                               buffer->size);
-                               sprd_iova_free(IOMMU_GSP, buffer->iova[IOMMU_GSP], buffer->size);
-                               buffer->iova[IOMMU_GSP] = 0;
-                       }
-               }
-               mutex_unlock(&buffer->lock);
-               data.iova_addr = 0;
-               data.iova_size = 0;
-               ion_free(client, handle);
-               if (ret) {
-                       pr_err("sprd_heap_ioctl gsp unmap sprd_iova_unmap error %d!\n",ret);
-                       return ret;
-               }
-
-               if (copy_to_user((void __user *)arg,
-                               &data, sizeof(data))) {
-                       pr_err("sprd_heap_ioctl gsp unmap copy_to_user error!\n");
-                       return -EFAULT;
-               }
-               break;
-       }
-       case ION_SPRD_CUSTOM_MM_MAP:
-       {
-               struct ion_mmu_data data;
-               struct ion_handle *handle;
-               struct ion_buffer *buffer;
-
-               if (copy_from_user(&data, (void __user *)arg,
-                               sizeof(data))) {
-                       pr_err("sprd_heap_ioctl mm map copy_from_user error!\n");
-                       return -EFAULT;
-               }
-
-               handle = ion_import_dma_buf(client, data.fd_buffer);
-
-               if (IS_ERR(handle)) {
-                       pr_err("sprd_heap_ioctl mm map handle=0x%lx error!\n", (unsigned long)handle);
-                       return PTR_ERR(handle);
-               }
-
-               buffer = ion_handle_buffer(handle);
-
-               mutex_lock(&buffer->lock);
-               if (0 == buffer->iomap_cnt[IOMMU_MM]) {
-                       buffer->iova[IOMMU_MM] = sprd_iova_alloc(IOMMU_MM, buffer->size);
-                       ret = sprd_iova_map(IOMMU_MM, buffer->iova[IOMMU_MM],
-                                       buffer->size, buffer->sg_table);
-               }
-               if (ret) {
-                       pr_err("sprd_heap_ioctl mm map sprd_iova_map error, iova: 0x%lx, ret: %d!\n",
-                               buffer->iova[IOMMU_MM], ret);
-                       sprd_iova_free(IOMMU_MM, buffer->iova[IOMMU_MM], buffer->size);
-                       buffer->iova[IOMMU_MM] = 0;
-                       data.iova_addr = 0;
-                       data.iova_size = 0;
-               } else {
-                       buffer->iomap_cnt[IOMMU_MM]++;
-                       data.iova_addr = buffer->iova[IOMMU_MM];
-                       data.iova_size = buffer->size;
-               }
-               mutex_unlock(&buffer->lock);
-               ion_free(client, handle);
-
-               if (copy_to_user((void __user *)arg,
-                               &data, sizeof(data))) {
-                       pr_err("sprd_heap_ioctl mm map copy_to_user error!\n");
-                       sprd_iova_free(IOMMU_MM,data.iova_addr,data.iova_size);
-                       return -EFAULT;
-               }
-               pr_info("%s:cmd[%d][0x%lx %d]\n", __func__, cmd, data.iova_addr,data.iova_size);
-               break;
-       }
-       case ION_SPRD_CUSTOM_MM_UNMAP:
-       {
-               struct ion_mmu_data data;
-               struct ion_handle *handle;
-               struct ion_buffer *buffer;
-
-               if (copy_from_user(&data, (void __user *)arg,
-                               sizeof(data))) {
-                       pr_err("sprd_heap_ioctl mm unmap copy_from_user error!\n");
-                       return -EFAULT;
-               }
-
-               pr_info("%s:cmd[%d][0x%lx %d]\n", __func__, cmd, data.iova_addr,data.iova_size);
-
-               handle = ion_import_dma_buf(client, data.fd_buffer);
-
-               if (IS_ERR(handle)) {
-                       pr_err("sprd_heap_ioctl mm unmap handle=0x%lx error!\n", (unsigned long)handle);
-                       return PTR_ERR(handle);
-               }
-
-               buffer = ion_handle_buffer(handle);
-
-               mutex_lock(&buffer->lock);
-               if (buffer->iomap_cnt[IOMMU_MM] > 0) {
-                       buffer->iomap_cnt[IOMMU_MM]--;
-                       if (0 == buffer->iomap_cnt[IOMMU_MM]) {
-                               ret = sprd_iova_unmap(IOMMU_MM, buffer->iova[IOMMU_MM],
-                                               buffer->size);
-                               sprd_iova_free(IOMMU_MM, buffer->iova[IOMMU_MM], buffer->size);
-                               buffer->iova[IOMMU_MM] = 0;
-                       }
-               }
-               mutex_unlock(&buffer->lock);
-               data.iova_addr = 0;
-               data.iova_size = 0;
-               ion_free(client, handle);
-               if (ret) {
-                       pr_err("sprd_heap_ioctl mm unmap ret=0x%x error!\n",ret);
-                       return ret;
-               }
-
-               if (copy_to_user((void __user *)arg,
-                               &data, sizeof(data))) {
-                       pr_err("sprd_heap_ioctl mm unmap copy_to_user error!\n");
-                       return -EFAULT;
-               }
-               break;
-       }
-#endif
-       case ION_SPRD_CUSTOM_FENCE_CREATE:
-       {
-               int ret = -1;
-               struct ion_fence_data data;
-
-               if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
-                       pr_err("FENCE_CREATE user data is err\n");
-                       return -EFAULT;
-               }
-
-               ret = sprd_fence_build(&data);
-               if (ret != 0) {
-                       pr_err("sprd_fence_build failed\n");
-                       return -EFAULT;
-               }
-
-               if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
-                       sprd_fence_destroy(&data);
-                       pr_err("copy_to_user fence failed\n");
-                       return -EFAULT;
-               }
-
-               break;
-    }
-       case ION_SPRD_CUSTOM_FENCE_SIGNAL:
-       {
-               struct ion_fence_data data;
-
-               if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
-                       pr_err("FENCE_CREATE user data is err\n");
-                       return -EFAULT;
-               }
-
-               sprd_fence_signal(&data);
-
-               break;
-       }
-       case ION_SPRD_CUSTOM_FENCE_DUP:
-       {
-               break;
-
-       }
-       default:
-               pr_err("sprd_ion Do not support cmd: %d\n", cmd);
-               return -ENOTTY;
-       }
-
-       pr_info("%s:cmd[%d]done[%d]\n", __func__, cmd, ret);
-
-       return ret;
-}
-
-
-extern struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *heap_data, struct device *dev);
-extern void ion_cma_heap_destroy(struct ion_heap *heap);
-
-
-static struct ion_heap *__ion_heap_create(struct ion_platform_heap *heap_data, struct device *dev)
-{
-       struct ion_heap *heap = NULL;
-
-       switch ((int)heap_data->type) {
-       case ION_HEAP_TYPE_CUSTOM:
-               heap = ion_cma_heap_create(heap_data, dev);
-               break;
-       default:
-               return ion_heap_create(heap_data);
-       }
-
-       if (IS_ERR_OR_NULL(heap)) {
-               pr_err("%s: error creating heap %s type %d base %lu size %zd\n",
-                      __func__, heap_data->name, heap_data->type,
-                      heap_data->base, heap_data->size);
-               return ERR_PTR(-EINVAL);
-       }
-
-       heap->name = heap_data->name;
-       heap->id = heap_data->id;
-
-       return heap;
-}
-
-static void __ion_heap_destroy(struct ion_heap *heap)
-{
-       if (!heap)
-               return;
-
-       switch ((int)heap->type) {
-       case ION_HEAP_TYPE_CUSTOM:
-               ion_cma_heap_destroy(heap);
-               break;
-       default:
-               ion_heap_destroy(heap);
-       }
-}
-
-static struct ion_platform_data *sprd_ion_parse_dt(struct platform_device *pdev)
-{
-       int i = 0, ret = 0;
-       const struct device_node *parent = pdev->dev.of_node;
-       struct device_node *child = NULL;
-       struct ion_platform_data *pdata = NULL;
-       struct ion_platform_heap *ion_heaps = NULL;
-       struct platform_device *new_dev = NULL;
-       uint32_t val = 0, type = 0;
-       const char *name;
-       uint32_t out_values[2];
-
-       for_each_child_of_node(parent, child)
-               num_heaps++;
-       if (!num_heaps)
-               return ERR_PTR(-EINVAL);
-
-       pr_info("%s: num_heaps=%d\n", __func__, num_heaps);
-
-       pdata = kzalloc(sizeof(struct ion_platform_data), GFP_KERNEL);
-       if (!pdata)
-               return ERR_PTR(-ENOMEM);
-
-       ion_heaps = kzalloc(sizeof(struct ion_platform_heap)*num_heaps, GFP_KERNEL);
-       if (!ion_heaps) {
-               kfree(pdata);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       pdata->heaps = ion_heaps;
-       pdata->nr = num_heaps;
-
-       for_each_child_of_node(parent, child) {
-               new_dev = of_platform_device_create(child, NULL, &pdev->dev);
-               if (!new_dev) {
-                       pr_err("Failed to create device %s\n", child->name);
-                       goto out;
-               }
-
-               pdata->heaps[i].priv = &new_dev->dev;
-
-               ret = of_property_read_u32(child, "reg", &val);
-               if (ret) {
-                       pr_err("%s: Unable to find reg key, ret=%d", __func__, ret);
-                       goto out;
-               }
-               pdata->heaps[i].id = val;
-
-               ret = of_property_read_string(child, "reg-names", &name);
-               if (ret) {
-                       pr_err("%s: Unable to find reg-names key, ret=%d", __func__, ret);
-                       goto out;
-               }
-               pdata->heaps[i].name = name;
-
-               ret = of_property_read_u32(child, "sprd,ion-heap-type", &type);
-               if (ret) {
-                       pr_err("%s: Unable to find ion-heap-type key, ret=%d", __func__, ret);
-                       goto out;
-               }
-               pdata->heaps[i].type = type;
-
-               ret = of_property_read_u32_array(child, "sprd,ion-heap-mem",
-                               out_values, 2);
-               if (!ret) {
-                       pdata->heaps[i].base = out_values[0];
-                       pdata->heaps[i].size = out_values[1];
-               }
-
-               pr_info("%s: heaps[%d]: %s type: %d base: %lu size %u\n",
-                               __func__, i, pdata->heaps[i].name, pdata->heaps[i].type,
-                               pdata->heaps[i].base, pdata->heaps[i].size);
-               ++i;
-       }
-       return pdata;
-out:
-       kfree(pdata->heaps);
-       kfree(pdata);
-       return ERR_PTR(ret);
-}
-
-int sprd_ion_probe(struct platform_device *pdev)
-{
-       int i = 0, ret = -1;
-       struct ion_platform_data *pdata = NULL;
-       uint32_t need_free_pdata;
-
-       if(pdev->dev.of_node) {
-               pdata = sprd_ion_parse_dt(pdev);
-               if (IS_ERR(pdata)) {
-                       return PTR_ERR(pdata);
-               }
-               need_free_pdata = 1;
-       } else {
-               pdata = pdev->dev.platform_data;
-
-               if (!pdata) {
-                       pr_err("sprd_ion_probe failed: No platform data!\n");
-                       return -ENODEV;
-               }
-
-               num_heaps = pdata->nr;
-               if (!num_heaps)
-                       return -EINVAL;
-               need_free_pdata = 0;
-       }
-
-       heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
-       if(!heaps) {
-               ret = -ENOMEM;
-               goto out1;
-       }
-
-       idev = ion_device_create(&sprd_heap_ioctl);
-       if (IS_ERR_OR_NULL(idev)) {
-               pr_err("%s,idev is null\n", __FUNCTION__);
-               kfree(heaps);
-               ret = PTR_ERR(idev);
-               goto out1;
-       }
-
-       /* create the heaps as specified in the board file */
-       for (i = 0; i < num_heaps; i++) {
-               struct ion_platform_heap *heap_data = &pdata->heaps[i];
-
-               if(!pdev->dev.of_node) {
-                       heap_data->priv = &pdev->dev;
-               }
-               heaps[i] = __ion_heap_create(heap_data, pdata->heaps[i].priv);
-               if (IS_ERR_OR_NULL(heaps[i])) {
-                       pr_err("%s,heaps is null, i:%d\n", __FUNCTION__,i);
-                       ret = PTR_ERR(heaps[i]);
-                       goto out;
-               }
-               ion_device_add_heap(idev, heaps[i]);
-       }
-       platform_set_drvdata(pdev, idev);
-
-       ret = open_sprd_sync_timeline();
-       if (ret != 0) {
-               pr_err("%s: sprd_create_timeline failed\n", __func__);
-               goto out;
-        }
-
-        if(need_free_pdata) {
-               kfree(pdata->heaps);
-               kfree(pdata);
-       }
-       return 0;
-out:
-       for (i = 0; i < num_heaps; i++) {
-               if (heaps[i])
-                       ion_heap_destroy(heaps[i]);
-       }
-       kfree(heaps);
-out1:
-       if(need_free_pdata) {
-               kfree(pdata->heaps);
-               kfree(pdata);
-       }
-       return ret;
-}
-
-int sprd_ion_remove(struct platform_device *pdev)
-{
-       struct ion_device *idev = platform_get_drvdata(pdev);
-       int i;
-
-       ion_device_destroy(idev);
-       for (i = 0; i < num_heaps; i++)
-               __ion_heap_destroy(heaps[i]);
-       kfree(heaps);
-
-       close_sprd_sync_timeline();
-
-       return 0;
-}
-
-static const struct of_device_id sprd_ion_ids[] __initconst = {
-       { .compatible = "sprd,ion-sprd"},
-       {},
-};
-
-static struct platform_driver ion_driver = {
-       .probe = sprd_ion_probe,
-       .remove = sprd_ion_remove,
-       .driver = {
-               .name = "ion-sprd" ,
-               .of_match_table = of_match_ptr(sprd_ion_ids),
-       }
-};
-
-static int __init ion_init(void)
-{
-       int result;
-       result= platform_driver_register(&ion_driver);
-       pr_info("%s,result:%d\n",__FUNCTION__,result);
-       return result;
-}
-
-static void __exit ion_exit(void)
-{
-       platform_driver_unregister(&ion_driver);
-}
-
-module_init(ion_init);
-module_exit(ion_exit);
-
diff --git a/drivers/gpu/ion/sprd/sprd_ion_cma_heap.c b/drivers/gpu/ion/sprd/sprd_ion_cma_heap.c
deleted file mode 100644 (file)
index e925776..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright (C) 2013 Spreadtrum Communications Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/spinlock.h>
-
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/ion.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "../ion_priv.h"
-
-#ifndef CONFIG_ARCH_SCX35L64
-#include <asm/mach/map.h>
-#endif
-#include <video/ion_sprd.h>
-#include <linux/device.h>
-#include <linux/dma-contiguous.h>
-#include <linux/genalloc.h>
-
-#ifdef CONFIG_ION_DEBUG
-#ifndef DEBUG
-#define DEBUG
-#endif
-#endif
-
-#define ION_CMA_ALLOCATE_FAIL -1
-
-struct ion_cma_heap {
-       struct ion_heap heap;
-};
-
-ion_phys_addr_t ion_cma_allocate(struct ion_heap *heap,
-                                     unsigned long size,
-                                     unsigned long align)
-{
-       struct page *page;
-       ion_phys_addr_t phys;
-       int pagecount = ((PAGE_ALIGN(size)) >> PAGE_SHIFT);
-       page = dma_alloc_from_contiguous(heap->priv, pagecount, get_order(size));
-       if(!page) {
-               pr_err("%s:failed size:0x%lx , pageCount:%d\n" , __func__, size , pagecount);
-               return ION_CMA_ALLOCATE_FAIL;
-       }
-       phys = page_to_phys(page);
-       pr_info("%s:size[%d]phy[0x%x ~ 0x%x]\n",__func__, (int)size, (int)phys, (int)(phys + size));
-       return phys;
-}
-
-void ion_cma_free(struct ion_heap *heap, ion_phys_addr_t addr,
-                      unsigned long size)
-{
-       /*free reserved memory*/
-       struct page *page;
-       int pagecount = ((PAGE_ALIGN(size)) >> PAGE_SHIFT);
-
-       if (addr == ION_CMA_ALLOCATE_FAIL)
-               return;
-       page = phys_to_page(addr);
-       dma_release_from_contiguous(heap->priv, page, pagecount);
-       pr_info("%s:size[%d]phy[0x%x ~ 0x%x]\n",__func__, (int)size, (int)addr, (int)(addr + size));
-}
-
-static int ion_cma_heap_phys(struct ion_heap *heap,
-                                 struct ion_buffer *buffer,
-                                 ion_phys_addr_t *addr, size_t *len)
-{
-       *addr = buffer->priv_phys;
-       *len = buffer->size;
-       return 0;
-}
-
-static int ion_cma_heap_allocate(struct ion_heap *heap,
-                                     struct ion_buffer *buffer,
-                                     unsigned long size, unsigned long align,
-                                     unsigned long flags)
-{
-       buffer->priv_phys = ion_cma_allocate(heap, size, align);
-       pr_debug("pgprot_noncached flags 0x%lx\n",flags);
-       if(flags&(1<<31))
-               buffer->flags |= (1<<31);
-       else
-               buffer->flags &= (~(1<<31));
-       buffer->flags |= (flags & 0x7FFF0000);/*for debug*/
-       return buffer->priv_phys == ION_CMA_ALLOCATE_FAIL ? -ENOMEM : 0;
-}
-
-static void ion_cma_heap_free(struct ion_buffer *buffer)
-{
-       struct ion_heap *heap = buffer->heap;
-
-       ion_cma_free(heap, buffer->priv_phys, buffer->size);
-       buffer->priv_phys = ION_CMA_ALLOCATE_FAIL;
-}
-
-struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
-                                             struct ion_buffer *buffer)
-{
-       struct sg_table *table;
-       int ret;
-
-       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!table)
-               return ERR_PTR(-ENOMEM);
-       ret = sg_alloc_table(table, 1, GFP_KERNEL);
-       if (ret) {
-               kfree(table);
-               return ERR_PTR(ret);
-       }
-       sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
-                   0);
-       return table;
-}
-
-void ion_cma_heap_unmap_dma(struct ion_heap *heap,
-                                struct ion_buffer *buffer)
-{
-       sg_free_table(buffer->sg_table);
-       kfree(buffer->sg_table);
-       buffer->sg_table = NULL;
-}
-
-void *ion_cma_heap_map_kernel(struct ion_heap *heap,
-                                  struct ion_buffer *buffer)
-{
-#ifndef CONFIG_ARCH_SCX35L64
-       int mtype = MT_MEMORY_NONCACHED;
-#else
-       pgprot_t mtype = 11;
-#endif
-
-       if (buffer->flags & ION_FLAG_CACHED)
-#ifndef CONFIG_ARCH_SCX35L64
-               mtype = MT_MEMORY;
-#else
-               mtype = 9;
-#endif
-
-#ifndef CONFIG_ARCH_SCX35L64
-       return __arm_ioremap(buffer->priv_phys, buffer->size,
-                             mtype);
-#else
-        return __ioremap(buffer->priv_phys, buffer->priv_phys, mtype);
-#endif
-}
-
-void ion_cma_heap_unmap_kernel(struct ion_heap *heap,
-                                   struct ion_buffer *buffer)
-{
-#ifndef CONFIG_ARCH_SCX35L64
-       __arm_iounmap(buffer->vaddr);
-#else
-      __iounmap(buffer->vaddr);
-#endif
-
-       buffer->vaddr = NULL;
-       return;
-}
-
-int ion_cma_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-                              struct vm_area_struct *vma)
-{
-       if((buffer->flags & (1<<31)) )
-       {
-               pr_debug("pgprot_cached buffer->flags 0x%lx\n",buffer->flags);
-               return remap_pfn_range(vma, vma->vm_start,
-                              __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
-                              buffer->size,
-                              (vma->vm_page_prot));
-
-       }
-       else
-       {
-               pr_debug("pgprot_noncached buffer->flags 0x%lx\n",buffer->flags);
-               return remap_pfn_range(vma, vma->vm_start,
-                              __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
-                              vma->vm_end - vma->vm_start,
-                              pgprot_noncached(vma->vm_page_prot));
-
-       }
-}
-
-static struct ion_heap_ops cma_heap_ops = {
-       .allocate = ion_cma_heap_allocate,
-       .free = ion_cma_heap_free,
-       .map_dma = ion_cma_heap_map_dma,
-       .unmap_dma = ion_cma_heap_unmap_dma,
-       .phys = ion_cma_heap_phys,
-       .map_user = ion_cma_heap_map_user,
-       .map_kernel = ion_cma_heap_map_kernel,
-       .unmap_kernel = ion_cma_heap_unmap_kernel,
-};
-
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *heap_data, struct device *dev)
-{
-       struct ion_cma_heap *cma_heap;
-       cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
-       if (!cma_heap)
-               return ERR_PTR(-ENOMEM);
-
-       cma_heap->heap.ops = &cma_heap_ops;
-       cma_heap->heap.type = ION_HEAP_TYPE_CUSTOM;
-       cma_heap->heap.priv = dev;
-       return &cma_heap->heap;
-}
-
-void ion_cma_heap_destroy(struct ion_heap *heap)
-{
-       struct ion_cma_heap *cma_heap =
-               container_of(heap, struct  ion_cma_heap, heap);
-
-       kfree(cma_heap);
-       cma_heap = NULL;
-}
diff --git a/drivers/gpu/ion/tegra/Makefile b/drivers/gpu/ion/tegra/Makefile
deleted file mode 100644 (file)
index 11cd003..0000000
+++ /dev/null
@@ -1 +0,0 @@
-obj-y += tegra_ion.o
diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/gpu/ion/tegra/tegra_ion.c
deleted file mode 100644 (file)
index 7af6e16..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * drivers/gpu/tegra/tegra_ion.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/ion.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include "../ion_priv.h"
-
-struct ion_device *idev;
-struct ion_mapper *tegra_user_mapper;
-int num_heaps;
-struct ion_heap **heaps;
-
-int tegra_ion_probe(struct platform_device *pdev)
-{
-       struct ion_platform_data *pdata = pdev->dev.platform_data;
-       int err;
-       int i;
-
-       num_heaps = pdata->nr;
-
-       heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
-
-       idev = ion_device_create(NULL);
-       if (IS_ERR_OR_NULL(idev)) {
-               kfree(heaps);
-               return PTR_ERR(idev);
-       }
-
-       /* create the heaps as specified in the board file */
-       for (i = 0; i < num_heaps; i++) {
-               struct ion_platform_heap *heap_data = &pdata->heaps[i];
-
-               heaps[i] = ion_heap_create(heap_data);
-               if (IS_ERR_OR_NULL(heaps[i])) {
-                       err = PTR_ERR(heaps[i]);
-                       goto err;
-               }
-               ion_device_add_heap(idev, heaps[i]);
-       }
-       platform_set_drvdata(pdev, idev);
-       return 0;
-err:
-       for (i = 0; i < num_heaps; i++) {
-               if (heaps[i])
-                       ion_heap_destroy(heaps[i]);
-       }
-       kfree(heaps);
-       return err;
-}
-
-int tegra_ion_remove(struct platform_device *pdev)
-{
-       struct ion_device *idev = platform_get_drvdata(pdev);
-       int i;
-
-       ion_device_destroy(idev);
-       for (i = 0; i < num_heaps; i++)
-               ion_heap_destroy(heaps[i]);
-       kfree(heaps);
-       return 0;
-}
-
-static struct platform_driver ion_driver = {
-       .probe = tegra_ion_probe,
-       .remove = tegra_ion_remove,
-       .driver = { .name = "ion-tegra" }
-};
-
-static int __init ion_init(void)
-{
-       return platform_driver_register(&ion_driver);
-}
-
-static void __exit ion_exit(void)
-{
-       platform_driver_unregister(&ion_driver);
-}
-
-module_init(ion_init);
-module_exit(ion_exit);
-