/*
 * drivers/staging/android/ion/ion_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>

#include "ion.h"
#include "ion_priv.h"

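/*
 * Error value handed back to the ION core on allocation failure, and a
 * helper to test whether the caller asked for a cached mapping.
 */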
#define ION_CMA_ALLOCATE_FAILED -1
#define ION_IS_CACHED(__flags)  ((__flags) & ION_FLAG_CACHED)

struct ion_cma_heap {
        struct ion_heap heap;
        struct device *dev;
};

#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)

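/* Per-buffer bookkeeping, stashed in buffer->priv_virt at allocation time. */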
struct ion_cma_buffer_info {
        void *cpu_addr;
        dma_addr_t handle;
        struct sg_table *table;
        bool is_cached;
};

/*
 * Create a scatter-list for the already allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
 */
static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
                               void *cpu_addr, dma_addr_t handle, size_t size)
{
        struct page *page = virt_to_page(cpu_addr);
        int ret;

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (unlikely(ret))
                return ret;

        sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return 0;
}

/* ION CMA heap operations */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
                            unsigned long len, unsigned long align,
                            unsigned long flags)
{
        struct ion_cma_heap *cma_heap = to_cma_heap(heap);
        struct device *dev = cma_heap->dev;
        struct ion_cma_buffer_info *info;

        dev_dbg(dev, "Request buffer allocation len %lu\n", len);

        info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
        if (!info) {
                dev_err(dev, "Can't allocate buffer info\n");
                return ION_CMA_ALLOCATE_FAILED;
        }

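        /*
         * Pick the kernel mapping type from the requested cacheability:
         * write-combined for uncached buffers, non-consistent (cacheable)
         * otherwise.
         */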
        if (!ION_IS_CACHED(flags))
                info->cpu_addr = dma_alloc_writecombine(dev, len,
                                &info->handle, GFP_KERNEL);
        else
                info->cpu_addr = dma_alloc_nonconsistent(dev, len,
                                &info->handle, GFP_KERNEL);

        if (!info->cpu_addr) {
                dev_err(dev, "Failed to allocate buffer\n");
                goto err;
        }

        info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!info->table) {
                dev_err(dev, "Failed to allocate sg table\n");
                goto free_mem;
        }

        info->is_cached = ION_IS_CACHED(flags);

        if (ion_cma_get_sgtable(dev, info->table, info->cpu_addr,
                                info->handle, len))
                goto free_table;
        /* keep this for memory release */
        buffer->priv_virt = info;
        dev_dbg(dev, "Allocate buffer %p\n", buffer);
        return 0;

free_table:
        kfree(info->table);
free_mem:
        dma_free_coherent(dev, len, info->cpu_addr, info->handle);
err:
        kfree(info);
        return ION_CMA_ALLOCATE_FAILED;
}

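/*
 * Undo ion_cma_allocate(): release the DMA memory, the scatter-list and
 * the per-buffer info.
 */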
static void ion_cma_free(struct ion_buffer *buffer)
{
        struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
        struct device *dev = cma_heap->dev;
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        dev_dbg(dev, "Release buffer %p\n", buffer);
        /* release memory */
        dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
        /* release sg table */
        sg_free_table(info->table);
        kfree(info->table);
        kfree(info);
}

/* Return the buffer's physical address in *addr and its size in *len. */
static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
                        ion_phys_addr_t *addr, size_t *len)
{
        struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
        struct device *dev = cma_heap->dev;
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
                &info->handle);

        *addr = info->handle;
        *len = buffer->size;

        return 0;
}

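/* Hand out the sg_table that was built at allocation time. */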
static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
                                             struct ion_buffer *buffer)
{
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        return info->table;
}

static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
                                   struct ion_buffer *buffer)
{
}

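/*
 * Map the buffer into userspace with the same cacheability that was
 * chosen at allocation time.
 */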
static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
                        struct vm_area_struct *vma)
{
        struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
        struct device *dev = cma_heap->dev;
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        if (!info->is_cached)
                return dma_mmap_writecombine(dev, vma, info->cpu_addr,
                                info->handle, buffer->size);
        else
                return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
                                info->handle, buffer->size);
}

static void *ion_cma_map_kernel(struct ion_heap *heap,
                                struct ion_buffer *buffer)
{
        struct ion_cma_buffer_info *info = buffer->priv_virt;
        /* kernel memory mapping has been done at allocation time */
        return info->cpu_addr;
}

static void ion_cma_unmap_kernel(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
}

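/* Heap callbacks registered with the ION core. */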
static struct ion_heap_ops ion_cma_ops = {
        .allocate = ion_cma_allocate,
        .free = ion_cma_free,
        .map_dma = ion_cma_heap_map_dma,
        .unmap_dma = ion_cma_heap_unmap_dma,
        .phys = ion_cma_phys,
        .map_user = ion_cma_mmap,
        .map_kernel = ion_cma_map_kernel,
        .unmap_kernel = ion_cma_unmap_kernel,
};

struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data,
                                     struct device *dev)
{
        struct ion_cma_heap *cma_heap;
        /*
         * Static so that dev->dma_mask does not end up pointing at a
         * stack variable that dies when this function returns.
         */
        static u64 sprd_dmamask = DMA_BIT_MASK(32);

        cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
        if (!cma_heap)
                return ERR_PTR(-ENOMEM);

        cma_heap->heap.ops = &ion_cma_ops;
        /*
         * Force a 32-bit DMA mask on the device, then remember it: the
         * device is what links this heap to its reserved CMA memory.
         */
        dev->dma_mask = &sprd_dmamask;
        dev->coherent_dma_mask = DMA_BIT_MASK(32);
        cma_heap->dev = dev;
        cma_heap->heap.type = ION_HEAP_TYPE_DMA;
        return &cma_heap->heap;
}

void ion_cma_heap_destroy(struct ion_heap *heap)
{
        struct ion_cma_heap *cma_heap = to_cma_heap(heap);

        kfree(cma_heap);
}
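
/*
 * Usage sketch (illustrative only, not part of this driver): how a board
 * file might create and register this heap. "idev", "pdev" and the heap
 * id/name are assumed for the example; ion_device_add_heap() is the ION
 * core registration call in this tree.
 */
#if 0
static void example_register_cma_heap(struct ion_device *idev,
                                      struct platform_device *pdev)
{
        struct ion_platform_heap heap_data = {
                .type = ION_HEAP_TYPE_DMA,
                .id = 1,
                .name = "cma",
        };
        struct ion_heap *heap;

        heap = ion_cma_heap_create(&heap_data, &pdev->dev);
        if (IS_ERR(heap))
                return;

        /* The ION core dispatcher normally copies these from heap_data. */
        heap->name = heap_data.name;
        heap->id = heap_data.id;
        ion_device_add_heap(idev, heap);
}
#endif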