/*
 * drivers/gpu/ion/ion_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/ion.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* for the ion_heap_ops structure */
#include "ion_priv.h"

#define ION_CMA_ALLOCATE_FAILED -1
#define ION_IS_CACHED(__flags)  ((__flags) & ION_FLAG_CACHED)

struct ion_cma_heap {
        struct ion_heap heap;
        struct device *dev;
};

#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)

struct ion_cma_buffer_info {
        void *cpu_addr;         /* kernel virtual address of the buffer */
        dma_addr_t handle;      /* DMA (bus) address of the buffer */
        struct sg_table *table;
        bool is_cached;         /* allocated with a cached mapping? */
};

/*
 * Create a scatter-list for the already allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
 */
static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
                               void *cpu_addr, dma_addr_t handle, size_t size)
{
        struct page *page = virt_to_page(cpu_addr);
        int ret;

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (unlikely(ret))
                return ret;

        sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return 0;
}

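/*
 * Illustrative sketch, not part of the original file: on kernels where
 * dma_get_sgtable() is wired up for this architecture (mainline ARM has
 * implemented .get_sgtable since well before 3.10), the helper above could
 * shrink to a single call. Whether this vendor tree's DMA ops implement
 * .get_sgtable is an assumption to verify before switching over:
 *
 *      static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
 *                                     void *cpu_addr, dma_addr_t handle,
 *                                     size_t size)
 *      {
 *              return dma_get_sgtable(dev, sgt, cpu_addr, handle, size);
 *      }
 */
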
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
                            unsigned long len, unsigned long align,
                            unsigned long flags)
{
        struct ion_cma_heap *cma_heap = to_cma_heap(heap);
        struct device *dev = cma_heap->dev;
        struct ion_cma_buffer_info *info;

        dev_dbg(dev, "Request buffer allocation len %lu\n", len);

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                dev_err(dev, "Can't allocate buffer info\n");
                return ION_CMA_ALLOCATE_FAILED;
        }

        if (!ION_IS_CACHED(flags))
                info->cpu_addr = dma_alloc_writecombine(dev, len,
                                &info->handle, GFP_KERNEL);
        else
                info->cpu_addr = dma_alloc_nonconsistent(dev, len,
                                &info->handle, GFP_KERNEL);

        if (!info->cpu_addr) {
                dev_err(dev, "Failed to allocate buffer\n");
                goto err;
        }

        info->table = kmalloc(sizeof(*info->table), GFP_KERNEL);
        if (!info->table) {
                dev_err(dev, "Failed to allocate sg table\n");
                goto free_mem;
        }

        info->is_cached = ION_IS_CACHED(flags);

        if (ion_cma_get_sgtable(dev, info->table, info->cpu_addr,
                                info->handle, len))
                goto free_table;
        /* keep this for memory release */
        buffer->priv_virt = info;
        dev_dbg(dev, "Allocate buffer %p\n", buffer);
        return 0;

free_table:
        kfree(info->table);
free_mem:
        /*
         * The coherent free path is assumed to release writecombine and
         * nonconsistent allocations as well on this platform.
         */
        dma_free_coherent(dev, len, info->cpu_addr, info->handle);
err:
        kfree(info);
        return ION_CMA_ALLOCATE_FAILED;
}

static void ion_cma_free(struct ion_buffer *buffer)
{
        struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
        struct device *dev = cma_heap->dev;
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        dev_dbg(dev, "Release buffer %p\n", buffer);
        /* release memory */
        dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
        /* release sg table */
        sg_free_table(info->table);
        kfree(info->table);
        kfree(info);
}

/* return the physical address in addr */
static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
                        ion_phys_addr_t *addr, size_t *len)
{
        struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
        struct device *dev = cma_heap->dev;
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        dev_dbg(dev, "Return buffer %p physical address 0x%llx\n", buffer,
                (unsigned long long)info->handle);

        *addr = info->handle;
        *len = buffer->size;

        return 0;
}

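/*
 * Hypothetical in-kernel usage sketch (not part of this file): a client
 * allocating from this heap reaches ion_cma_phys() through ion_phys().
 * The exact ion_alloc()/ion_phys() signatures vary between ion
 * generations, so treat this as pseudocode against this tree's
 * <linux/ion.h>:
 *
 *      struct ion_handle *handle;
 *      ion_phys_addr_t phys;
 *      size_t size;
 *
 *      handle = ion_alloc(client, SZ_1M, 0, 1 << ION_HEAP_TYPE_DMA, 0);
 *      if (!IS_ERR(handle) && !ion_phys(client, handle, &phys, &size))
 *              pr_info("CMA buffer at 0x%lx, %zu bytes\n", phys, size);
 */
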
static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
                                             struct ion_buffer *buffer)
{
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        return info->table;
}

static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
                                   struct ion_buffer *buffer)
{
}

static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
                        struct vm_area_struct *vma)
{
        struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
        struct device *dev = cma_heap->dev;
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        if (!info->is_cached)
                return dma_mmap_writecombine(dev, vma, info->cpu_addr,
                                info->handle, buffer->size);
        else
                return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
                                info->handle, buffer->size);
}

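/*
 * Hypothetical userspace counterpart (not part of this file): with this
 * ion generation a process reaches ion_cma_mmap() by allocating through
 * /dev/ion and mmap()ing the returned fd. Field names follow the usual
 * 3.10-era struct ion_allocation_data / ion_fd_data; verify against this
 * tree's ion UAPI header:
 *
 *      int ion_fd = open("/dev/ion", O_RDONLY);
 *      struct ion_allocation_data alloc = {
 *              .len = 1024 * 1024,
 *              .align = 0,
 *              .heap_mask = 1 << ION_HEAP_TYPE_DMA,
 *              .flags = 0,              // uncached: writecombine path above
 *      };
 *      struct ion_fd_data fd_data;
 *
 *      ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *      fd_data.handle = alloc.handle;
 *      ioctl(ion_fd, ION_IOC_MAP, &fd_data);
 *      void *ptr = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd_data.fd, 0);
 */
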
static void *ion_cma_map_kernel(struct ion_heap *heap,
                                struct ion_buffer *buffer)
{
        struct ion_cma_buffer_info *info = buffer->priv_virt;
        /* the kernel mapping was already set up at allocation time */
        return info->cpu_addr;
}

static void ion_cma_unmap_kernel(struct ion_heap *heap,
                                        struct ion_buffer *buffer)
{
}

static struct ion_heap_ops ion_cma_ops = {
        .allocate = ion_cma_allocate,
        .free = ion_cma_free,
        .map_dma = ion_cma_heap_map_dma,
        .unmap_dma = ion_cma_heap_unmap_dma,
        .phys = ion_cma_phys,
        .map_user = ion_cma_mmap,
        .map_kernel = ion_cma_map_kernel,
        .unmap_kernel = ion_cma_unmap_kernel,
};

struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data, struct device *dev)
{
        struct ion_cma_heap *cma_heap;
        /* must be static: dev->dma_mask keeps pointing at it after return */
        static u64 sprd_dmamask = DMA_BIT_MASK(32);

        cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
        if (!cma_heap)
                return ERR_PTR(-ENOMEM);

        cma_heap->heap.ops = &ion_cma_ops;
        /*
         * Keep the device: it is what later links this heap to its
         * reserved CMA region.
         */
        dev->dma_mask = &sprd_dmamask;
        dev->coherent_dma_mask = DMA_BIT_MASK(32);
        cma_heap->dev = dev;
        cma_heap->heap.type = ION_HEAP_TYPE_DMA;
        return &cma_heap->heap;
}

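/*
 * Hypothetical registration sketch (not part of this file): a board or
 * platform driver would typically create the heap from its CMA device and
 * add it to an ion device. ion_device_add_heap() is the usual ion core
 * entry point in this kernel generation; sprd_idev, sprd_ion_dev, and
 * pdata below are placeholder names:
 *
 *      struct ion_heap *heap;
 *
 *      heap = ion_cma_heap_create(&pdata->heaps[i], &sprd_ion_dev->dev);
 *      if (!IS_ERR_OR_NULL(heap))
 *              ion_device_add_heap(sprd_idev, heap);
 */
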
void ion_cma_heap_destroy(struct ion_heap *heap)
{
        struct ion_cma_heap *cma_heap = to_cma_heap(heap);

        kfree(cma_heap);
}