profile/mobile/platform/kernel/linux-3.10-sc7730.git: drivers/staging/android/ion/sprd/sprd_ion_cma_heap.c
/*
 * Copyright (C) 2013 Spreadtrum Communications Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/spinlock.h>

#include <linux/err.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/time.h>		/* do_gettimeofday() */
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/genalloc.h>

#ifndef CONFIG_64BIT
#include <asm/mach/map.h>
#endif
#include <video/ion_sprd.h>

#include "../ion.h"
#include "../ion_priv.h"

#ifdef CONFIG_ION_DEBUG
#ifndef DEBUG
#define DEBUG
#endif
#endif

#define ION_CMA_ALLOCATE_FAIL -1

/* device used for CMA allocations; set by ion_cma_heap_create() */
struct device *ion_dev;

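/*
 * struct ion_cma_heap - Spreadtrum CMA/carveout hybrid heap
 * @heap:     standard ION heap
 * @pool:     gen_pool over a reserved carveout region, or NULL when the
 *            heap allocates straight from the CMA area
 * @poolbase: physical base address of the reserved region
 * @poolsize: size of the reserved region in bytes
 */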
struct ion_cma_heap {
        struct ion_heap heap;
        struct gen_pool *pool;
        ion_phys_addr_t poolbase;
        size_t poolsize;
};

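/*
 * ion_cma_allocate - allocate physically contiguous memory for a buffer
 *
 * When no reserved pool is configured the pages come from the CMA area via
 * dma_alloc_from_contiguous() (and the allocation time is logged in ms);
 * otherwise the request is served from the gen_pool carveout.  @align is
 * currently ignored.  Returns the physical address, or
 * ION_CMA_ALLOCATE_FAIL on failure.
 */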
ion_phys_addr_t ion_cma_allocate(struct ion_heap *heap,
                                 unsigned long size,
                                 unsigned long align)
{
        struct ion_cma_heap *cma_heap =
                container_of(heap, struct ion_cma_heap, heap);

        if (cma_heap->pool == NULL) {
                struct page *page;
                ion_phys_addr_t phys;
                int pagecount = PAGE_ALIGN(size) >> PAGE_SHIFT;
                struct timeval val;
                int start, end;

                do_gettimeofday(&val);
                start = (val.tv_sec * 1000000 + val.tv_usec) / 1000;
                page = dma_alloc_from_contiguous(ion_dev, pagecount,
                                                 get_order(size));
                do_gettimeofday(&val);
                end = (val.tv_sec * 1000000 + val.tv_usec) / 1000;
                if (!page) {
                        pr_err("%s, dma_alloc_from_contiguous() failed!\n",
                               __func__);
                        pr_err("%s, size:0x%lx, pagecount:%d\n",
                               __func__, size, pagecount);
                        return ION_CMA_ALLOCATE_FAIL;
                }
                phys = page_to_phys(page);
                pr_info("%s, size=%08lx, phys addr=%08lx, time=%dms\n",
                        __func__, size, phys, end - start);
                return phys;
        } else {
                unsigned long offset = gen_pool_alloc(cma_heap->pool, size);

                pr_debug("%s, alloc from reserved memory!\n", __func__);
                pr_debug("%s, size=%08lx, pool=%p, offset=%08lx\n",
                         __func__, size, cma_heap->pool, offset);
                if (!offset)
                        return ION_CMA_ALLOCATE_FAIL;
                return offset;
        }
}

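/*
 * ion_cma_free - release memory obtained from ion_cma_allocate()
 *
 * Addresses inside [poolbase, poolbase + poolsize) go back to the gen_pool
 * carveout; everything else is returned to the CMA area.
 */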
void ion_cma_free(struct ion_heap *heap, ion_phys_addr_t addr,
                  unsigned long size)
{
        struct ion_cma_heap *cma_heap =
                container_of(heap, struct ion_cma_heap, heap);

        if (addr == ION_CMA_ALLOCATE_FAIL)
                return;

        if (cma_heap->pool != NULL &&
            addr >= cma_heap->poolbase &&
            addr < cma_heap->poolbase + cma_heap->poolsize) {
                /* free reserved (carveout) memory */
                pr_debug("%s, size=%08lx, pool=%p, base=%08lx, phys addr=%08lx\n",
                         __func__, size, cma_heap->pool, cma_heap->poolbase,
                         addr);
                gen_pool_free(cma_heap->pool, addr, size);
        } else {
                struct page *page = phys_to_page(addr);
                int pagecount = PAGE_ALIGN(size) >> PAGE_SHIFT;

                pr_debug("%s, size=%08lx, phys addr=%08lx\n",
                         __func__, size, addr);
                dma_release_from_contiguous(ion_dev, page, pagecount);
        }
}

static int ion_cma_heap_phys(struct ion_heap *heap,
                             struct ion_buffer *buffer,
                             ion_phys_addr_t *addr, size_t *len)
{
        *addr = buffer->priv_phys;
        *len = buffer->size;
        return 0;
}

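/*
 * ion_cma_heap_allocate - ION heap op wrapping ion_cma_allocate()
 *
 * Bit 31 of the allocation flags is copied into buffer->flags and later
 * selects a cached user mapping in ion_cma_heap_map_user(); bits 16-30
 * (mask 0x7FFF0000) are carried along for debugging only.
 */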
static int ion_cma_heap_allocate(struct ion_heap *heap,
                                 struct ion_buffer *buffer,
                                 unsigned long size, unsigned long align,
                                 unsigned long flags)
{
        buffer->priv_phys = ion_cma_allocate(heap, size, align);
        pr_debug("%s, flags 0x%lx\n", __func__, flags);
        if (flags & (1UL << 31))
                buffer->flags |= (1UL << 31);
        else
                buffer->flags &= ~(1UL << 31);
        buffer->flags |= (flags & 0x7FFF0000);  /* kept for debug */
        return buffer->priv_phys == ION_CMA_ALLOCATE_FAIL ? -ENOMEM : 0;
}

static void ion_cma_heap_free(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;

        ion_cma_free(heap, buffer->priv_phys, buffer->size);
        buffer->priv_phys = ION_CMA_ALLOCATE_FAIL;
}

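/*
 * ion_cma_heap_map_dma - build an sg_table for the buffer
 *
 * The buffer is physically contiguous, so a single-entry scatterlist
 * covering the whole allocation is enough.
 */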
struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
                                      struct ion_buffer *buffer)
{
        struct sg_table *table;
        int ret;

        table = kzalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                return ERR_PTR(-ENOMEM);
        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret) {
                kfree(table);
                return ERR_PTR(ret);
        }
        sg_set_page(table->sgl, phys_to_page(buffer->priv_phys),
                    buffer->size, 0);
        return table;
}

void ion_cma_heap_unmap_dma(struct ion_heap *heap,
                            struct ion_buffer *buffer)
{
        sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
        buffer->sg_table = NULL;
}

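/*
 * ion_cma_heap_map_kernel - map the buffer into the kernel address space
 *
 * On 32-bit ARM the buffer is ioremapped with MT_MEMORY (cached) or
 * MT_MEMORY_NONCACHED depending on ION_FLAG_CACHED.  On 64-bit the raw
 * page-protection values 9 (cached) and 11 (non-cached) are passed to
 * __ioremap().
 */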
void *ion_cma_heap_map_kernel(struct ion_heap *heap,
                              struct ion_buffer *buffer)
{
#ifndef CONFIG_64BIT
        int mtype = MT_MEMORY_NONCACHED;

        if (buffer->flags & ION_FLAG_CACHED)
                mtype = MT_MEMORY;

        return __arm_ioremap(buffer->priv_phys, buffer->size, mtype);
#else
        pgprot_t mtype = __pgprot(11);

        if (buffer->flags & ION_FLAG_CACHED)
                mtype = __pgprot(9);

        return __ioremap(buffer->priv_phys, buffer->size, mtype);
#endif
}

void ion_cma_heap_unmap_kernel(struct ion_heap *heap,
                               struct ion_buffer *buffer)
{
#ifndef CONFIG_64BIT
        __arm_iounmap(buffer->vaddr);
#else
        __iounmap(buffer->vaddr);
#endif

        buffer->vaddr = NULL;
}

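/*
 * ion_cma_heap_map_user - map the buffer into a userspace vma
 *
 * Buffers allocated with bit 31 set keep the vma's default (cached) page
 * protection; all others are mapped with pgprot_noncached().
 */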
int ion_cma_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                          struct vm_area_struct *vma)
{
        if (buffer->flags & (1UL << 31)) {
                pr_debug("pgprot_cached buffer->flags 0x%lx\n",
                         buffer->flags);
                return remap_pfn_range(vma, vma->vm_start,
                                __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
                                buffer->size,
                                vma->vm_page_prot);
        } else {
                pr_debug("pgprot_noncached buffer->flags 0x%lx\n",
                         buffer->flags);
                return remap_pfn_range(vma, vma->vm_start,
                                __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
                                vma->vm_end - vma->vm_start,
                                pgprot_noncached(vma->vm_page_prot));
        }
}

static struct ion_heap_ops cma_heap_ops = {
        .allocate = ion_cma_heap_allocate,
        .free = ion_cma_heap_free,
        .map_dma = ion_cma_heap_map_dma,
        .unmap_dma = ion_cma_heap_unmap_dma,
        .phys = ion_cma_heap_phys,
        .map_user = ion_cma_heap_map_user,
        .map_kernel = ion_cma_heap_map_kernel,
        .unmap_kernel = ion_cma_heap_unmap_kernel,
};

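/*
 * ion_cma_heap_create - create the heap from platform heap data
 *
 * If heap_data->size is non-zero, a gen_pool is built over the reserved
 * region described by heap_data->base/size; otherwise allocations fall
 * through to the CMA area of @dev.  A minimal registration sketch
 * (assumptions: a hypothetical "ion_heap_cma" platform entry, an existing
 * ion_device "idev" and platform_device "pdev"):
 *
 *	struct ion_platform_heap heap_data = {
 *		.type = ION_HEAP_TYPE_CUSTOM,
 *		.name = "ion_heap_cma",
 *		.base = 0,	// base/size of 0: use CMA, no carveout
 *		.size = 0,
 *	};
 *	struct ion_heap *heap = ion_cma_heap_create(&heap_data, &pdev->dev);
 *	if (!IS_ERR(heap))
 *		ion_device_add_heap(idev, heap);
 */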
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *heap_data,
                                     struct device *dev)
{
        struct ion_cma_heap *cma_heap;

        cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
        if (!cma_heap)
                return ERR_PTR(-ENOMEM);
        if (heap_data->size != 0) {
                /* order-12 minimum allocation unit (4 KiB) */
                cma_heap->pool = gen_pool_create(12, -1);
                if (!cma_heap->pool) {
                        kfree(cma_heap);
                        return ERR_PTR(-ENOMEM);
                }
                cma_heap->poolbase = heap_data->base;
                cma_heap->poolsize = heap_data->size;
                gen_pool_add(cma_heap->pool, cma_heap->poolbase,
                             heap_data->size, -1);
        } else {
                cma_heap->pool = NULL;
                cma_heap->poolbase = 0;
                cma_heap->poolsize = 0;
        }
        cma_heap->heap.ops = &cma_heap_ops;
        cma_heap->heap.type = ION_HEAP_TYPE_CUSTOM;
        ion_dev = dev;
        return &cma_heap->heap;
}

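/*
 * ion_cma_heap_destroy - tear down a heap created by ion_cma_heap_create(),
 * destroying the carveout gen_pool if one was created.
 */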
void ion_cma_heap_destroy(struct ion_heap *heap)
{
        struct ion_cma_heap *cma_heap =
                container_of(heap, struct ion_cma_heap, heap);

        if (cma_heap->pool)
                gen_pool_destroy(cma_heap->pool);

        kfree(cma_heap);
}