/*
 * linux/drivers/media/video/s5p-mfc/s5p_mfc_mem.c
 *
 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>

#include "s5p_mfc_common.h"
#include "s5p_mfc_mem.h"
#include "s5p_mfc_pm.h"
#include "s5p_mfc_debug.h"

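/*
 * One of three videobuf2 allocator back-ends is selected at build time:
 * CMA, DMA pool, or SDVMM.  Each variant provides the same entry points
 * (s5p_mfc_mem_ops(), s5p_mfc_mem_init_multi(), s5p_mfc_mem_cleanup_multi())
 * plus the cache maintenance and suspend/resume helpers further below.
 */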
#if defined(CONFIG_S5P_MFC_VB2_CMA)
static const char *s5p_mem_types[] = {
        MFC_CMA_BANK2,
        MFC_CMA_BANK1,
        MFC_CMA_FW
};

static unsigned long s5p_mem_alignments[] = {
        MFC_CMA_BANK2_ALIGN,
        MFC_CMA_BANK1_ALIGN,
        MFC_CMA_FW_ALIGN
};

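/* Hand the vb2 CMA allocator callbacks to the videobuf2 core. */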
struct vb2_mem_ops *s5p_mfc_mem_ops(void)
{
        return (struct vb2_mem_ops *)&vb2_cma_memops;
}

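/*
 * Create one CMA allocation context per region, in the order given by
 * s5p_mem_types (bank 2, bank 1, firmware), each with its own alignment.
 */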
void **s5p_mfc_mem_init_multi(struct device *dev)
{
        return (void **)vb2_cma_init_multi(dev, MFC_CMA_ALLOC_CTX_NUM,
                                           s5p_mem_types, s5p_mem_alignments);
}

void s5p_mfc_mem_cleanup_multi(void **alloc_ctxes)
{
        vb2_cma_cleanup_multi((struct vb2_alloc_ctx **)alloc_ctxes);
}
#elif defined(CONFIG_S5P_MFC_VB2_DMA_POOL)
static unsigned long s5p_mem_base_align[] = {
        MFC_BASE_ALIGN_ORDER,
        MFC_BASE_ALIGN_ORDER,
};
static unsigned long s5p_mem_bank_align[] = {
        MFC_BANK_A_ALIGN_ORDER,
        MFC_BANK_B_ALIGN_ORDER,
};

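/* Pool size per memory bank: 3 MiB each. */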
static unsigned long s5p_mem_sizes[] = {
        3 << 20,
        3 << 20,
};

struct vb2_mem_ops *s5p_mfc_mem_ops(void)
{
        return (struct vb2_mem_ops *)&vb2_dma_pool_memops;
}

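/*
 * Create MFC_ALLOC_CTX_NUM DMA pool contexts, one per bank, using the
 * base/bank alignment orders and pool sizes defined above.
 */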
void **s5p_mfc_mem_init_multi(struct device *dev)
{
        return (void **)vb2_dma_pool_init_multi(dev, MFC_ALLOC_CTX_NUM,
                                                s5p_mem_base_align,
                                                s5p_mem_bank_align,
                                                s5p_mem_sizes);
}

void s5p_mfc_mem_cleanup_multi(void **alloc_ctxes)
{
        vb2_dma_pool_cleanup_multi(alloc_ctxes, MFC_ALLOC_CTX_NUM);
}
#elif defined(CONFIG_S5P_MFC_VB2_SDVMM)
struct vb2_mem_ops *s5p_mfc_mem_ops(void)
{
        return (struct vb2_mem_ops *)&vb2_sdvmm_memops;
}

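/*
 * Create the SDVMM (VCM-backed) allocation contexts.  The MFC block is
 * powered on around vb2_sdvmm_init_multi(), presumably because setting
 * up the VCM_DEV_MFC contexts touches the hardware.
 */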
void **s5p_mfc_mem_init_multi(struct device *dev)
{
        struct vb2_vcm vcm;
        void **alloc_ctxes;
        struct vb2_drv vb2_drv;

        vcm.vcm_id = VCM_DEV_MFC;
        /* FIXME: check port count */
        vcm.size = SZ_256M;

        vb2_drv.remap_dva = true;
        vb2_drv.cacheable = false;

        s5p_mfc_power_on();
        alloc_ctxes = (void **)vb2_sdvmm_init_multi(MFC_ALLOC_CTX_NUM, &vcm,
                                                    NULL, &vb2_drv);
        s5p_mfc_power_off();

        return alloc_ctxes;
}

void s5p_mfc_mem_cleanup_multi(void **alloc_ctxes)
{
        vb2_sdvmm_cleanup_multi(alloc_ctxes);
}
#endif

#if defined(CONFIG_S5P_MFC_VB2_SDVMM)
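/*
 * SDVMM buffers are kernel-virtually contiguous but physically scattered,
 * so outer (L2) cache maintenance is done page by page: each virtual page
 * is resolved with vmalloc_to_page() and the matching physical range is
 * cleaned or invalidated.  dmac_map_area()/dmac_unmap_area() handle the
 * inner (L1) cache.
 */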
void s5p_mfc_cache_clean(const void *start_addr, unsigned long size)
{
        unsigned long paddr;
        void *cur_addr, *end_addr;

        dmac_map_area(start_addr, size, DMA_TO_DEVICE);

        cur_addr = (void *)((unsigned long)start_addr & PAGE_MASK);
        end_addr = cur_addr + PAGE_ALIGN(size);

        while (cur_addr < end_addr) {
                paddr = page_to_pfn(vmalloc_to_page(cur_addr));
                paddr <<= PAGE_SHIFT;
                if (paddr)
                        outer_clean_range(paddr, paddr + PAGE_SIZE);
                cur_addr += PAGE_SIZE;
        }

        /* FIXME: L2 operation optimization */
        /*
        unsigned long start, end, unitsize;
        unsigned long cur_addr, remain;

        dmac_map_area(start_addr, size, DMA_TO_DEVICE);

        cur_addr = (unsigned long)start_addr;
        remain = size;

        start = page_to_pfn(vmalloc_to_page(cur_addr));
        start <<= PAGE_SHIFT;
        if (start & PAGE_MASK) {
                unitsize = min((start | PAGE_MASK) - start + 1, remain);
                end = start + unitsize;
                outer_clean_range(start, end);
                remain -= unitsize;
                cur_addr += unitsize;
        }

        while (remain >= PAGE_SIZE) {
                start = page_to_pfn(vmalloc_to_page(cur_addr));
                start <<= PAGE_SHIFT;
                end = start + PAGE_SIZE;
                outer_clean_range(start, end);
                remain -= PAGE_SIZE;
                cur_addr += PAGE_SIZE;
        }

        if (remain) {
                start = page_to_pfn(vmalloc_to_page(cur_addr));
                start <<= PAGE_SHIFT;
                end = start + remain;
                outer_clean_range(start, end);
        }
        */

}

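/*
 * Invalidate a buffer the MFC has written to: drop the L2 lines for each
 * backing page first, then let dmac_unmap_area() invalidate the L1 side.
 */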
void s5p_mfc_cache_inv(const void *start_addr, unsigned long size)
{
        unsigned long paddr;
        void *cur_addr, *end_addr;

        cur_addr = (void *)((unsigned long)start_addr & PAGE_MASK);
        end_addr = cur_addr + PAGE_ALIGN(size);

        while (cur_addr < end_addr) {
                paddr = page_to_pfn(vmalloc_to_page(cur_addr));
                paddr <<= PAGE_SHIFT;
                if (paddr)
                        outer_inv_range(paddr, paddr + PAGE_SIZE);
                cur_addr += PAGE_SIZE;
        }

        dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);

        /* FIXME: L2 operation optimization */
        /*
        unsigned long start, end, unitsize;
        unsigned long cur_addr, remain;

        cur_addr = (unsigned long)start_addr;
        remain = size;

        start = page_to_pfn(vmalloc_to_page(cur_addr));
        start <<= PAGE_SHIFT;
        if (start & PAGE_MASK) {
                unitsize = min((start | PAGE_MASK) - start + 1, remain);
                end = start + unitsize;
                outer_inv_range(start, end);
                remain -= unitsize;
                cur_addr += unitsize;
        }

        while (remain >= PAGE_SIZE) {
                start = page_to_pfn(vmalloc_to_page(cur_addr));
                start <<= PAGE_SHIFT;
                end = start + PAGE_SIZE;
                outer_inv_range(start, end);
                remain -= PAGE_SIZE;
                cur_addr += PAGE_SIZE;
        }

        if (remain) {
                start = page_to_pfn(vmalloc_to_page(cur_addr));
                start <<= PAGE_SHIFT;
                end = start + remain;
                outer_inv_range(start, end);
        }

        dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);
        */
}

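/*
 * Suspend/resume the SDVMM allocation context.  The MFC clock is held on
 * around the vb2_sdvmm_suspend()/vb2_sdvmm_resume() call.
 */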
void s5p_mfc_mem_suspend(void *alloc_ctx)
{
        s5p_mfc_clock_on();
        vb2_sdvmm_suspend(alloc_ctx);
        s5p_mfc_clock_off();
}

void s5p_mfc_mem_resume(void *alloc_ctx)
{
        s5p_mfc_clock_on();
        vb2_sdvmm_resume(alloc_ctx);
        s5p_mfc_clock_off();
}
#else
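/*
 * Non-SDVMM (CMA / DMA pool) buffers are physically contiguous lowmem,
 * so a single __pa() translation and one outer-cache range operation
 * cover the whole buffer.
 */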
void s5p_mfc_cache_clean(const void *start_addr, unsigned long size)
{
        unsigned long paddr;

        dmac_map_area(start_addr, size, DMA_TO_DEVICE);
        paddr = __pa((unsigned long)start_addr);
        outer_clean_range(paddr, paddr + size);
}

void s5p_mfc_cache_inv(const void *start_addr, unsigned long size)
{
        unsigned long paddr;

        paddr = __pa((unsigned long)start_addr);
        outer_inv_range(paddr, paddr + size);
        dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);
}

void s5p_mfc_mem_suspend(void *alloc_ctx)
{
        /* NOP */
}

void s5p_mfc_mem_resume(void *alloc_ctx)
{
        /* NOP */
}
#endif
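
/*
 * Usage sketch (illustrative only, not part of the original driver): a
 * caller that fills a kernel-mapped bitstream buffer before a decode run
 * and then reads frame data written by the MFC would pair the helpers
 * roughly as below; stream_vaddr/frame_vaddr and the sizes are
 * hypothetical names.
 *
 *      s5p_mfc_cache_clean(stream_vaddr, stream_size);
 *      (program the MFC and wait for the decode to complete)
 *      s5p_mfc_cache_inv(frame_vaddr, frame_size);
 */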