kernel/linux-2.6.36.git: drivers/media/video/samsung/ump/linux/ump_osk_low_level_mem.c
/*
 * Copyright (C) 2010 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

/**
 * @file ump_osk_low_level_mem.c
 * Implementation of the OS abstraction layer for the kernel device driver
 */

/* needed to detect kernel version specific code */
#include <linux/version.h>

#include "ump_osk.h"
#include "ump_uk_types.h"
#include "ump_ukk.h"
#include "ump_kernel_common.h"
#include <linux/module.h>            /* kernel module definitions */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/memory.h>
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>

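/* Per-VMA bookkeeping attached to vma->vm_private_data: a reference count
 * (incremented on fork/open, decremented on close) and the UMP allocation
 * descriptor the mapping belongs to. */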
typedef struct ump_vma_usage_tracker
{
	atomic_t references;
	ump_memory_allocation *descriptor;
} ump_vma_usage_tracker;

static void ump_vma_open(struct vm_area_struct * vma);
static void ump_vma_close(struct vm_area_struct * vma);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
#else
static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
#endif

static struct vm_operations_struct ump_vm_ops =
{
	.open = ump_vma_open,
	.close = ump_vma_close,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	.fault = ump_cpu_page_fault_handler
#else
	.nopfn = ump_cpu_page_fault_handler
#endif
};

/*
 * Page fault for VMA region
 * This should never happen since we always map in the entire virtual memory range.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
#else
static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	void __user * address;
	address = vmf->virtual_address;
#endif
	MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
	MSG_ERR(("VMA: 0x%08lx, virtual address: 0x%08lx\n", (unsigned long)vma, (unsigned long)address));

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	return VM_FAULT_SIGBUS;
#else
	return NOPFN_SIGBUS;
#endif
}

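/* VMA open callback: another reference to this mapping has been created
 * (typically when the owning process forks), so bump the usage count. */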
static void ump_vma_open(struct vm_area_struct * vma)
{
	ump_vma_usage_tracker * vma_usage_tracker;
	int new_val;

	vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
	BUG_ON(NULL == vma_usage_tracker);

	new_val = atomic_inc_return(&vma_usage_tracker->references);

	DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
}

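/* VMA close callback: drop one reference. When the last reference goes away,
 * the UMP mapping is torn down through _ump_ukk_unmap_mem(). */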
static void ump_vma_close(struct vm_area_struct * vma)
{
	ump_vma_usage_tracker * vma_usage_tracker;
	_ump_uk_unmap_mem_s args;
	int new_val;

	vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
	BUG_ON(NULL == vma_usage_tracker);

	new_val = atomic_dec_return(&vma_usage_tracker->references);

	DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));

	if (0 == new_val)
	{
		ump_memory_allocation * descriptor;

		descriptor = vma_usage_tracker->descriptor;

		args.ctx = descriptor->ump_session;
		args.cookie = descriptor->cookie;
		args.mapping = descriptor->mapping;
		args.size = descriptor->size;

		args._ukk_private = NULL; /** @note unused */

		DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
		_ump_ukk_unmap_mem( & args );

		/* vma_usage_tracker is free()d by _ump_osk_mem_mapregion_term() */
	}
}

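/* Attach the UMP bookkeeping to a freshly created VMA: allocate the usage
 * tracker, mark the VMA as an IO/reserved mapping, select write-combined
 * page protection for uncached allocations and install ump_vm_ops. */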
_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descriptor )
{
	ump_vma_usage_tracker * vma_usage_tracker;
	struct vm_area_struct *vma;

	if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;

	vma_usage_tracker = kmalloc(sizeof(ump_vma_usage_tracker), GFP_KERNEL);
	if (NULL == vma_usage_tracker)
	{
		DBG_MSG(1, ("Failed to allocate memory for ump_vma_usage_tracker in _ump_osk_mem_mapregion_init\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	vma = (struct vm_area_struct*)descriptor->process_mapping_info;
	if (NULL == vma )
	{
		kfree(vma_usage_tracker);
		return _MALI_OSK_ERR_FAULT;
	}

	vma->vm_private_data = vma_usage_tracker;
	vma->vm_flags |= VM_IO;
	vma->vm_flags |= VM_RESERVED;

	if (0==descriptor->is_cached)
	{
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	}
	DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot ));

	/* Setup the functions which handle further VMA handling */
	vma->vm_ops = &ump_vm_ops;

	/* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
	descriptor->mapping = (void __user*)vma->vm_start;

	atomic_set(&vma_usage_tracker->references, 1); /* this can later be increased if process is forked, see ump_vma_open() */
	vma_usage_tracker->descriptor = descriptor;

	return _MALI_OSK_ERR_OK;
}

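/* Release the per-VMA bookkeeping set up by _ump_osk_mem_mapregion_init(). */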
void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor )
{
	struct vm_area_struct* vma;
	ump_vma_usage_tracker * vma_usage_tracker;

	if (NULL == descriptor) return;

	/* Linux does the right thing as part of munmap to remove the mapping
	 * All that remains is that we remove the vma_usage_tracker setup in init() */
	vma = (struct vm_area_struct*)descriptor->process_mapping_info;

	vma_usage_tracker = vma->vm_private_data;

	/* We only get called if mem_mapregion_init succeeded */
	kfree(vma_usage_tracker);
	return;
}

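/* Map one physical block of the allocation into the process address space at
 * the given offset within the previously reserved VA range, using
 * remap_pfn_range() with the page protection chosen in init(). */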
_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size )
{
	struct vm_area_struct *vma;
	_mali_osk_errcode_t retval;

	if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;

	vma = (struct vm_area_struct*)descriptor->process_mapping_info;

	if (NULL == vma ) return _MALI_OSK_ERR_FAULT;

	retval = remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;

	DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr:0x%08lx, physical addr: 0x%08lx, size:%lu, prot:0x%x, vm_flags:0x%x RETVAL: 0x%x\n",
		ump_dd_secure_id_get(descriptor->handle),
		(unsigned long)vma,
		(unsigned long)(vma->vm_start + offset),
		(unsigned long)*phys_addr,
		size,
		(unsigned int)vma->vm_page_prot, vma->vm_flags, retval));

	return retval;
}

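/* Translate a user virtual address inside a UMP mapping to the physical
 * address of the first byte of the range, walking the allocation's physical
 * block list. The index of the block containing the address is returned
 * through *index. */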
static u32 _ump_osk_virt_to_phys_start(ump_dd_mem * mem, u32 start, u32 address, int *index)
{
	int i;
	u32 offset = address - start;
	ump_dd_physical_block *block;
	u32 sum = 0;

	for (i=0; i<mem->nr_blocks; i++) {
		block = &mem->block_array[i];
		sum += block->size;
		if (sum > offset) {
			*index = i;
			DBG_MSG(3, ("_ump_osk_virt_to_phys : index : %d, virtual 0x%x, phys 0x%x\n", i, address, (u32)block->addr + offset - (sum - block->size)));
			return (u32)block->addr + offset - (sum - block->size);
		}
	}

	return _MALI_OSK_ERR_FAULT;
}

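/* Same translation for the end address of a range; the only difference from
 * the _start variant is the '>=' comparison, so an end address that falls
 * exactly on a block boundary resolves to the preceding block. */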
static u32 _ump_osk_virt_to_phys_end(ump_dd_mem * mem, u32 start, u32 address, int *index)
{
	int i;
	u32 offset = address - start;
	ump_dd_physical_block *block;
	u32 sum = 0;

	for (i=0; i<mem->nr_blocks; i++) {
		block = &mem->block_array[i];
		sum += block->size;
		if (sum >= offset) {
			*index = i;
			DBG_MSG(3, ("_ump_osk_virt_to_phys : index : %d, virtual 0x%x, phys 0x%x\n", i, address, (u32)block->addr + offset - (sum - block->size)));
			return (u32)block->addr + offset - (sum - block->size);
		}
	}

	return _MALI_OSK_ERR_FAULT;
}

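/* L2 (outer) cache maintenance for a user virtual range. The range is first
 * translated to physical addresses; if it spans several physical blocks each
 * block is cleaned/flushed separately, since the blocks need not be
 * physically contiguous. */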
static void _ump_osk_msync_with_virt(ump_dd_mem * mem, ump_uk_msync_op op, u32 start, u32 address, u32 size)
{
	int start_index, end_index;
	u32 start_p, end_p;

	DBG_MSG(3, ("Cache flush with user virtual address. start : 0x%x, end : 0x%x, address 0x%x, size 0x%x\n", start, start+mem->size_bytes, address, size));

	start_p = _ump_osk_virt_to_phys_start(mem, start, address, &start_index);
	end_p = _ump_osk_virt_to_phys_end(mem, start, address+size, &end_index);

	if (start_index==end_index) {
		if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE)
			outer_flush_range(start_p, end_p);
		else
			outer_clean_range(start_p, end_p);
	} else {
		ump_dd_physical_block *block;
		int i;

		for (i=start_index; i<=end_index; i++) {
			block = &mem->block_array[i];

			if (i == start_index) {
				if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
					outer_flush_range(start_p, block->addr+block->size);
				} else {
					outer_clean_range(start_p, block->addr+block->size);
				}
			}
			else if (i == end_index) {
				if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
					outer_flush_range(block->addr, end_p);
				} else {
					outer_clean_range(block->addr, end_p);
				}
				break;
			}
			else {
				if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
					outer_flush_range(block->addr, block->addr+block->size);
				} else {
					outer_clean_range(block->addr, block->addr+block->size);
				}
			}
		}
	}
	return;
}

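/* CPU cache maintenance for a UMP allocation. If a valid user virtual range
 * is supplied, only that range is cleaned/flushed (L1 by virtual address, L2
 * via _ump_osk_msync_with_virt()). Otherwise a clean+invalidate of a large
 * allocation falls back to flushing the whole cache, and smaller allocations
 * are processed block by block through the kernel linear mapping. */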
void _ump_osk_msync( ump_dd_mem * mem, ump_uk_msync_op op, u32 start, u32 address, u32 size)
{
	int i;
	u32 start_p, end_p;
	ump_dd_physical_block *block;

	DBG_MSG(3,
		("Flushing nr of blocks: %u. First: paddr: 0x%08x vaddr: 0x%08x size:%dB\n",
		 mem->nr_blocks, mem->block_array[0].addr,
		 phys_to_virt(mem->block_array[0].addr),
		 mem->block_array[0].size));

#ifndef USING_DMA_FLUSH
	if (address) {
		if ((address >= start)
		    && ((address + size) <= start + mem->size_bytes)) {
			if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE)
				dmac_flush_range((void *)address,
						 (void *)(address + size - 1));
			else
				dmac_map_area((void *)address, size,
					      DMA_TO_DEVICE);
#ifdef CONFIG_CACHE_L2X0
			_ump_osk_msync_with_virt(mem, op, start, address, size);
#endif
			return;
		}
	}

	if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
		if (mem->size_bytes >= SZ_1M) {
			__cpuc_flush_kern_all();
			smp_call_function(__cpuc_flush_kern_all, NULL, 1);
			outer_flush_all();
			return;
		} else if (mem->size_bytes >= SZ_64K) {
			flush_cache_all();
#ifdef CONFIG_CACHE_L2X0
			for (i = 0; i < mem->nr_blocks; i++) {
				block = &mem->block_array[i];
				start_p = (u32) block->addr;
				end_p = start_p + block->size - 1;
				outer_flush_range(start_p, end_p);
			}
#endif
			return;
		}
	}
#endif

	for (i = 0; i < mem->nr_blocks; i++) {
		/* TODO: Find out which flush method is best of 1)Dma OR  2)Normal flush functions */
		/*#define USING_DMA_FLUSH */
#ifdef USING_DMA_FLUSH
		DEBUG_ASSERT((PAGE_SIZE == mem->block_array[i].size));
		dma_map_page(NULL,
			     pfn_to_page(mem->block_array[i].
					 addr >> PAGE_SHIFT), 0, PAGE_SIZE,
			     DMA_BIDIRECTIONAL);
		/*dma_unmap_page(NULL, mem->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL); */
#else
		block = &mem->block_array[i];
		start_p = (u32) block->addr;
		end_p = start_p + block->size - 1;
		if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
			dmac_flush_range(phys_to_virt(start_p),
					 phys_to_virt(end_p));
			outer_flush_range(start_p, end_p);
		} else {
			dmac_map_area(phys_to_virt(start_p), block->size,
				      DMA_TO_DEVICE);
			outer_clean_range(start_p, end_p);
		}
#endif
	}
}

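/* Look up the UMP allocation backing a user virtual address: find the VMA
 * containing the address and recover the ump_dd_mem handle through the
 * usage tracker stored in vm_private_data. *mem is set to NULL when no VMA
 * or no usage tracker is found. */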
void _ump_osk_mem_mapregion_get( ump_dd_mem ** mem, unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	ump_vma_usage_tracker * vma_usage_tracker;
	ump_memory_allocation *descriptor;
	ump_dd_handle handle;

	DBG_MSG(3, ("_ump_osk_mem_mapregion_get: vaddr 0x%08lx\n", vaddr));

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, vaddr);
	up_read(&mm->mmap_sem);
	if (!vma)
	{
		DBG_MSG(3, ("No VMA found\n"));
		*mem = NULL;
		return;
	}
	DBG_MSG(4, ("Get vma: 0x%08lx vma->vm_start: 0x%08lx\n", (unsigned long)vma, vma->vm_start));

	vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
	if (vma_usage_tracker == NULL)
	{
		DBG_MSG(3, ("No vma_usage_tracker found\n"));
		*mem = NULL;
		return;
	}

	descriptor = (ump_memory_allocation*)vma_usage_tracker->descriptor;
	handle = (ump_dd_handle)descriptor->handle;

	DBG_MSG(3, ("Get handle: 0x%08lx\n", (unsigned long)handle));
	*mem = (ump_dd_mem*)handle;
}