kernel/linux-3.0.git: drivers/gpu/arm/mali400/ump/linux/ump_osk_low_level_mem.c
/*
 * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

/**
 * @file ump_osk_low_level_mem.c
 * Implementation of the OS abstraction layer for the kernel device driver
 */

/* needed to detect kernel version specific code */
#include <linux/version.h>

#include "ump_osk.h"
#include "ump_uk_types.h"
#include "ump_ukk.h"
#include "ump_kernel_common.h"
#include <linux/module.h>            /* kernel module definitions */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h> /* MALI_SEC */
#include <linux/slab.h>

#include <asm/memory.h>
#include <asm/uaccess.h>                        /* to verify pointers from user space */
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>

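/* Tracks how many VMAs still reference a UMP allocation. The count starts at 1 in
 * _ump_osk_mem_mapregion_init() and is raised/lowered by the VMA open/close hooks;
 * when it drops to zero the mapping is released. */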
typedef struct ump_vma_usage_tracker
{
        atomic_t references;
        ump_memory_allocation *descriptor;
} ump_vma_usage_tracker;

static void ump_vma_open(struct vm_area_struct * vma);
static void ump_vma_close(struct vm_area_struct * vma);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
#else
static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
#endif

static struct vm_operations_struct ump_vm_ops =
{
        .open = ump_vma_open,
        .close = ump_vma_close,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
        .fault = ump_cpu_page_fault_handler
#else
        .nopfn = ump_cpu_page_fault_handler
#endif
};

/*
 * Page fault for VMA region
 * This should never happen since we always map in the entire virtual memory range.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
#else
static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
        void __user * address;
        address = vmf->virtual_address;
#endif
        MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
        MSG_ERR(("VMA: 0x%08lx, virtual address: 0x%08lx\n", (unsigned long)vma, (unsigned long)address));

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
        return VM_FAULT_SIGBUS;
#else
        return NOPFN_SIGBUS;
#endif
}

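/* VMA open hook: invoked when the kernel duplicates the VMA (e.g. on fork() or a VMA
 * split); takes an extra reference on the usage tracker. */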
static void ump_vma_open(struct vm_area_struct * vma)
{
        ump_vma_usage_tracker * vma_usage_tracker;
        int new_val;

        vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
        BUG_ON(NULL == vma_usage_tracker);

        new_val = atomic_inc_return(&vma_usage_tracker->references);

        DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
}

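/* VMA close hook: drops one reference. When the last reference is gone, the UMP
 * mapping is released through _ump_ukk_unmap_mem(); the tracker itself is freed
 * later by _ump_osk_mem_mapregion_term(). */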
static void ump_vma_close(struct vm_area_struct * vma)
{
        ump_vma_usage_tracker * vma_usage_tracker;
        _ump_uk_unmap_mem_s args;
        int new_val;

        vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
        BUG_ON(NULL == vma_usage_tracker);

        new_val = atomic_dec_return(&vma_usage_tracker->references);

        DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));

        if (0 == new_val)
        {
                ump_memory_allocation * descriptor;

                descriptor = vma_usage_tracker->descriptor;

                args.ctx = descriptor->ump_session;
                args.cookie = descriptor->cookie;
                args.mapping = descriptor->mapping;
                args.size = descriptor->size;

                args._ukk_private = NULL; /** @note unused */

                DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
                _ump_ukk_unmap_mem( & args );

                /* vma_usage_tracker is free()d by _ump_osk_mem_mapregion_term() */
        }
}

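/* Prepares the VMA created by the user-space mmap() call: allocates and installs the
 * usage tracker, marks the region as IO/non-expandable/non-dumpable, selects
 * write-combined page protection for uncached allocations and hooks up ump_vm_ops. */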
_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descriptor )
{
        ump_vma_usage_tracker * vma_usage_tracker;
        struct vm_area_struct *vma;

        if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;

        vma_usage_tracker = kmalloc(sizeof(ump_vma_usage_tracker), GFP_KERNEL);
        if (NULL == vma_usage_tracker)
        {
                DBG_MSG(1, ("Failed to allocate memory for ump_vma_usage_tracker in _ump_osk_mem_mapregion_init\n"));
                return _MALI_OSK_ERR_FAULT;
        }

        vma = (struct vm_area_struct*)descriptor->process_mapping_info;
        if (NULL == vma )
        {
                kfree(vma_usage_tracker);
                return _MALI_OSK_ERR_FAULT;
        }

        vma->vm_private_data = vma_usage_tracker;
        vma->vm_flags |= VM_IO;
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

        if (0==descriptor->is_cached)
        {
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        }
        DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot ));

        /* Set up the operations which handle further VMA events */
        vma->vm_ops = &ump_vm_ops;

        /* The VA range allocation was already done by mmap(), so we just copy in that information */
        descriptor->mapping = (void __user*)vma->vm_start;

        atomic_set(&vma_usage_tracker->references, 1); /* this can later be increased if the process is forked, see ump_vma_open() */
        vma_usage_tracker->descriptor = descriptor;

        return _MALI_OSK_ERR_OK;
}

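/* Counterpart of _ump_osk_mem_mapregion_init(): only the usage tracker has to be
 * freed here, since the VMA itself is torn down by the kernel during munmap(). */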
void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor )
{
        struct vm_area_struct* vma;
        ump_vma_usage_tracker * vma_usage_tracker;

        if (NULL == descriptor) return;

        /* Linux does the right thing as part of munmap to remove the mapping.
         * All that remains is to remove the vma_usage_tracker set up in init(). */
        vma = (struct vm_area_struct*)descriptor->process_mapping_info;

        vma_usage_tracker = vma->vm_private_data;

        /* We only get called if mem_mapregion_init succeeded */
        kfree(vma_usage_tracker);
        return;
}

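/* Maps 'size' bytes of physical memory starting at *phys_addr into the user VMA at
 * the given byte offset, using remap_pfn_range() with the page protection chosen in
 * _ump_osk_mem_mapregion_init(). */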
_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size )
{
        struct vm_area_struct *vma;
        _mali_osk_errcode_t retval;

        if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;

        vma = (struct vm_area_struct*)descriptor->process_mapping_info;

        if (NULL == vma ) return _MALI_OSK_ERR_FAULT;

        retval = remap_pfn_range( vma, ((u32)descriptor->mapping) + offset,
                                  (*phys_addr) >> PAGE_SHIFT, size,
                                  vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;

        DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr:0x%08lx, physical addr: 0x%08lx, size:%lu, prot:0x%x, vm_flags:0x%x RETVAL: 0x%x\n",
                    ump_dd_secure_id_get(descriptor->handle),
                    (unsigned long)vma,
                    (unsigned long)(vma->vm_start + offset),
                    (unsigned long)*phys_addr,
                    size,
                    (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));

        return retval;
}

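/* Flushes the entire L1 (inner) cache of the calling CPU. */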
static void level1_cache_flush_all(void)
{
        DBG_MSG(4, ("UMP[xx] Flushing complete L1 cache\n"));
        __cpuc_flush_kern_all();
}

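/* Cache maintenance entry point. The L1 (inner) cache is handled by virtual address
 * when the caller has a valid writable mapping, and otherwise flushed in full or
 * deferred depending on the session state; the L2 (outer) cache is then maintained
 * per physical block according to 'op'. */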
void _ump_osk_msync( ump_dd_mem * mem, void * virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data * session_data )
{
        int i;
        const void *start_v, *end_v;

        /* Flush L1 using virtual addresses, the entire range in one go.
         * Only flush if the user space process has a valid write mapping for the given address. */
        if( (mem) && (virt!=NULL) && (access_ok(VERIFY_WRITE, virt, size)) )
        {
                start_v = (void *)virt;
                end_v   = (void *)(start_v + size - 1);
                /* There is no dmac_clean_range, so the L1 is always flushed,
                 * also for UMP_MSYNC_CLEAN. */
                /* MALI_SEC */
                dmac_flush_range(start_v, end_v);

                DBG_MSG(3, ("UMP[%02u] Flushing CPU L1 Cache. Cpu address: %x-%x\n", mem->secure_id, start_v, end_v));
        }
        else
        {
                if (session_data)
                {
                        if (op == _UMP_UK_MSYNC_FLUSH_L1)
                        {
                                DBG_MSG(4, ("UMP Pending L1 cache flushes: %d\n", session_data->has_pending_level1_cache_flush));
                                session_data->has_pending_level1_cache_flush = 0;
                                level1_cache_flush_all();
                                return;
                        }
                        else
                        {
                                if (session_data->cache_operations_ongoing)
                                {
                                        session_data->has_pending_level1_cache_flush++;
                                        DBG_MSG(4, ("UMP[%02u] Deferring the L1 flush. Nr pending:%d\n", mem->secure_id, session_data->has_pending_level1_cache_flush) );
                                }
                                else
                                {
                                        /* Flush the L1 cache for each switch_user() if ump_cache_operations_control(START) has not been called */
                                        level1_cache_flush_all();
                                }
                        }
                }
                else
                {
                        DBG_MSG(4, ("Unknown state %s %d\n", __FUNCTION__, __LINE__));
                        level1_cache_flush_all();
                }
        }

        if ( NULL == mem ) return;

        if ( mem->size_bytes==size)
        {
                DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache\n", mem->secure_id));
        }
        else
        {
                DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache. Blocks:%u, TotalSize:%u. FlushSize:%u Offset:0x%x FirstPaddr:0x%08x\n",
                    mem->secure_id, mem->nr_blocks, mem->size_bytes, size, offset, mem->block_array[0].addr));
        }

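        /* From here on, 'offset' and 'size' are consumed block by block: blocks that lie
         * entirely before the requested offset are skipped, and the walk stops as soon as
         * the requested number of bytes has been covered. */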
        /* Flush L2 using physical addresses, block for block. */
        for (i=0 ; i < mem->nr_blocks; i++)
        {
                u32 start_p, end_p;
                ump_dd_physical_block *block;
                block = &mem->block_array[i];

                if(offset >= block->size)
                {
                        offset -= block->size;
                        continue;
                }

                if(offset)
                {
                        start_p = (u32)block->addr + offset;
                        /* We'll zero the offset later, after using it to calculate end_p. */
                }
                else
                {
                        start_p = (u32)block->addr;
                }

                if(size < block->size - offset)
                {
                        end_p = start_p + size - 1;
                        size = 0;
                }
                else
                {
                        if(offset)
                        {
                                end_p = start_p + (block->size - offset - 1);
                                size -= block->size - offset;
                                offset = 0;
                        }
                        else
                        {
                                end_p = start_p + block->size - 1;
                                size -= block->size;
                        }
                }

                switch(op)
                {
                case _UMP_UK_MSYNC_CLEAN:
                        outer_clean_range(start_p, end_p);
                        break;
                case _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE:
                        outer_flush_range(start_p, end_p);
                        break;
                case _UMP_UK_MSYNC_INVALIDATE:
                        outer_inv_range(start_p, end_p);
                        break;
                default:
                        break;
                }

                if(0 == size)
                {
                        /* Nothing left to flush. */
                        break;
                }
        }

        return;
}