/*
 * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/**
 * @file ump_osk_memory.c
 * Implementation of the OS abstraction layer for the kernel device driver
 */
/* needed to detect kernel version specific code */
#include <linux/version.h>

#include "ump_osk.h"
#include "ump_uk_types.h"
#include "ump_ukk.h"
#include "ump_kernel_common.h"
#include <linux/module.h>   /* kernel module definitions */
#include <linux/kernel.h>
#include <linux/mm.h>       /* vm_area_struct, remap_pfn_range */
#include <linux/sched.h>    /* MALI_SEC */
#include <linux/slab.h>

#include <asm/memory.h>
#include <asm/uaccess.h>    /* to verify pointers from user space */
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
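
/* Bookkeeping attached to vma->vm_private_data: a reference count for the
 * VMA (see ump_vma_open()/ump_vma_close()) plus the UMP allocation
 * descriptor it maps, so the last close can release the UMP memory. */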
typedef struct ump_vma_usage_tracker
{
	atomic_t references;
	ump_memory_allocation *descriptor;
} ump_vma_usage_tracker;
static void ump_vma_open(struct vm_area_struct * vma);
static void ump_vma_close(struct vm_area_struct * vma);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
#else
static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
#endif
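
/* VMA callbacks: open/close track the mapping's lifetime; the fault handler
 * only reports errors, since the entire range is mapped up front. */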
static struct vm_operations_struct ump_vm_ops =
{
	.open = ump_vma_open,
	.close = ump_vma_close,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	.fault = ump_cpu_page_fault_handler
#else
	.nopfn = ump_cpu_page_fault_handler
#endif
};
/*
 * Page fault for VMA region
 * This should never happen since we always map in the entire virtual memory range.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
#else
static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	void __user * address;
	address = vmf->virtual_address;
#endif
	MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
	MSG_ERR(("VMA: 0x%08lx, virtual address: 0x%08lx\n", (unsigned long)vma, (unsigned long)address));

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	return VM_FAULT_SIGBUS;
#else
	return NOPFN_SIGBUS;
#endif
}
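
/* Called by the kernel whenever the VMA is duplicated or split (e.g. on
 * fork()); takes another reference on the usage tracker. */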
static void ump_vma_open(struct vm_area_struct * vma)
{
	ump_vma_usage_tracker * vma_usage_tracker;
	int new_val;

	vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
	BUG_ON(NULL == vma_usage_tracker);

	new_val = atomic_inc_return(&vma_usage_tracker->references);

	DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
}
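
/* Called by the kernel when part of the mapping goes away; drops a
 * reference, and on the last close asks the UMP core to unmap the memory. */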
static void ump_vma_close(struct vm_area_struct * vma)
{
	ump_vma_usage_tracker * vma_usage_tracker;
	_ump_uk_unmap_mem_s args;
	int new_val;

	vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
	BUG_ON(NULL == vma_usage_tracker);

	new_val = atomic_dec_return(&vma_usage_tracker->references);

	DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));

	if (0 == new_val)
	{
		ump_memory_allocation * descriptor;

		descriptor = vma_usage_tracker->descriptor;

		args.ctx = descriptor->ump_session;
		args.cookie = descriptor->cookie;
		args.mapping = descriptor->mapping;
		args.size = descriptor->size;

		args._ukk_private = NULL; /** @note unused */

		DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
		_ump_ukk_unmap_mem( & args );

		/* vma_usage_tracker is free()d by _ump_osk_mem_mapregion_term() */
	}
}
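
/* Prepare the user-space VMA created by mmap(): attach the usage tracker,
 * mark the region as I/O memory and, for uncached allocations, select a
 * write-combining mapping. Physical pages are inserted later, in
 * _ump_osk_mem_mapregion_map(). */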
_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descriptor )
{
	ump_vma_usage_tracker * vma_usage_tracker;
	struct vm_area_struct *vma;

	if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;

	vma_usage_tracker = kmalloc(sizeof(ump_vma_usage_tracker), GFP_KERNEL);
	if (NULL == vma_usage_tracker)
	{
		DBG_MSG(1, ("Failed to allocate memory for ump_vma_usage_tracker in _ump_osk_mem_mapregion_init\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	vma = (struct vm_area_struct*)descriptor->process_mapping_info;
	if (NULL == vma)
	{
		kfree(vma_usage_tracker);
		return _MALI_OSK_ERR_FAULT;
	}

	vma->vm_private_data = vma_usage_tracker;
	vma->vm_flags |= VM_IO;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; /* keep the VMA fixed in size and out of core dumps */

	if (0==descriptor->is_cached)
	{
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	}
	DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot ));

	/* Setup the functions which handle further VMA handling */
	vma->vm_ops = &ump_vm_ops;

	/* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
	descriptor->mapping = (void __user*)vma->vm_start;

	atomic_set(&vma_usage_tracker->references, 1); /* this can later be increased if the process is forked, see ump_vma_open() */
	vma_usage_tracker->descriptor = descriptor;

	return _MALI_OSK_ERR_OK;
}
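
/* Tear-down counterpart of _ump_osk_mem_mapregion_init(). munmap() has
 * already removed the mapping itself, so only the tracker is left to free. */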
void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor )
{
	struct vm_area_struct* vma;
	ump_vma_usage_tracker * vma_usage_tracker;

	if (NULL == descriptor) return;

	/* Linux does the right thing as part of munmap to remove the mapping
	 * All that remains is that we remove the vma_usage_tracker setup in init() */
	vma = (struct vm_area_struct*)descriptor->process_mapping_info;

	vma_usage_tracker = vma->vm_private_data;

	/* We only get called if mem_mapregion_init succeeded */
	kfree(vma_usage_tracker);
	return;
}
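
/* Map one physically contiguous region into the user-space VMA at the given
 * offset. The UMP core calls this for each physical block of the allocation. */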
_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size )
{
	struct vm_area_struct *vma;
	_mali_osk_errcode_t retval;

	if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;

	vma = (struct vm_area_struct*)descriptor->process_mapping_info;

	if (NULL == vma) return _MALI_OSK_ERR_FAULT;

	retval = remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;

	DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr:0x%08lx, physical addr: 0x%08lx, size:%lu, prot:0x%x, vm_flags:0x%x RETVAL: 0x%x\n",
	            ump_dd_secure_id_get(descriptor->handle),
	            (unsigned long)vma,
	            (unsigned long)(vma->vm_start + offset),
	            (unsigned long)*phys_addr,
	            size,
	            (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));

	return retval;
}
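
/* Helper used when a ranged L1 flush is not possible or not worthwhile:
 * flushes the entire L1 cache via the ARM-specific __cpuc_flush_kern_all(). */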
static void level1_cache_flush_all(void)
{
	DBG_MSG(4, ("UMP[xx] Flushing complete L1 cache\n"));
	__cpuc_flush_kern_all();
}
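
/* Cache maintenance for a UMP allocation. The L1 is handled by user-space
 * virtual address (when a valid writable mapping is given), the outer/L2
 * cache by physical address, block by block. */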
void _ump_osk_msync( ump_dd_mem * mem, void * virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data * session_data )
{
	int i;
	const void *start_v, *end_v;

	/* Flush L1 using virtual address, the entire range in one go.
	 * Only flush if user space process has a valid write mapping on given address. */
	if( (mem) && (virt!=NULL) && (access_ok(VERIFY_WRITE, virt, size)) )
	{
		start_v = (void *)virt;
		end_v = (void *)(start_v + size - 1);
		/* There is no dmac_clean_range, so the L1 is always flushed,
		 * also for UMP_MSYNC_CLEAN. */
		dmac_flush_range(start_v, end_v);

		DBG_MSG(3, ("UMP[%02u] Flushing CPU L1 Cache. Cpu address: %x-%x\n", mem->secure_id, start_v, end_v));
	}
	else
	{
		if (session_data)
		{
			if (op == _UMP_UK_MSYNC_FLUSH_L1)
			{
				DBG_MSG(4, ("UMP Pending L1 cache flushes: %d\n", session_data->has_pending_level1_cache_flush));
				session_data->has_pending_level1_cache_flush = 0;
				level1_cache_flush_all();
				return;
			}

			if (session_data->cache_operations_ongoing)
			{
				session_data->has_pending_level1_cache_flush++;
				DBG_MSG(4, ("UMP[%02u] Deferring the L1 flush. Nr pending:%d\n", mem->secure_id, session_data->has_pending_level1_cache_flush) );
			}
			else
			{
				/* Flush the L1 cache on each switch_user() if ump_cache_operations_control(START) was not called */
				level1_cache_flush_all();
			}
		}
		else
		{
			DBG_MSG(4, ("Unknown state %s %d\n", __FUNCTION__, __LINE__));
			level1_cache_flush_all();
		}
	}

	if ( NULL == mem ) return;

	if ( mem->size_bytes == size )
	{
		DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache\n", mem->secure_id));
	}
	else
	{
		DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache. Blocks:%u, TotalSize:%u. FlushSize:%u Offset:0x%x FirstPaddr:0x%08x\n",
		            mem->secure_id, mem->nr_blocks, mem->size_bytes, size, offset, mem->block_array[0].addr));
	}
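
	/* 'offset' is consumed while skipping leading blocks, and 'size' counts
	 * down as each block's range is maintained, so the loop can stop once
	 * the requested range has been covered. */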
	/* Flush L2 using physical addresses, block for block. */
	for (i=0 ; i < mem->nr_blocks; i++)
	{
		u32 start_p, end_p;
		ump_dd_physical_block *block;
		block = &mem->block_array[i];

		if(offset >= block->size)
		{
			/* The range starts after this block; skip it entirely. */
			offset -= block->size;
			continue;
		}

		if(offset)
		{
			start_p = (u32)block->addr + offset;
			/* We'll zero the offset later, after using it to calculate end_p. */
		}
		else
		{
			start_p = (u32)block->addr;
		}

		if(size < block->size - offset)
		{
			/* The range ends inside this block. */
			end_p = start_p + size - 1;
			size = 0;
		}
		else if(offset)
		{
			/* First block of the range: flush from the offset to the block end. */
			end_p = start_p + (block->size - offset - 1);
			size -= block->size - offset;
			offset = 0;
		}
		else
		{
			/* The whole block lies inside the range. */
			end_p = start_p + block->size - 1;
			size -= block->size;
		}

		/* Apply the requested maintenance operation to the outer (L2) cache. */
		switch(op)
		{
		case _UMP_UK_MSYNC_CLEAN:
			outer_clean_range(start_p, end_p);
			break;
		case _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE:
			outer_flush_range(start_p, end_p);
			break;
		case _UMP_UK_MSYNC_INVALIDATE:
			outer_inv_range(start_p, end_p);
			break;
		default:
			break;
		}

		if (0 == size)
		{
			/* Nothing left to flush. */
			break;
		}
	}

	return;
}