/*
 * Copyright (C) 2010 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/**
 * @file ump_osk_memory.c
 * Implementation of the OS abstraction layer for the kernel device driver
 */
/* needed to detect kernel version specific code */
#include <linux/version.h>

#include "ump_osk.h"
#include "ump_uk_types.h"
#include "ump_ukk.h"
#include "ump_kernel_common.h"
#include <linux/module.h>            /* kernel module definitions */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/memory.h>
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
typedef struct ump_vma_usage_tracker
{
	atomic_t references;
	ump_memory_allocation *descriptor;
} ump_vma_usage_tracker;
static void ump_vma_open(struct vm_area_struct * vma);
static void ump_vma_close(struct vm_area_struct * vma);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
#else
static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
#endif

static struct vm_operations_struct ump_vm_ops =
{
	.open = ump_vma_open,
	.close = ump_vma_close,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	.fault = ump_cpu_page_fault_handler
#else
	.nopfn = ump_cpu_page_fault_handler
#endif
};
/*
 * Page fault for VMA region.
 * This should never happen since we always map in the entire virtual memory range.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
#else
static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	void __user * address;
	address = vmf->virtual_address;
#endif
	MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
	MSG_ERR(("VMA: 0x%08lx, virtual address: 0x%08lx\n", (unsigned long)vma, (unsigned long)address));

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	return VM_FAULT_SIGBUS;
#else
	return NOPFN_SIGBUS;
#endif
}
static void ump_vma_open(struct vm_area_struct * vma)
{
	ump_vma_usage_tracker * vma_usage_tracker;
	int new_val;

	vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
	BUG_ON(NULL == vma_usage_tracker);

	new_val = atomic_inc_return(&vma_usage_tracker->references);

	DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
}
static void ump_vma_close(struct vm_area_struct * vma)
{
	ump_vma_usage_tracker * vma_usage_tracker;
	_ump_uk_unmap_mem_s args;
	int new_val;

	vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
	BUG_ON(NULL == vma_usage_tracker);

	new_val = atomic_dec_return(&vma_usage_tracker->references);

	DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));

	if (0 == new_val)
	{
		ump_memory_allocation * descriptor;

		descriptor = vma_usage_tracker->descriptor;

		args.ctx = descriptor->ump_session;
		args.cookie = descriptor->cookie;
		args.mapping = descriptor->mapping;
		args.size = descriptor->size;
		args._ukk_private = NULL; /** @note unused */

		DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
		_ump_ukk_unmap_mem( & args );
	}

	/* vma_usage_tracker is free()d by _ump_osk_mem_mapregion_term() */
}
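
/**
 * Set up kernel-side tracking for a new user-space mapping.
 *
 * The VMA itself has already been created by the mmap() path and is passed
 * in via descriptor->process_mapping_info; this function attaches a
 * reference-counted usage tracker to it, marks the region as an IO mapping,
 * selects write-combined page protection for uncached allocations, and
 * installs ump_vm_ops so forks and unmaps are tracked.
 */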
_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descriptor )
{
	ump_vma_usage_tracker * vma_usage_tracker;
	struct vm_area_struct *vma;

	if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;

	vma_usage_tracker = kmalloc(sizeof(ump_vma_usage_tracker), GFP_KERNEL);
	if (NULL == vma_usage_tracker)
	{
		DBG_MSG(1, ("Failed to allocate memory for ump_vma_usage_tracker in _ump_osk_mem_mapregion_init\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	vma = (struct vm_area_struct*)descriptor->process_mapping_info;
	if (NULL == vma)
	{
		kfree(vma_usage_tracker);
		return _MALI_OSK_ERR_FAULT;
	}

	vma->vm_private_data = vma_usage_tracker;
	vma->vm_flags |= VM_IO;
	vma->vm_flags |= VM_RESERVED;

	if (0 == descriptor->is_cached)
	{
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	}
	DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot));

	/* Set up the functions which handle further VMA handling */
	vma->vm_ops = &ump_vm_ops;

	/* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
	descriptor->mapping = (void __user*)vma->vm_start;

	atomic_set(&vma_usage_tracker->references, 1); /* this can later be increased if the process is forked, see ump_vma_open() */
	vma_usage_tracker->descriptor = descriptor;

	return _MALI_OSK_ERR_OK;
}
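
/**
 * Tear down the tracking set up by _ump_osk_mem_mapregion_init().
 *
 * Linux removes the mapping itself as part of munmap(); all that remains
 * is to free the vma_usage_tracker attached to the VMA in init().
 */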
void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor )
{
	struct vm_area_struct* vma;
	ump_vma_usage_tracker * vma_usage_tracker;

	if (NULL == descriptor) return;

	/* Linux does the right thing as part of munmap to remove the mapping.
	 * All that remains is that we remove the vma_usage_tracker set up in init() */
	vma = (struct vm_area_struct*)descriptor->process_mapping_info;
	vma_usage_tracker = vma->vm_private_data;

	/* We only get called if mem_mapregion_init succeeded */
	kfree(vma_usage_tracker);
}
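
/**
 * Map one physical block into the user-space VMA at the given offset.
 *
 * descriptor->mapping was set to vma->vm_start in init(), so the target
 * user virtual address is simply mapping + offset.
 */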
_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size )
{
	struct vm_area_struct *vma;
	_mali_osk_errcode_t retval;

	if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;

	vma = (struct vm_area_struct*)descriptor->process_mapping_info;
	if (NULL == vma) return _MALI_OSK_ERR_FAULT;

	retval = remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;

	DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr: 0x%08lx, physical addr: 0x%08lx, size: %lu, prot: 0x%x, vm_flags: 0x%x RETVAL: 0x%x\n",
	            ump_dd_secure_id_get(descriptor->handle),
	            (unsigned long)vma,
	            (unsigned long)(vma->vm_start + offset),
	            (unsigned long)*phys_addr,
	            size,
	            (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));

	return retval;
}
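
/**
 * Translate a user virtual address inside a mapped allocation to the
 * physical address that backs it, by walking the (possibly
 * non-contiguous) block array and accumulating block sizes. The _start
 * variant resolves an address to the block containing it; the _end
 * variant differs only in also accepting an address that falls exactly on
 * a block boundary, so that an exclusive range end resolves to the
 * preceding block. On success the block index is returned through *index.
 */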
static u32 _ump_osk_virt_to_phys_start(ump_dd_mem * mem, u32 start, u32 address, int *index)
{
	int i;
	u32 offset = address - start;
	ump_dd_physical_block *block;
	u32 sum = 0;

	for (i = 0; i < mem->nr_blocks; i++) {
		block = &mem->block_array[i];
		sum += block->size;
		if (sum > offset) {
			*index = i;
			DBG_MSG(3, ("_ump_osk_virt_to_phys : index : %d, virtual 0x%x, phys 0x%x\n", i, address, (u32)block->addr + offset - (sum - block->size)));
			return (u32)block->addr + offset - (sum - block->size);
		}
	}
	return _MALI_OSK_ERR_FAULT;
}
static u32 _ump_osk_virt_to_phys_end(ump_dd_mem * mem, u32 start, u32 address, int *index)
{
	int i;
	u32 offset = address - start;
	ump_dd_physical_block *block;
	u32 sum = 0;

	for (i = 0; i < mem->nr_blocks; i++) {
		block = &mem->block_array[i];
		sum += block->size;
		if (sum >= offset) {
			*index = i;
			DBG_MSG(3, ("_ump_osk_virt_to_phys : index : %d, virtual 0x%x, phys 0x%x\n", i, address, (u32)block->addr + offset - (sum - block->size)));
			return (u32)block->addr + offset - (sum - block->size);
		}
	}
	return _MALI_OSK_ERR_FAULT;
}
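
/**
 * Perform L2 (outer) cache maintenance for a sub-range of an allocation,
 * addressed by user virtual address. When the range is contained in a
 * single physical block, one outer clean/flush covers it; otherwise the
 * first and last blocks are handled partially and every block in between
 * is maintained in full.
 */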
static void _ump_osk_msync_with_virt(ump_dd_mem * mem, ump_uk_msync_op op, u32 start, u32 address, u32 size)
{
	int start_index, end_index;
	u32 start_p, end_p;

	DBG_MSG(3, ("Cache flush with user virtual address. start : 0x%x, end : 0x%x, address 0x%x, size 0x%x\n", start, start + mem->size_bytes, address, size));

	start_p = _ump_osk_virt_to_phys_start(mem, start, address, &start_index);
	end_p = _ump_osk_virt_to_phys_end(mem, start, address + size, &end_index);

	if (start_index == end_index) {
		if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE)
			outer_flush_range(start_p, end_p);
		else
			outer_clean_range(start_p, end_p);
	} else {
		ump_dd_physical_block *block;
		int i;

		for (i = start_index; i <= end_index; i++) {
			block = &mem->block_array[i];
			if (i == start_index) {
				if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
					outer_flush_range(start_p, block->addr + block->size);
				} else {
					outer_clean_range(start_p, block->addr + block->size);
				}
			} else if (i == end_index) {
				if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
					outer_flush_range(block->addr, end_p);
				} else {
					outer_clean_range(block->addr, end_p);
				}
				break;
			} else {
				if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
					outer_flush_range(block->addr, block->addr + block->size);
				} else {
					outer_clean_range(block->addr, block->addr + block->size);
				}
			}
		}
	}
}
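
/**
 * Cache maintenance entry point for a UMP allocation.
 *
 * Strategy, in order:
 *  1. If the requested range lies entirely inside the user mapping,
 *     maintain the inner cache by user virtual address (plus the L2
 *     range via _ump_osk_msync_with_virt() when an L2X0 outer cache is
 *     configured) and return.
 *  2. For clean-and-invalidate on large allocations, a full inner cache
 *     flush on every CPU is treated as cheaper than walking the range
 *     (with per-block L2 flushes for the >= 64 KB case).
 *  3. Otherwise, clean or flush each physical block through its kernel
 *     linear mapping (phys_to_virt()).
 */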
void _ump_osk_msync( ump_dd_mem * mem, ump_uk_msync_op op, u32 start, u32 address, u32 size)
{
	int i;
	u32 start_p, end_p;
	ump_dd_physical_block *block;

	DBG_MSG(3,
		("Flushing nr of blocks: %u. First: paddr: 0x%08x vaddr: 0x%08x size:%dB\n",
		 mem->nr_blocks, mem->block_array[0].addr,
		 phys_to_virt(mem->block_array[0].addr),
		 mem->block_array[0].size));

#ifndef USING_DMA_FLUSH
	/* Fast path: the range is fully inside the user mapping, so the
	 * inner cache can be maintained on user virtual addresses directly */
	if ((address >= start)
	    && ((address + size) <= start + mem->size_bytes)) {
		if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE)
			dmac_flush_range((void *)address,
					 (void *)(address + size - 1));
		else
			dmac_map_area((void *)address, size,
				      DMA_TO_DEVICE);
#ifdef CONFIG_CACHE_L2X0
		_ump_osk_msync_with_virt(mem, op, start, address, size);
#endif
		return;
	}
#endif

	if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
		if (mem->size_bytes >= SZ_1M) {
			/* For large ranges a full flush of all CPU caches is cheaper */
			__cpuc_flush_kern_all();
			smp_call_function((void (*)(void *))__cpuc_flush_kern_all, NULL, 1);
			outer_flush_all();
			return;
		} else if (mem->size_bytes >= SZ_64K) {
			__cpuc_flush_kern_all();
#ifdef CONFIG_CACHE_L2X0
			for (i = 0; i < mem->nr_blocks; i++) {
				block = &mem->block_array[i];
				start_p = (u32) block->addr;
				end_p = start_p + block->size - 1;
				outer_flush_range(start_p, end_p);
			}
#endif
			return;
		}
	}

	for (i = 0; i < mem->nr_blocks; i++) {
		/* TODO: Find out which flush method is best of 1) DMA or 2) normal flush functions */
		/* #define USING_DMA_FLUSH */
#ifdef USING_DMA_FLUSH
		DEBUG_ASSERT(PAGE_SIZE == mem->block_array[i].size);
		dma_map_page(NULL,
			     pfn_to_page(mem->block_array[i].
					 addr >> PAGE_SHIFT), 0, PAGE_SIZE,
			     DMA_BIDIRECTIONAL);
		/* dma_unmap_page(NULL, mem->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL); */
#else
		block = &mem->block_array[i];
		start_p = (u32) block->addr;
		end_p = start_p + block->size - 1;
		if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
			dmac_flush_range(phys_to_virt(start_p),
					 phys_to_virt(end_p));
			outer_flush_range(start_p, end_p);
		} else {
			dmac_map_area(phys_to_virt(start_p), block->size,
				      DMA_TO_DEVICE);
			outer_clean_range(start_p, end_p);
		}
#endif
	}
}
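
/**
 * Look up the ump_dd_mem that backs a user virtual address by finding the
 * owning VMA and following its vma_usage_tracker back to the allocation
 * descriptor. *mem is set to NULL when the address is not a UMP mapping.
 */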
void _ump_osk_mem_mapregion_get( ump_dd_mem ** mem, unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	ump_vma_usage_tracker * vma_usage_tracker;
	ump_memory_allocation *descriptor;
	ump_dd_handle handle;

	DBG_MSG(3, ("_ump_osk_mem_mapregion_get: vaddr 0x%08lx\n", vaddr));

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, vaddr);
	up_read(&mm->mmap_sem);
	if (!vma)
	{
		DBG_MSG(3, ("Not found VMA\n"));
		*mem = NULL;
		return;
	}

	DBG_MSG(4, ("Get vma: 0x%08lx vma->vm_start: 0x%08lx\n", (unsigned long)vma, vma->vm_start));

	vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
	if (vma_usage_tracker == NULL)
	{
		DBG_MSG(3, ("Not found vma_usage_tracker\n"));
		*mem = NULL;
		return;
	}

	descriptor = vma_usage_tracker->descriptor;
	handle = (ump_dd_handle)descriptor->handle;

	DBG_MSG(3, ("Get handle: 0x%08lx\n", (unsigned long)handle));
	*mem = (ump_dd_mem*)handle;
}