2 * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
4 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
7 * A copy of the licence is included with the program, and can also be obtained from Free Software
8 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
12 * @file mali_osk_low_level_mem.c
13 * Implementation of the OS abstraction layer for the kernel device driver
16 /* needed to detect kernel version specific code */
17 #include <linux/version.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/spinlock.h>
25 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
26 #include <linux/shrinker.h>
30 #include <linux/memcontrol.h>
34 #include "mali_ukk.h" /* required to hook in _mali_ukk_mem_mmap handling */
35 #include "mali_kernel_common.h"
36 #include "mali_kernel_linux.h"
38 #ifdef CONFIG_SLP_MALI_DBG
39 #include <mach/regs-pmu.h>
/* VMA open/close callbacks installed on CPU mappings of Mali memory. */
static void mali_kernel_memory_vma_open(struct vm_area_struct * vma);
static void mali_kernel_memory_vma_close(struct vm_area_struct * vma);

/* CPU page-fault handler; the prototype depends on kernel version
 * (.fault on >= 2.6.26, .nopfn before that).
 * NOTE(review): the #else/#endif lines appear to be missing from this extract. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
/* Per-VMA reference tracker used to decide when the last mapping of a region
 * is closed.
 * NOTE(review): the struct fields are missing from this extract; code below
 * uses ->references (open/close refcount) and ->cookie (descriptor pointer). */
typedef struct mali_vma_usage_tracker
} mali_vma_usage_tracker;

#define INVALID_PAGE 0xffffffff /* sentinel returned by _kernel_page_allocate() on failure */

/* Linked list structure to hold details of all OS allocations in a particular
 * mapping. NOTE(review): fields other than ->next are missing from this
 * extract; code below also uses ->physaddr and ->offset. */
struct AllocationList *next;
typedef struct AllocationList AllocationList;

/* Private structure to store details of a mapping region returned
 * from _mali_osk_mem_mapregion_init */
struct vm_area_struct *vma;
struct AllocationList *list;
struct AllocationList *tail;
typedef struct MappingInfo MappingInfo;
/* Page-level allocation helpers; definitions appear later in this file. */
static u32 _kernel_page_allocate(void);
static void _kernel_page_release(u32 physical_address);
static AllocationList * _allocation_list_item_get(void);
static void _allocation_list_item_release(AllocationList * item);
/* Variable declarations */
static DEFINE_SPINLOCK(allocation_list_spinlock);
/* Cache of pre-allocated pages; protected by allocation_list_spinlock. */
static AllocationList * pre_allocated_memory = (AllocationList*) NULL ;
static int pre_allocated_memory_size_current = 0;
#ifdef MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB
static int pre_allocated_memory_size_max = MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 1024 * 1024;
static int pre_allocated_memory_size_max = 16 * 1024 * 1024; /* 16 MiB */
/* VM operations for CPU mappings of Mali memory.  The fault handler always
 * fails, since all Mali memory is fully populated at mmap time. */
static struct vm_operations_struct mali_kernel_vm_ops =
.open = mali_kernel_memory_vma_open,
.close = mali_kernel_memory_vma_close,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
.fault = mali_kernel_memory_cpu_page_fault_handler
.nopfn = mali_kernel_memory_cpu_page_fault_handler
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
/* Memory shrinker callback: under memory pressure, release pages from the
 * pre_allocated_memory pool back to the kernel.  Returns the number of
 * cached pages remaining (pool size / PAGE_SIZE).  The signature varies
 * with kernel version; the #else/#endif lines are missing from this extract. */
static int mali_mem_shrink(int nr_to_scan, gfp_t gfp_mask)
static int mali_mem_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
static int mali_mem_shrink(struct shrinker *shrinker, struct shrink_control *sc)
AllocationList *item;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
int nr = sc->nr_to_scan;
return pre_allocated_memory_size_current / PAGE_SIZE;
if (0 == pre_allocated_memory_size_current)
/* No pages available */
/* trylock: never block here — shrinkers can run in reclaim context */
if (0 == spin_trylock_irqsave(&allocation_list_spinlock, flags))
/* Not able to lock. */
/* Pop and free up to nr pages from the head of the pool. */
while (pre_allocated_memory && nr > 0)
item = pre_allocated_memory;
pre_allocated_memory = item->next;
_kernel_page_release(item->physaddr);
_mali_osk_free(item);
pre_allocated_memory_size_current -= PAGE_SIZE;
spin_unlock_irqrestore(&allocation_list_spinlock,flags);
return pre_allocated_memory_size_current / PAGE_SIZE;

/* Registered/unregistered in mali_osk_low_level_mem_init()/term(). */
struct shrinker mali_mem_shrinker = {
.shrink = mali_mem_shrink,
.seeks = DEFAULT_SEEKS,
/* Module init: reset the pre-allocation pool and register the shrinker. */
void mali_osk_low_level_mem_init(void)
pre_allocated_memory = (AllocationList*) NULL ;
register_shrinker(&mali_mem_shrinker);
/* Module teardown: unregister the shrinker, then release every page still
 * held in the pre-allocation pool. */
void mali_osk_low_level_mem_term(void)
unregister_shrinker(&mali_mem_shrinker);
while ( NULL != pre_allocated_memory )
AllocationList *item;
item = pre_allocated_memory;
pre_allocated_memory = item->next;
_kernel_page_release(item->physaddr);
_mali_osk_free( item );
pre_allocated_memory_size_current = 0;
/* Allocate one zeroed highmem page, charge it to the current task's memcg,
 * and map it for bidirectional DMA (which also flushes it from CPU caches).
 * Returns the DMA/physical address.
 * NOTE(review): the failure-return line(s) after the NULL check are missing
 * from this extract; presumably INVALID_PAGE is returned — confirm. */
static u32 _kernel_page_allocate(void)
struct page *new_page;
new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
if ( NULL == new_page )
/* SLP: charging 3D allocated page */
mem_cgroup_newpage_charge(new_page, current->mm, GFP_HIGHUSER |
__GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
#ifdef CONFIG_SLP_LOWMEM_NOTIFY
inc_mm_counter(current->mm, MM_ANONPAGES);
/* Ensure page is flushed from CPU caches. */
/* NOTE(review): dma_map_page() result is not checked with dma_mapping_error() */
linux_phys_addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
return linux_phys_addr;
/* Release a page previously obtained from _kernel_page_allocate(): unmap it
 * from DMA, uncharge it from the memcg, and hand it back to the kernel. */
static void _kernel_page_release(u32 physical_address)
struct page *unmap_page;
dma_unmap_page(NULL, physical_address, PAGE_SIZE, DMA_BIDIRECTIONAL);
unmap_page = pfn_to_page( physical_address >> PAGE_SHIFT );
MALI_DEBUG_ASSERT_POINTER( unmap_page );
/* SLP: uncharging 3D allocated page */
mem_cgroup_uncharge_page(unmap_page);
#ifdef CONFIG_SLP_LOWMEM_NOTIFY
if (current && current->mm)
dec_mm_counter(current->mm, MM_ANONPAGES);
__free_page( unmap_page );
/* Get an allocation-list item: reuse one (with its page) from the
 * pre-allocated pool when available, otherwise allocate a fresh item and
 * back it with a new page.  Returns NULL on out-of-memory.
 * NOTE(review): several lines (including the else branch and the final
 * return) are missing from this extract. */
static AllocationList * _allocation_list_item_get(void)
AllocationList *item = NULL;
spin_lock_irqsave(&allocation_list_spinlock,flags);
if ( pre_allocated_memory )
/* Fast path: pop the head of the pool. */
item = pre_allocated_memory;
pre_allocated_memory = pre_allocated_memory->next;
pre_allocated_memory_size_current -= PAGE_SIZE;
spin_unlock_irqrestore(&allocation_list_spinlock,flags);
spin_unlock_irqrestore(&allocation_list_spinlock,flags);
/* Slow path: allocate a new item and a backing page. */
item = _mali_osk_malloc( sizeof(AllocationList) );
item->physaddr = _kernel_page_allocate();
if ( INVALID_PAGE == item->physaddr )
/* Non-fatal error condition, out of memory. Upper levels will handle this. */
_mali_osk_free( item );
/* Return an item to the pre-allocated pool while the pool is below its size
 * cap; otherwise release the backing page and free the item. */
static void _allocation_list_item_release(AllocationList * item)
spin_lock_irqsave(&allocation_list_spinlock,flags);
if ( pre_allocated_memory_size_current < pre_allocated_memory_size_max)
item->next = pre_allocated_memory;
pre_allocated_memory = item;
pre_allocated_memory_size_current += PAGE_SIZE;
spin_unlock_irqrestore(&allocation_list_spinlock,flags);
spin_unlock_irqrestore(&allocation_list_spinlock,flags);
_kernel_page_release(item->physaddr);
_mali_osk_free( item );
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
/* CPU page-fault handler for Mali mappings.  Always fails (SIGBUS): Mali
 * memory is fully pre-faulted when mapped, so a CPU fault here means an
 * access to a region not currently mapped to any Mali memory. */
static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
void __user * address;
address = vmf->virtual_address;
/*
 * We always fail the call since all memory is pre-faulted when assigned to the process.
 * Only the Mali cores can use page faults to extend buffers.
 */
MALI_DEBUG_PRINT(1, ("Page-fault in Mali memory region caused by the CPU.\n"));
MALI_DEBUG_PRINT(1, ("Tried to access %p (process local virtual address) which is not currently mapped to any Mali memory.\n", (void*)address));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
return VM_FAULT_SIGBUS;
/* VMA open callback: take one reference on the mapping's usage tracker. */
static void mali_kernel_memory_vma_open(struct vm_area_struct * vma)
mali_vma_usage_tracker * vma_usage_tracker;
MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));
vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
vma_usage_tracker->references++;
/* VMA close callback: drop one reference; on the last close, tear down the
 * Mali-side mapping via _mali_ukk_mem_munmap(). */
static void mali_kernel_memory_vma_close(struct vm_area_struct * vma)
_mali_uk_mem_munmap_s args = {0, };
mali_memory_allocation * descriptor;
mali_vma_usage_tracker * vma_usage_tracker;
MALI_DEBUG_PRINT(3, ("Close called on vma %p\n", vma));
vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
BUG_ON(!vma_usage_tracker);
BUG_ON(0 == vma_usage_tracker->references);
vma_usage_tracker->references--;
if (0 != vma_usage_tracker->references)
/* Not the last close: another mapping of this region still exists. */
MALI_DEBUG_PRINT(3, ("Ignoring this close, %d references still exists\n", vma_usage_tracker->references));
/** @note args->context unused, initialized to 0.
 * Instead, we use the memory_session from the cookie */
descriptor = (mali_memory_allocation *)vma_usage_tracker->cookie;
args.cookie = (u32)descriptor;
args.mapping = descriptor->mapping;
args.size = descriptor->size;
_mali_ukk_mem_munmap( &args );
/* vma_usage_tracker is free()d by _mali_osk_mem_mapregion_term().
 * In the case of the memory engine, it is called as the release function that has been registered with the engine*/
/* Full memory barrier.  NOTE(review): body missing from this extract. */
void _mali_osk_mem_barrier( void )
/* Write memory barrier.  NOTE(review): body missing from this extract. */
void _mali_osk_write_mem_barrier( void )
/* Map a physical IO region, uncached, into kernel virtual address space.
 * The description argument is unused here. */
mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description )
return (mali_io_address)ioremap_nocache(phys, size);
/* Undo _mali_osk_mem_mapioregion(); phys and size are unused by iounmap. */
void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address virt )
iounmap((void*)virt);
/* Allocate a DMA-coherent, page-aligned region (currently only used for MMU
 * page tables).  On success, *phys receives the physical address and the
 * CPU-side mapping is returned.
 * NOTE(review): the failure-return line after the debug print is missing
 * from this extract. */
mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size )
MALI_DEBUG_ASSERT_POINTER( phys );
MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
MALI_DEBUG_ASSERT( 0 != size );
/* dma_alloc_* uses a limited region of address space. On most arch/marchs
 * 2 to 14 MiB is available. This should be enough for the page tables, which
 * currently is the only user of this function. */
virt = dma_alloc_coherent(NULL, size, phys, GFP_KERNEL | GFP_DMA );
MALI_DEBUG_PRINT(3, ("Page table virt: 0x%x = dma_alloc_coherent(size:%d, phys:0x%x, )\n", virt, size, phys));
MALI_DEBUG_PRINT(5, ("allocioregion: Failed to allocate Pagetable memory, size=0x%.8X\n", size ));
MALI_DEBUG_ASSERT( 0 == (*phys & ~_MALI_OSK_CPU_PAGE_MASK) );
return (mali_io_address)virt;
/* Free a region previously obtained from _mali_osk_mem_allocioregion(). */
void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address virt )
MALI_DEBUG_ASSERT_POINTER( (void*)virt );
MALI_DEBUG_ASSERT( 0 != size );
MALI_DEBUG_ASSERT( 0 == (phys & ( (1 << PAGE_SHIFT) - 1 )) );
dma_free_coherent(NULL, size, virt, phys);
/* Reserve a physical memory region.  A no-op in the GPL build, where the
 * platform resources are requested automatically. */
_mali_osk_errcode_t inline _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description )
#if MALI_LICENSE_IS_GPL
return _MALI_OSK_ERR_OK; /* GPL driver gets the mem region for the resources registered automatically */
return ((NULL == request_mem_region(phys, size, description)) ? _MALI_OSK_ERR_NOMEM : _MALI_OSK_ERR_OK);
/* Release a region reserved by _mali_osk_mem_reqregion() (non-GPL builds only). */
void inline _mali_osk_mem_unreqregion( u32 phys, u32 size )
#if !MALI_LICENSE_IS_GPL
release_mem_region(phys, size);
/* Relaxed (unordered) 32-bit register write at addr + offset. */
void inline _mali_osk_mem_iowrite32_relaxed( volatile mali_io_address addr, u32 offset, u32 val )
__raw_writel(cpu_to_le32(val),((u8*)addr) + offset);
#ifdef CONFIG_SLP_MALI_DBG
/* SLP debug variant of the relaxed register write (identical implementation). */
void inline _mali_osk_mem_iowrite32_relaxed_cpu( volatile mali_io_address addr,
u32 offset, u32 val )
__raw_writel(cpu_to_le32(val),((u8*)addr) + offset);
/* Ordered 32-bit register read at addr + offset. */
u32 inline _mali_osk_mem_ioread32( volatile mali_io_address addr, u32 offset )
return ioread32(((u8*)addr) + offset);
#ifdef CONFIG_SLP_MALI_DBG
/* SLP debug variant of the register read (identical implementation). */
u32 inline _mali_osk_mem_ioread32_cpu(volatile mali_io_address addr, u32 offset)
return ioread32(((u8*)addr) + offset);
/* Ordered 32-bit register write at addr + offset. */
void inline _mali_osk_mem_iowrite32( volatile mali_io_address addr, u32 offset, u32 val )
iowrite32(val, ((u8*)addr) + offset);
/* Flush all CPU caches — intentionally a no-op in this implementation. */
void _mali_osk_cache_flushall( void )
/** @note Cached memory is not currently supported in this implementation */
/* For uncached mappings a write barrier is sufficient to flush the range;
 * the uncached_mapping/offset/size arguments are unused. */
void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size )
_mali_osk_write_mem_barrier();
/* Initialise a userspace mapping region: wrap the vma supplied through
 * descriptor->process_addr_mapping_info in a MappingInfo, attach a
 * reference tracker, and configure the vma flags/ops for Mali memory.
 * Returns _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_FAULT on bad arguments or
 * allocation failure. */
_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor )
struct vm_area_struct *vma;
mali_vma_usage_tracker * vma_usage_tracker;
MappingInfo *mappingInfo;
if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
vma = (struct vm_area_struct*)descriptor->process_addr_mapping_info;
if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
/* Re-write the process_addr_mapping_info */
mappingInfo = _mali_osk_calloc( 1, sizeof(MappingInfo) );
if ( NULL == mappingInfo ) return _MALI_OSK_ERR_FAULT;
vma_usage_tracker = _mali_osk_calloc( 1, sizeof(mali_vma_usage_tracker) );
if (NULL == vma_usage_tracker)
MALI_DEBUG_PRINT(2, ("Failed to allocate memory to track memory usage\n"));
_mali_osk_free( mappingInfo );
return _MALI_OSK_ERR_FAULT;
mappingInfo->vma = vma;
descriptor->process_addr_mapping_info = mappingInfo;
/* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
descriptor->mapping = (void __user*)vma->vm_start;
/* list member is already NULL */
/*
 * set some bits which indicate that:
 * The memory is IO memory, meaning that no paging is to be performed and the memory should not be included in crash dumps
 * The memory is reserved, meaning that it's present and can never be paged out (see also previous entry)
 */
vma->vm_flags |= VM_IO;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_flags |= VM_DONTCOPY;
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */
vma_usage_tracker->references = 1; /* set initial reference count to be 1 as vma_open won't be called for the first mmap call */
vma_usage_tracker->cookie = (u32)descriptor; /* cookie for munmap */
vma->vm_private_data = vma_usage_tracker;
return _MALI_OSK_ERR_OK;
/* Tear down a region set up by _mali_osk_mem_mapregion_init(): free the
 * usage tracker and MappingInfo.  Linux removes the actual CPU mapping as
 * part of munmap, so nothing else needs undoing here. */
void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor )
struct vm_area_struct* vma;
mali_vma_usage_tracker * vma_usage_tracker;
MappingInfo *mappingInfo;
if (NULL == descriptor) return;
MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
MALI_DEBUG_ASSERT_POINTER( mappingInfo );
/* Linux does the right thing as part of munmap to remove the mapping
 * All that remains is that we remove the vma_usage_tracker setup in init() */
vma = mappingInfo->vma;
MALI_DEBUG_ASSERT_POINTER( vma );
/* ASSERT that there are no allocations on the list. Unmap should've been
 * called on all OS allocations. */
MALI_DEBUG_ASSERT( NULL == mappingInfo->list );
vma_usage_tracker = vma->vm_private_data;
/* We only get called if mem_mapregion_init succeeded */
_mali_osk_free(vma_usage_tracker);
_mali_osk_free( mappingInfo );
/* Map physical memory into the userspace region at the given page-aligned
 * offset.  If *phys_addr is the OS_ALLOCATED magic value, a page is taken
 * from the allocation pool, remapped into the vma, recorded on the
 * MappingInfo list, and *phys_addr is rewritten to the real physical
 * address; otherwise the caller-supplied physical address is remapped
 * directly.  Returns _MALI_OSK_ERR_OK on success. */
_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size )
struct vm_area_struct *vma;
MappingInfo *mappingInfo;
if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
MALI_DEBUG_ASSERT_POINTER( phys_addr );
MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK));
if (NULL == descriptor->mapping) return _MALI_OSK_ERR_INVALID_ARGS;
if (size > (descriptor->size - offset))
MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_map: virtual memory area not large enough to map physical 0x%x size %x into area 0x%x at offset 0x%xr\n",
*phys_addr, size, descriptor->mapping, offset));
return _MALI_OSK_ERR_FAULT;
mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
MALI_DEBUG_ASSERT_POINTER( mappingInfo );
vma = mappingInfo->vma;
if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
MALI_DEBUG_PRINT(7, ("Process map: mapping 0x%08X to process address 0x%08lX length 0x%08X\n", *phys_addr, (long unsigned int)(descriptor->mapping + offset), size));
if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == *phys_addr )
/* OS-allocated path: back the mapping with a pool page. */
_mali_osk_errcode_t ret;
AllocationList *alloc_item;
u32 linux_phys_frame_num;
alloc_item = _allocation_list_item_get();
if (NULL == alloc_item)
MALI_DEBUG_PRINT(1, ("Failed to allocate list item\n"));
return _MALI_OSK_ERR_NOMEM;
linux_phys_frame_num = alloc_item->physaddr >> PAGE_SHIFT;
ret = ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, linux_phys_frame_num, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
if ( ret != _MALI_OSK_ERR_OK)
MALI_PRINT_ERROR(("%s %d could not remap_pfn_range()\n", __FUNCTION__, __LINE__));
_allocation_list_item_release(alloc_item);
/* Put our alloc_item into the list of allocations on success */
if (NULL == mappingInfo->list)
mappingInfo->list = alloc_item;
mappingInfo->tail->next = alloc_item;
mappingInfo->tail = alloc_item;
alloc_item->next = NULL;
alloc_item->offset = offset;
/* Write out new physical address on success */
*phys_addr = alloc_item->physaddr;
/* Otherwise, Use the supplied physical address */
/* ASSERT that supplied phys_addr is page aligned */
MALI_DEBUG_ASSERT( 0 == ((*phys_addr) & ~_MALI_OSK_CPU_PAGE_MASK) );
return ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, *phys_addr >> PAGE_SHIFT, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
/* Unmap a page-aligned sub-range of the userspace region.  For OS-allocated
 * physical memory, walk the MappingInfo allocation list one page at a time,
 * releasing each matching AllocationList item back to the pool.  The actual
 * CPU mapping removal is done by Linux as part of munmap.
 * NOTE(review): several loop/brace lines are missing from this extract. */
void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags )
MappingInfo *mappingInfo;
if (NULL == descriptor) return;
MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK) );
if (NULL == descriptor->mapping) return;
if (size > (descriptor->size - offset))
MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_unmap: virtual memory area not large enough to unmap size %x from area 0x%x at offset 0x%x\n",
size, descriptor->mapping, offset));
mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
MALI_DEBUG_ASSERT_POINTER( mappingInfo );
if ( 0 != (flags & _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR) )
/* This physical RAM was allocated in _mali_osk_mem_mapregion_map and
 * so needs to be unmapped
 */
/* First find the allocation in the list of allocations */
AllocationList *alloc = mappingInfo->list;
AllocationList **prev = &(mappingInfo->list);
while (NULL != alloc && alloc->offset != offset)
prev = &(alloc->next);
MALI_DEBUG_PRINT(1, ("Unmapping memory that isn't mapped\n"));
size -= _MALI_OSK_CPU_PAGE_SIZE;
offset += _MALI_OSK_CPU_PAGE_SIZE;
_allocation_list_item_release(alloc);
/* Move onto the next allocation */
size -= _MALI_OSK_CPU_PAGE_SIZE;
offset += _MALI_OSK_CPU_PAGE_SIZE;
/* Linux does the right thing as part of munmap to remove the mapping */