2 * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
4 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
7 * A copy of the licence is included with the program, and can also be obtained from Free Software
8 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
11 #include <linux/list.h>
13 #include <linux/mm_types.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/version.h>
17 #include <linux/platform_device.h>
18 #include <linux/workqueue.h>
21 #include "mali_memory.h"
22 #include "mali_memory_os_alloc.h"
23 #include "mali_kernel_linux.h"
25 /* Minimum size of allocator page pool */
/* Pool watermark in pages: 256 pages per MiB, i.e. assumes 4 KiB pages. */
26 #define MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 256)
/* Delay before the deferred pool-trim work runs.
 * NOTE(review): uses CONFIG_HZ rather than HZ; usually identical, but HZ is
 * the conventional unit for jiffies arithmetic -- confirm. */
27 #define MALI_OS_MEMORY_POOL_TRIM_JIFFIES (10 * CONFIG_HZ) /* Default to 10s */
29 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
30 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
/* Pre-2.6.35 shrinker callback: no struct shrinker argument. */
31 static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask);
/* 2.6.35 .. 3.0 shrinker callback: shrinker pointer + raw scan parameters. */
33 static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask);
/* 3.0+ shrinker callback: parameters wrapped in struct shrink_control. */
36 static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
/* Deferred work handler that trims the page pool back toward its watermark. */
38 static void mali_mem_os_trim_pool(struct work_struct *work);
/* Global allocator state: a pool of free (already DMA-mapped) pages, usage
 * counters, a memory-pressure shrinker and a deferred trim work item.
 * Statically initialized below. (Some fields, e.g. pool_lock/pool_count,
 * fall outside this view of the file.) */
40 static struct mali_mem_os_allocator {
42 	struct list_head pool_pages;
/* Pages currently handed out to allocations (does not count pooled pages). */
45 	atomic_t allocated_pages;
/* Upper bound, in bytes, on total OS memory given to Mali. */
46 	size_t allocation_limit;
48 	struct shrinker shrinker;
49 	struct delayed_work timed_shrinker;
50 	struct workqueue_struct *wq;
51 } mali_mem_os_allocator = {
52 	.pool_lock = __SPIN_LOCK_UNLOCKED(pool_lock),
53 	.pool_pages = LIST_HEAD_INIT(mali_mem_os_allocator.pool_pages),
56 	.allocated_pages = ATOMIC_INIT(0),
57 	.allocation_limit = 0,
/* Legacy (pre-3.12) single-callback shrinker interface. */
59 	.shrinker.shrink = mali_mem_os_shrink,
60 	.shrinker.seeks = DEFAULT_SEEKS,
/* Pick the deferrable delayed-work initializer matching the kernel version. */
61 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
62 	.timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool, TIMER_DEFERRABLE),
63 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
64 	.timed_shrinker = __DEFERRED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
66 	.timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
/* Return all pages of an OS-memory allocation to the free-page pool and,
 * if the pool has grown past the watermark, schedule the deferred trimmer.
 * Pages are pooled rather than freed so later allocations can reuse them
 * without re-mapping for DMA. */
70 static void mali_mem_os_free(mali_mem_allocation *descriptor)
74 	MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);
76 	atomic_sub(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);
78 	/* Put pages on pool. */
/* Detach the whole page list from the descriptor in one operation. */
79 	list_cut_position(&pages, &descriptor->os_mem.pages, descriptor->os_mem.pages.prev);
81 	spin_lock(&mali_mem_os_allocator.pool_lock);
83 	list_splice(&pages, &mali_mem_os_allocator.pool_pages);
84 	mali_mem_os_allocator.pool_count += descriptor->os_mem.count;
86 	spin_unlock(&mali_mem_os_allocator.pool_lock);
/* Pool exceeded the kept minimum: arm the deferred trim work. */
88 	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
89 		MALI_DEBUG_PRINT(5, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
90 		queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
/* Populate descriptor->os_mem.pages with enough pages for 'size' bytes:
 * reuse pages from the pool first, then allocate and DMA-map fresh ones.
 * On allocation failure the pages obtained so far are rolled back through
 * mali_mem_os_free(). */
94 static int mali_mem_os_alloc_pages(mali_mem_allocation *descriptor, u32 size)
96 	struct page *new_page, *tmp;
98 	size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
99 	size_t remaining = page_count;
102 	MALI_DEBUG_ASSERT_POINTER(descriptor);
103 	MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);
105 	INIT_LIST_HEAD(&descriptor->os_mem.pages);
106 	descriptor->os_mem.count = page_count;
108 	/* Grab pages from pool. */
111 	spin_lock(&mali_mem_os_allocator.pool_lock);
112 	pool_pages = min(remaining, mali_mem_os_allocator.pool_count);
113 	for (i = pool_pages; i > 0; i--) {
114 		BUG_ON(list_empty(&mali_mem_os_allocator.pool_pages));
/* Move the head of the pool list onto the local 'pages' list. */
115 		list_move(mali_mem_os_allocator.pool_pages.next, &pages);
117 	mali_mem_os_allocator.pool_count -= pool_pages;
118 	remaining -= pool_pages;
119 	spin_unlock(&mali_mem_os_allocator.pool_lock);
122 	/* Process pages from pool. */
124 	list_for_each_entry_safe(new_page, tmp, &pages, lru) {
125 		BUG_ON(NULL == new_page);
/* Pooled pages are already DMA-mapped; just attach them to the allocation. */
127 		list_move_tail(&new_page->lru, &descriptor->os_mem.pages);
130 	/* Allocate new pages, if needed. */
131 	for (i = 0; i < remaining; i++) {
/* Zeroed highmem page; __GFP_NOWARN since failure is handled locally. */
134 		new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
136 		if (unlikely(NULL == new_page)) {
137 			/* Calculate the number of pages actually allocated, and free them. */
138 			descriptor->os_mem.count = (page_count - remaining) + i;
/* Re-add the count first; mali_mem_os_free() subtracts it again. */
139 			atomic_add(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);
140 			mali_mem_os_free(descriptor);
144 		/* Ensure page is flushed from CPU caches. */
/* NOTE(review): the dma_map_page() result is not checked with
 * dma_mapping_error() -- confirm this is acceptable on target platforms. */
145 		dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
146 		                        0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
148 		/* Store page phys addr */
/* page_private() carries the DMA/bus address for the later MMU mapping. */
149 		SetPagePrivate(new_page);
150 		set_page_private(new_page, dma_addr);
152 		list_add_tail(&new_page->lru, &descriptor->os_mem.pages);
155 	atomic_add(page_count, &mali_mem_os_allocator.allocated_pages);
/* Pool is below the watermark again: the trim timer is no longer needed. */
157 	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
158 		MALI_DEBUG_PRINT(4, ("OS Mem: Stopping pool trim timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
159 		cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
/* Create the Mali MMU mapping for an OS allocation: reserve page-table
 * entries for the range, then point each Mali virtual page at the DMA
 * address stashed in page_private() by mali_mem_os_alloc_pages(). */
165 static int mali_mem_os_mali_map(mali_mem_allocation *descriptor, struct mali_session_data *session)
167 	struct mali_page_directory *pagedir = session->page_directory;
169 	_mali_osk_errcode_t err;
170 	u32 virt = descriptor->mali_mapping.addr;
171 	u32 prop = descriptor->mali_mapping.properties;
173 	MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);
/* Make sure page tables exist for the whole range before writing entries. */
175 	err = mali_mem_mali_map_prepare(descriptor);
176 	if (_MALI_OSK_ERR_OK != err) {
180 	list_for_each_entry(page, &descriptor->os_mem.pages, lru) {
/* DMA/bus address stored at allocation time. */
181 		u32 phys = page_private(page);
182 		mali_mmu_pagedir_update(pagedir, virt, phys, MALI_MMU_PAGE_SIZE, prop);
183 		virt += MALI_MMU_PAGE_SIZE;
/* Tear down the Mali-side MMU mapping for the allocation. */
189 static void mali_mem_os_mali_unmap(struct mali_session_data *session, mali_mem_allocation *descriptor)
191 	mali_mem_mali_map_free(descriptor);
/* Insert every page of the allocation into the user VMA with
 * vm_insert_pfn(), one _MALI_OSK_MALI_PAGE_SIZE step at a time; stops at
 * the first insertion failure. */
194 static int mali_mem_os_cpu_map(mali_mem_allocation *descriptor, struct vm_area_struct *vma)
198 	unsigned long addr = vma->vm_start;
200 	list_for_each_entry(page, &descriptor->os_mem.pages, lru) {
201 		/* We should use vm_insert_page, but it does a dcache
202 		 * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
203 		ret = vm_insert_page(vma, addr, page);
205 		ret = vm_insert_pfn(vma, addr, page_to_pfn(page));
207 		if (unlikely(0 != ret)) {
210 		addr += _MALI_OSK_MALI_PAGE_SIZE;
/* Top-level OS-memory allocation path: enforce the global allocation limit,
 * create a descriptor, obtain pages, then map them on the Mali MMU and into
 * the caller's vma. Returns NULL on any failure; partial work is unwound
 * via the error labels at the bottom. */
216 mali_mem_allocation *mali_mem_os_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session)
218 	mali_mem_allocation *descriptor;
221 	/* MALI_SEC */ //Remove limitation of Texture memory size for GLB2.7 T-rex
/* Reject the request if it would push total usage past allocation_limit. */
223 	if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
224 		MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
226 		                     atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
227 		                     mali_mem_os_allocator.allocation_limit));
232 	descriptor = mali_mem_descriptor_create(session, MALI_MEM_OS);
233 	if (NULL == descriptor) return NULL;
235 	descriptor->mali_mapping.addr = mali_addr;
236 	descriptor->size = size;
237 	descriptor->cpu_mapping.addr = (void __user*)vma->vm_start;
238 	descriptor->cpu_mapping.ref = 1;
/* NOTE(review): VM_SHARED selects the default MMU flags; otherwise GP
 * read-allocate is forced and the vma is marked shared -- rationale for the
 * flag choice is not visible in this file, confirm against the MMU docs. */
240 	if (VM_SHARED == (VM_SHARED & vma->vm_flags)) {
241 		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
243 		/* Cached Mali memory mapping */
244 		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
245 		vma->vm_flags |= VM_SHARED;
248 	err = mali_mem_os_alloc_pages(descriptor, size); /* Allocate pages */
249 	if (0 != err) goto alloc_failed;
251 	/* Take session memory lock */
252 	_mali_osk_mutex_wait(session->memory_lock);
254 	err = mali_mem_os_mali_map(descriptor, session); /* Map on Mali */
255 	if (0 != err) goto mali_map_failed;
257 	_mali_osk_mutex_signal(session->memory_lock);
259 	err = mali_mem_os_cpu_map(descriptor, vma); /* Map on CPU */
260 	if (0 != err) goto cpu_map_failed;
/* Error unwind below: undo in reverse order of setup. */
265 	mali_mem_os_mali_unmap(session, descriptor);
267 	_mali_osk_mutex_signal(session->memory_lock);
268 	mali_mem_os_free(descriptor);
270 	mali_mem_descriptor_destroy(descriptor);
271 	MALI_DEBUG_PRINT(2, ("OS allocator: Failed to allocate memory (%d)\n", err));
/* Release an OS allocation: unmap it from the Mali MMU, then return its
 * pages to the free-page pool. NOTE(review): expected locking (session
 * memory lock) is not visible in this view -- confirm against callers. */
275 void mali_mem_os_release(mali_mem_allocation *descriptor)
277 	struct mali_session_data *session = descriptor->session;
279 	/* Unmap the memory from the mali virtual address space. */
280 	mali_mem_os_mali_unmap(session, descriptor);
283 	mali_mem_os_free(descriptor);
/* Fixed-size cache of Mali page-table pages (physical address + kernel
 * mapping pairs), protected by .lock; avoids a dma_alloc_writecombine()
 * round trip for every page-table page. (The struct head and the count
 * field fall outside this view of the file.) */
287 #define MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE 128
291 		mali_io_address mapping;
292 	} page[MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE];
295 } mali_mem_page_table_page_pool = {
297 	.lock = __SPIN_LOCK_UNLOCKED(pool_lock),
/* Obtain one page-table page: take it from the pool when available,
 * otherwise allocate a fresh write-combined DMA page. Outputs the physical
 * address and kernel mapping; returns _MALI_OSK_ERR_OK on success and
 * _MALI_OSK_ERR_NOMEM otherwise. */
300 _mali_osk_errcode_t mali_mem_os_get_table_page(u32 *phys, mali_io_address *mapping)
302 	_mali_osk_errcode_t ret = _MALI_OSK_ERR_NOMEM;
304 	spin_lock(&mali_mem_page_table_page_pool.lock);
305 	if (0 < mali_mem_page_table_page_pool.count) {
/* Pop the most recently returned entry (LIFO). */
306 		u32 i = --mali_mem_page_table_page_pool.count;
307 		*phys = mali_mem_page_table_page_pool.page[i].phys;
308 		*mapping = mali_mem_page_table_page_pool.page[i].mapping;
310 		ret = _MALI_OSK_ERR_OK;
312 	spin_unlock(&mali_mem_page_table_page_pool.lock);
/* Pool was empty: fall back to a fresh allocation outside the lock. */
314 	if (_MALI_OSK_ERR_OK != ret) {
315 		*mapping = dma_alloc_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, phys, GFP_KERNEL);
316 		if (NULL != *mapping) {
317 			ret = _MALI_OSK_ERR_OK;
/* Return a page-table page: cache it in the pool when there is room,
 * otherwise give it back to the DMA allocator. */
324 void mali_mem_os_release_table_page(u32 phys, void *virt)
326 	spin_lock(&mali_mem_page_table_page_pool.lock);
327 	if (MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE > mali_mem_page_table_page_pool.count) {
328 		u32 i = mali_mem_page_table_page_pool.count;
329 		mali_mem_page_table_page_pool.page[i].phys = phys;
330 		mali_mem_page_table_page_pool.page[i].mapping = virt;
332 		++mali_mem_page_table_page_pool.count;
334 		spin_unlock(&mali_mem_page_table_page_pool.lock);
/* Pool full: drop the lock before the (potentially slow) DMA free. */
336 		spin_unlock(&mali_mem_page_table_page_pool.lock);
338 		dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
/* Free one pooled data page: undo the DMA mapping made at allocation time,
 * clear the private DMA address, and release the page to the kernel. */
342 static void mali_mem_os_free_page(struct page *page)
/* The pool must hold the only reference at this point. */
344 	BUG_ON(page_count(page) != 1);
346 	dma_unmap_page(&mali_platform_device->dev, page_private(page),
347 	               _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
349 	ClearPagePrivate(page);
354 /* The maximum number of page table pool pages to free in one go. */
355 #define MALI_MEM_OS_CHUNK_TO_FREE 64UL
357 /* Free a certain number of pages from the page table page pool.
358  * The pool lock must be held when calling the function, and the lock will be
359  * released before returning.
361 static void mali_mem_os_page_table_pool_free(size_t nr_to_free)
/* Stash phys/virt pairs on the stack so the DMA frees can run unlocked. */
363 	u32 phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
364 	void *virt_arr[MALI_MEM_OS_CHUNK_TO_FREE];
367 	MALI_DEBUG_ASSERT(nr_to_free <= MALI_MEM_OS_CHUNK_TO_FREE);
369 	/* Remove nr_to_free pages from the pool and store them locally on stack. */
370 	for (i = 0; i < nr_to_free; i++) {
/* Take entries from the top of the pool downward. */
371 		u32 pool_index = mali_mem_page_table_page_pool.count - i - 1;
373 		phys_arr[i] = mali_mem_page_table_page_pool.page[pool_index].phys;
374 		virt_arr[i] = mali_mem_page_table_page_pool.page[pool_index].mapping;
377 	mali_mem_page_table_page_pool.count -= nr_to_free;
379 	spin_unlock(&mali_mem_page_table_page_pool.lock);
381 	/* After releasing the spinlock: free the pages we removed from the pool. */
382 	for (i = 0; i < nr_to_free; i++) {
383 		dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt_arr[i], phys_arr[i]);
/* Shrink the page-table page pool toward a target proportional to the data
 * page pool, freeing at most one chunk per invocation. Best effort: bails
 * out immediately if the pool lock is contended. */
387 static void mali_mem_os_trim_page_table_page_pool(void)
389 	size_t nr_to_free = 0;
392 	/* Keep 2 page table pages for each 1024 pages in the page cache. */
393 	nr_to_keep = mali_mem_os_allocator.pool_count / 512;
394 	/* And a minimum of eight pages, to accommodate new sessions. */
/* Trylock: trimming is opportunistic, skip it under contention. */
397 	if (0 == spin_trylock(&mali_mem_page_table_page_pool.lock)) return;
399 	if (nr_to_keep < mali_mem_page_table_page_pool.count) {
400 		nr_to_free = mali_mem_page_table_page_pool.count - nr_to_keep;
/* Cap the batch at one chunk per call. */
401 		nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, nr_to_free);
404 	/* Pool lock will be released by the callee. */
405 	mali_mem_os_page_table_pool_free(nr_to_free);
/* Shrinker callback invoked by the kernel under memory pressure. With
 * nr_to_scan == 0 it only reports how many pages could be reclaimed;
 * otherwise it frees up to 'nr' pages from the data-page pool and trims the
 * page-table pool. Returns the remaining reclaimable page count. */
408 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
409 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
410 static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask)
412 static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
415 static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
418 	struct page *page, *tmp;
420 	struct list_head *le, pages;
421 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
424 	int nr = sc->nr_to_scan;
/* Query mode (nr == 0): just report the reclaimable total. */
428 		return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
431 	if (0 == mali_mem_os_allocator.pool_count) {
432 		/* No pages available */
/* Called from reclaim context: only trylock, with interrupts saved. */
436 	if (0 == spin_trylock_irqsave(&mali_mem_os_allocator.pool_lock, flags)) {
437 		/* Not able to lock. */
441 	/* Release from general page pool */
442 	nr = min((size_t)nr, mali_mem_os_allocator.pool_count);
443 	mali_mem_os_allocator.pool_count -= nr;
/* Walk forward to find the cut point for the batch to detach. */
444 	list_for_each(le, &mali_mem_os_allocator.pool_pages) {
448 	list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
449 	spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);
/* Free the detached pages with the pool lock dropped. */
451 	list_for_each_entry_safe(page, tmp, &pages, lru) {
452 		mali_mem_os_free_page(page);
455 	/* Release some pages from page table page pool */
456 	mali_mem_os_trim_page_table_page_pool();
458 	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
459 		/* Pools are empty, stop timer */
460 		MALI_DEBUG_PRINT(5, ("Stopping timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
461 		cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
464 	return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
/* Deferred-work handler: shrink the data-page pool back toward the static
 * watermark in batches, trim the page-table pool, and re-arm itself while
 * the pool remains above the watermark. */
467 static void mali_mem_os_trim_pool(struct work_struct *data)
469 	struct page *page, *tmp;
470 	struct list_head *le;
476 	MALI_DEBUG_PRINT(3, ("OS Mem: Trimming pool %u\n", mali_mem_os_allocator.pool_count));
478 	/* Release from general page pool */
479 	spin_lock(&mali_mem_os_allocator.pool_lock);
480 	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
481 		size_t count = mali_mem_os_allocator.pool_count - MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES;
482 		/* Free half the pages on the pool above the static limit. Or 64 pages, 256KB. */
483 		nr_to_free = max(count / 2, (size_t)64);
485 		mali_mem_os_allocator.pool_count -= nr_to_free;
/* Advance to the cut point for the batch to detach. */
486 		list_for_each(le, &mali_mem_os_allocator.pool_pages) {
488 			if (0 == nr_to_free) break;
490 		list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
492 	spin_unlock(&mali_mem_os_allocator.pool_lock);
/* Free the detached pages with the lock dropped. */
494 	list_for_each_entry_safe(page, tmp, &pages, lru) {
495 		mali_mem_os_free_page(page);
498 	/* Release some pages from page table page pool */
499 	mali_mem_os_trim_page_table_page_pool();
/* Still above the watermark: run again after the trim interval. */
501 	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
502 		MALI_DEBUG_PRINT(4, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
503 		queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
/* Initialize the OS allocator: create the unbound, single-concurrency trim
 * workqueue and register the memory-pressure shrinker. */
507 _mali_osk_errcode_t mali_mem_os_init(void)
509 	mali_mem_os_allocator.wq = alloc_workqueue("mali-mem", WQ_UNBOUND, 1);
510 	if (NULL == mali_mem_os_allocator.wq) {
511 		return _MALI_OSK_ERR_NOMEM;
514 	register_shrinker(&mali_mem_os_allocator.shrinker);
516 	return _MALI_OSK_ERR_OK;
/* Module teardown: unregister the shrinker, stop the trim work, destroy the
 * workqueue, then drain both pools so no DMA-mapped pages leak.
 * NOTE(review): dma_unmap_page()/dma_free_writecombine() run while the pool
 * spinlocks are held here; presumably safe only because teardown is
 * single-threaded -- confirm. */
519 void mali_mem_os_term(void)
521 	struct page *page, *tmp;
523 	unregister_shrinker(&mali_mem_os_allocator.shrinker);
524 	cancel_delayed_work_sync(&mali_mem_os_allocator.timed_shrinker);
525 	destroy_workqueue(mali_mem_os_allocator.wq);
527 	spin_lock(&mali_mem_os_allocator.pool_lock);
528 	list_for_each_entry_safe(page, tmp, &mali_mem_os_allocator.pool_pages, lru) {
529 		mali_mem_os_free_page(page);
531 		--mali_mem_os_allocator.pool_count;
/* Every pooled data page must be gone by now. */
533 	BUG_ON(mali_mem_os_allocator.pool_count);
534 	spin_unlock(&mali_mem_os_allocator.pool_lock);
536 	/* Release from page table page pool */
540 		spin_lock(&mali_mem_page_table_page_pool.lock);
/* Drain one chunk at a time; the callee releases the lock each pass. */
542 		nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, mali_mem_page_table_page_pool.count);
544 		/* Pool lock will be released by the callee. */
545 		mali_mem_os_page_table_pool_free(nr_to_free);
546 	} while (0 != mali_mem_page_table_page_pool.count);
/* Record the configured OS-memory budget (bytes); enforced on every
 * allocation in mali_mem_os_alloc(). */
549 _mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size)
551 	mali_mem_os_allocator.allocation_limit = size;
/* Report total bytes currently handed out from OS memory (pooled free
 * pages are not counted). */
556 u32 mali_mem_os_stat(void)
558 	return atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE;