kernel/linux-3.0.git: drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_os_alloc.c
/*
 * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include "mali_osk.h"
#include "mali_memory.h"
#include "mali_memory_os_alloc.h"
#include "mali_kernel_linux.h"

/* Minimum size of allocator page pool */
#define MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 256)
#define MALI_OS_MEMORY_POOL_TRIM_JIFFIES (10 * CONFIG_HZ) /* Default to 10s */

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask);
#else
static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask);
#endif
#else
static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
#endif
static void mali_mem_os_trim_pool(struct work_struct *work);

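/* Global OS memory allocator state: a pool of spare, already DMA-mapped pages
 * protected by pool_lock, a counter of pages currently handed out to sessions,
 * a memory shrinker registered with the kernel, and a delayed work item that
 * trims the pool back down whenever it grows beyond
 * MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES. */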
static struct mali_mem_os_allocator {
        spinlock_t pool_lock;
        struct list_head pool_pages;
        size_t pool_count;

        atomic_t allocated_pages;
        size_t allocation_limit;

        struct shrinker shrinker;
        struct delayed_work timed_shrinker;
        struct workqueue_struct *wq;
} mali_mem_os_allocator = {
        .pool_lock = __SPIN_LOCK_UNLOCKED(pool_lock),
        .pool_pages = LIST_HEAD_INIT(mali_mem_os_allocator.pool_pages),
        .pool_count = 0,

        .allocated_pages = ATOMIC_INIT(0),
        .allocation_limit = 0,

        .shrinker.shrink = mali_mem_os_shrink,
        .shrinker.seeks = DEFAULT_SEEKS,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
        .timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool, TIMER_DEFERRABLE),
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
        .timed_shrinker = __DEFERRED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
#else
        .timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
#endif
};

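/* Return all pages of an allocation to the global page pool instead of freeing
 * them to the kernel, and start the trim timer if the pool has grown above the
 * configured minimum. The pages stay DMA-mapped so that a later allocation can
 * reuse them cheaply. */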
static void mali_mem_os_free(mali_mem_allocation *descriptor)
{
        LIST_HEAD(pages);

        MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);

        atomic_sub(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);

        /* Put pages on pool. */
        list_cut_position(&pages, &descriptor->os_mem.pages, descriptor->os_mem.pages.prev);

        spin_lock(&mali_mem_os_allocator.pool_lock);

        list_splice(&pages, &mali_mem_os_allocator.pool_pages);
        mali_mem_os_allocator.pool_count += descriptor->os_mem.count;

        spin_unlock(&mali_mem_os_allocator.pool_lock);

        if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
                MALI_DEBUG_PRINT(5, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
                queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
        }
}

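/* Populate descriptor->os_mem.pages with enough pages to back 'size' bytes.
 * Pages are taken from the global pool first; any remainder is allocated from
 * the kernel, zeroed, DMA-mapped towards the device and has its DMA address
 * stashed in page->private. Returns 0 on success or -ENOMEM, in which case any
 * pages already obtained are returned to the pool. */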
static int mali_mem_os_alloc_pages(mali_mem_allocation *descriptor, u32 size)
{
        struct page *new_page, *tmp;
        LIST_HEAD(pages);
        size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
        size_t remaining = page_count;
        u32 i;

        MALI_DEBUG_ASSERT_POINTER(descriptor);
        MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);

        INIT_LIST_HEAD(&descriptor->os_mem.pages);
        descriptor->os_mem.count = page_count;

        /* Grab pages from pool. */
        {
                size_t pool_pages;
                spin_lock(&mali_mem_os_allocator.pool_lock);
                pool_pages = min(remaining, mali_mem_os_allocator.pool_count);
                for (i = pool_pages; i > 0; i--) {
                        BUG_ON(list_empty(&mali_mem_os_allocator.pool_pages));
                        list_move(mali_mem_os_allocator.pool_pages.next, &pages);
                }
                mali_mem_os_allocator.pool_count -= pool_pages;
                remaining -= pool_pages;
                spin_unlock(&mali_mem_os_allocator.pool_lock);
        }

        /* Process pages from pool. */
        i = 0;
        list_for_each_entry_safe(new_page, tmp, &pages, lru) {
                BUG_ON(NULL == new_page);

                list_move_tail(&new_page->lru, &descriptor->os_mem.pages);
        }

        /* Allocate new pages, if needed. */
        for (i = 0; i < remaining; i++) {
                dma_addr_t dma_addr;

                new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);

                if (unlikely(NULL == new_page)) {
                        /* Calculate the number of pages actually allocated, and free them. */
                        descriptor->os_mem.count = (page_count - remaining) + i;
                        atomic_add(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);
                        mali_mem_os_free(descriptor);
                        return -ENOMEM;
                }

                /* Ensure page is flushed from CPU caches. */
                dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
                                        0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);

                /* Store page phys addr */
                SetPagePrivate(new_page);
                set_page_private(new_page, dma_addr);

                list_add_tail(&new_page->lru, &descriptor->os_mem.pages);
        }

        atomic_add(page_count, &mali_mem_os_allocator.allocated_pages);

        if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
                MALI_DEBUG_PRINT(4, ("OS Mem: Stopping pool trim timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
                cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
        }

        return 0;
}

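/* Map the allocation into the Mali (GPU) virtual address space by writing one
 * page table entry per page, using the DMA address stored in page->private as
 * the physical address. */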
static int mali_mem_os_mali_map(mali_mem_allocation *descriptor, struct mali_session_data *session)
{
        struct mali_page_directory *pagedir = session->page_directory;
        struct page *page;
        _mali_osk_errcode_t err;
        u32 virt = descriptor->mali_mapping.addr;
        u32 prop = descriptor->mali_mapping.properties;

        MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);

        err = mali_mem_mali_map_prepare(descriptor);
        if (_MALI_OSK_ERR_OK != err) {
                return -ENOMEM;
        }

        list_for_each_entry(page, &descriptor->os_mem.pages, lru) {
                u32 phys = page_private(page);
                mali_mmu_pagedir_update(pagedir, virt, phys, MALI_MMU_PAGE_SIZE, prop);
                virt += MALI_MMU_PAGE_SIZE;
        }

        return 0;
}

static void mali_mem_os_mali_unmap(struct mali_session_data *session, mali_mem_allocation *descriptor)
{
        mali_mem_mali_map_free(descriptor);
}

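/* Map the allocation into the calling process' address space, one page at a
 * time via vm_insert_pfn(); see the in-line comment for why vm_insert_page()
 * is avoided. */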
static int mali_mem_os_cpu_map(mali_mem_allocation *descriptor, struct vm_area_struct *vma)
{
        struct page *page;
        int ret;
        unsigned long addr = vma->vm_start;

        list_for_each_entry(page, &descriptor->os_mem.pages, lru) {
                /* We should use vm_insert_page, but it does a dcache
                 * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
                ret = vm_insert_page(vma, addr, page);
                */
                ret = vm_insert_pfn(vma, addr, page_to_pfn(page));

                if (unlikely(0 != ret)) {
                        return -EFAULT;
                }
                addr += _MALI_OSK_MALI_PAGE_SIZE;
        }

        return 0;
}

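/* Allocate 'size' bytes of OS memory for a session and map it both on the GPU
 * (at mali_addr) and on the CPU (into the supplied VMA). Returns the new
 * allocation descriptor, or NULL on failure with everything rolled back. */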
mali_mem_allocation *mali_mem_os_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session)
{
        mali_mem_allocation *descriptor;
        int err;

        /* MALI_SEC: Remove limitation of texture memory size for GLB2.7 T-Rex */
#if 0
        if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
                MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
                                     size,
                                     atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
                                     mali_mem_os_allocator.allocation_limit));
                return NULL;
        }
#endif

        descriptor = mali_mem_descriptor_create(session, MALI_MEM_OS);
        if (NULL == descriptor) return NULL;

        descriptor->mali_mapping.addr = mali_addr;
        descriptor->size = size;
        descriptor->cpu_mapping.addr = (void __user *)vma->vm_start;
        descriptor->cpu_mapping.ref = 1;

        if (VM_SHARED == (VM_SHARED & vma->vm_flags)) {
                descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
        } else {
                /* Cached Mali memory mapping */
                descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
                vma->vm_flags |= VM_SHARED;
        }

        err = mali_mem_os_alloc_pages(descriptor, size); /* Allocate pages */
        if (0 != err) goto alloc_failed;

        /* Take session memory lock */
        _mali_osk_mutex_wait(session->memory_lock);

        err = mali_mem_os_mali_map(descriptor, session); /* Map on Mali */
        if (0 != err) goto mali_map_failed;

        _mali_osk_mutex_signal(session->memory_lock);

        err = mali_mem_os_cpu_map(descriptor, vma); /* Map on CPU */
        if (0 != err) goto cpu_map_failed;

        return descriptor;

cpu_map_failed:
        /* The lock was released before the CPU mapping was attempted; re-take
         * it so the fall-through signal below stays balanced. */
        _mali_osk_mutex_wait(session->memory_lock);
        mali_mem_os_mali_unmap(session, descriptor);
mali_map_failed:
        _mali_osk_mutex_signal(session->memory_lock);
        mali_mem_os_free(descriptor);
alloc_failed:
        mali_mem_descriptor_destroy(descriptor);
        MALI_DEBUG_PRINT(2, ("OS allocator: Failed to allocate memory (%d)\n", err));
        return NULL;
}

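/* Tear down an OS memory allocation: remove the Mali (GPU) mapping and return
 * the backing pages to the page pool. */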
void mali_mem_os_release(mali_mem_allocation *descriptor)
{
        struct mali_session_data *session = descriptor->session;

        /* Unmap the memory from the mali virtual address space. */
        mali_mem_os_mali_unmap(session, descriptor);

        /* Free pages */
        mali_mem_os_free(descriptor);
}

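/* Small cache of Mali page table pages. These pages are allocated with
 * dma_alloc_writecombine() and are relatively expensive to obtain, so up to
 * MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE of them are kept around for reuse
 * instead of being freed immediately. */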
#define MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE 128
static struct {
        struct {
                u32 phys;
                mali_io_address mapping;
        } page[MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE];
        u32 count;
        spinlock_t lock;
} mali_mem_page_table_page_pool = {
        .count = 0,
        .lock = __SPIN_LOCK_UNLOCKED(pool_lock),
};

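/* Hand out one page table page, preferably from the pool; fall back to
 * dma_alloc_writecombine() when the pool is empty. Returns _MALI_OSK_ERR_OK on
 * success and _MALI_OSK_ERR_NOMEM otherwise. */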
_mali_osk_errcode_t mali_mem_os_get_table_page(u32 *phys, mali_io_address *mapping)
{
        _mali_osk_errcode_t ret = _MALI_OSK_ERR_NOMEM;

        spin_lock(&mali_mem_page_table_page_pool.lock);
        if (0 < mali_mem_page_table_page_pool.count) {
                u32 i = --mali_mem_page_table_page_pool.count;
                *phys = mali_mem_page_table_page_pool.page[i].phys;
                *mapping = mali_mem_page_table_page_pool.page[i].mapping;

                ret = _MALI_OSK_ERR_OK;
        }
        spin_unlock(&mali_mem_page_table_page_pool.lock);

        if (_MALI_OSK_ERR_OK != ret) {
                *mapping = dma_alloc_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, phys, GFP_KERNEL);
                if (NULL != *mapping) {
                        ret = _MALI_OSK_ERR_OK;
                }
        }

        return ret;
}

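/* Return a page table page: stash it in the pool if there is room, otherwise
 * free it back to the DMA allocator. */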
void mali_mem_os_release_table_page(u32 phys, void *virt)
{
        spin_lock(&mali_mem_page_table_page_pool.lock);
        if (MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE > mali_mem_page_table_page_pool.count) {
                u32 i = mali_mem_page_table_page_pool.count;
                mali_mem_page_table_page_pool.page[i].phys = phys;
                mali_mem_page_table_page_pool.page[i].mapping = virt;

                ++mali_mem_page_table_page_pool.count;

                spin_unlock(&mali_mem_page_table_page_pool.lock);
        } else {
                spin_unlock(&mali_mem_page_table_page_pool.lock);

                dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
        }
}

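/* Final release of a pooled page: undo the DMA mapping set up in
 * mali_mem_os_alloc_pages() and give the page back to the kernel. */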
static void mali_mem_os_free_page(struct page *page)
{
        BUG_ON(page_count(page) != 1);

        dma_unmap_page(&mali_platform_device->dev, page_private(page),
                       _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);

        ClearPagePrivate(page);

        __free_page(page);
}

/* The maximum number of page table pool pages to free in one go. */
#define MALI_MEM_OS_CHUNK_TO_FREE 64UL

/* Free a certain number of pages from the page table page pool.
 * The pool lock must be held when calling the function, and the lock will be
 * released before returning.
 */
static void mali_mem_os_page_table_pool_free(size_t nr_to_free)
{
        u32 phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
        void *virt_arr[MALI_MEM_OS_CHUNK_TO_FREE];
        u32 i;

        MALI_DEBUG_ASSERT(nr_to_free <= MALI_MEM_OS_CHUNK_TO_FREE);

        /* Remove nr_to_free pages from the pool and store them locally on stack. */
        for (i = 0; i < nr_to_free; i++) {
                u32 pool_index = mali_mem_page_table_page_pool.count - i - 1;

                phys_arr[i] = mali_mem_page_table_page_pool.page[pool_index].phys;
                virt_arr[i] = mali_mem_page_table_page_pool.page[pool_index].mapping;
        }

        mali_mem_page_table_page_pool.count -= nr_to_free;

        spin_unlock(&mali_mem_page_table_page_pool.lock);

        /* After releasing the spinlock: free the pages we removed from the pool. */
        for (i = 0; i < nr_to_free; i++) {
                dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt_arr[i], phys_arr[i]);
        }
}

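/* Shrink the page table page pool towards a size proportional to the page
 * pool, keeping a small reserve for new sessions. Uses spin_trylock() so it
 * never blocks when called from the shrinker or trim paths. */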
static void mali_mem_os_trim_page_table_page_pool(void)
{
        size_t nr_to_free = 0;
        size_t nr_to_keep;

        /* Keep 2 page table pages for each 1024 pages in the page cache. */
        nr_to_keep = mali_mem_os_allocator.pool_count / 512;
        /* And a minimum of eight pages, to accommodate new sessions. */
        nr_to_keep += 8;

        if (0 == spin_trylock(&mali_mem_page_table_page_pool.lock)) return;

        if (nr_to_keep < mali_mem_page_table_page_pool.count) {
                nr_to_free = mali_mem_page_table_page_pool.count - nr_to_keep;
                nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, nr_to_free);
        }

        /* Pool lock will be released by the callee. */
        mali_mem_os_page_table_pool_free(nr_to_free);
}

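/* Memory shrinker callback registered with the kernel. With nr_to_scan == 0 it
 * only reports how many pages the pools could give back; otherwise it frees up
 * to nr_to_scan pages from the page pool, trims the page table page pool, and
 * returns the number of reclaimable pages that remain. */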
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask)
#else
static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
#endif
#else
static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
#endif
{
        struct page *page, *tmp;
        unsigned long flags;
        struct list_head *le, pages;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
        int nr = nr_to_scan;
#else
        int nr = sc->nr_to_scan;
#endif

        if (0 == nr) {
                return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
        }

        if (0 == mali_mem_os_allocator.pool_count) {
                /* No pages available */
                return 0;
        }

        if (0 == spin_trylock_irqsave(&mali_mem_os_allocator.pool_lock, flags)) {
                /* Not able to lock. */
                return -1;
        }

        /* Release from general page pool */
        nr = min((size_t)nr, mali_mem_os_allocator.pool_count);
        mali_mem_os_allocator.pool_count -= nr;
        list_for_each(le, &mali_mem_os_allocator.pool_pages) {
                --nr;
                if (0 == nr) break;
        }
        list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
        spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);

        list_for_each_entry_safe(page, tmp, &pages, lru) {
                mali_mem_os_free_page(page);
        }

        /* Release some pages from page table page pool */
        mali_mem_os_trim_page_table_page_pool();

        if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
                /* Pool is below the kernel buffer limit, stop the trim timer */
                MALI_DEBUG_PRINT(5, ("Stopping timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
                cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
        }

        return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
}

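/* Delayed work handler that trims the page pool back towards
 * MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES, and re-arms itself while the
 * pool is still above that limit. */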
static void mali_mem_os_trim_pool(struct work_struct *data)
{
        struct page *page, *tmp;
        struct list_head *le;
        LIST_HEAD(pages);
        size_t nr_to_free;

        MALI_IGNORE(data);

        MALI_DEBUG_PRINT(3, ("OS Mem: Trimming pool %u\n", mali_mem_os_allocator.pool_count));

        /* Release from general page pool */
        spin_lock(&mali_mem_os_allocator.pool_lock);
        if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
                size_t count = mali_mem_os_allocator.pool_count - MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES;
                /* Free half of the pages above the static limit, but at least 64 pages (256 KB). */
                nr_to_free = max(count / 2, (size_t)64);

                mali_mem_os_allocator.pool_count -= nr_to_free;
                list_for_each(le, &mali_mem_os_allocator.pool_pages) {
                        --nr_to_free;
                        if (0 == nr_to_free) break;
                }
                list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
        }
        spin_unlock(&mali_mem_os_allocator.pool_lock);

        list_for_each_entry_safe(page, tmp, &pages, lru) {
                mali_mem_os_free_page(page);
        }

        /* Release some pages from page table page pool */
        mali_mem_os_trim_page_table_page_pool();

        if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
                MALI_DEBUG_PRINT(4, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
                queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
        }
}

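/* Set up the OS memory allocator: create the unbound workqueue used for the
 * pool trim work and register the memory shrinker with the kernel. */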
_mali_osk_errcode_t mali_mem_os_init(void)
{
        mali_mem_os_allocator.wq = alloc_workqueue("mali-mem", WQ_UNBOUND, 1);
        if (NULL == mali_mem_os_allocator.wq) {
                return _MALI_OSK_ERR_NOMEM;
        }

        register_shrinker(&mali_mem_os_allocator.shrinker);

        return _MALI_OSK_ERR_OK;
}

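/* Tear down the allocator: unregister the shrinker, stop the trim work and
 * its workqueue, and free every page remaining in both pools. */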
void mali_mem_os_term(void)
{
        struct page *page, *tmp;

        unregister_shrinker(&mali_mem_os_allocator.shrinker);
        cancel_delayed_work_sync(&mali_mem_os_allocator.timed_shrinker);
        destroy_workqueue(mali_mem_os_allocator.wq);

        spin_lock(&mali_mem_os_allocator.pool_lock);
        list_for_each_entry_safe(page, tmp, &mali_mem_os_allocator.pool_pages, lru) {
                mali_mem_os_free_page(page);

                --mali_mem_os_allocator.pool_count;
        }
        BUG_ON(mali_mem_os_allocator.pool_count);
        spin_unlock(&mali_mem_os_allocator.pool_lock);

        /* Release from page table page pool */
        do {
                u32 nr_to_free;

                spin_lock(&mali_mem_page_table_page_pool.lock);

                nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, mali_mem_page_table_page_pool.count);

                /* Pool lock will be released by the callee. */
                mali_mem_os_page_table_pool_free(nr_to_free);
        } while (0 != mali_mem_page_table_page_pool.count);
}

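/* Store the OS memory allocation limit configured for the driver. */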
_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size)
{
        mali_mem_os_allocator.allocation_limit = size;

        MALI_SUCCESS;
}

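/* Report the number of bytes of OS memory currently allocated to sessions. */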
u32 mali_mem_os_stat(void)
{
        return atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE;
}