tizen 2.4 release
[kernel/linux-3.0.git] / drivers / gpu / arm / mali400 / ump / linux / ump_kernel_memory_backend_os.c
1 /*
2  * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
3  *
4  * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5  * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
6  *
7  * A copy of the licence is included with the program, and can also be obtained from Free Software
8  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
9  */
10
11 /* needed to detect kernel version specific code */
12 #include <linux/version.h>
13
14 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
15 #include <linux/semaphore.h>
16 #else /* pre 2.6.26 the file was in the arch specific location */
17 #include <asm/semaphore.h>
18 #endif
19
20 #include <linux/dma-mapping.h>
21 #include <linux/mm.h>
22 #include <linux/slab.h>
23 #include <asm/atomic.h>
24 #include <linux/vmalloc.h>
25 #include <asm/cacheflush.h>
26 #include "ump_kernel_common.h"
27 #include "ump_kernel_memory_backend.h"
28
29
30
/**
 * Per-backend state for the OS (page allocator) memory backend.
 * Tracks how many pages have been handed out against a configured ceiling.
 */
typedef struct os_allocator
{
        struct semaphore mutex;  /**< Serializes updates to the page counters below */
        u32 num_pages_max;       /**< Maximum number of pages to allocate from the OS */
        u32 num_pages_allocated; /**< Number of pages allocated from the OS */
} os_allocator;
37
38
39
40 static void os_free(void* ctx, ump_dd_mem * descriptor);
41 static int os_allocate(void* ctx, ump_dd_mem * descriptor);
42 static void os_memory_backend_destroy(ump_memory_backend * backend);
43 static u32 os_stat(struct ump_memory_backend *backend);
44
45
46
47 /*
48  * Create OS memory backend
49  */
50 ump_memory_backend * ump_os_memory_backend_create(const int max_allocation)
51 {
52         ump_memory_backend * backend;
53         os_allocator * info;
54
55         info = kmalloc(sizeof(os_allocator), GFP_KERNEL);
56         if (NULL == info)
57         {
58                 return NULL;
59         }
60
61         info->num_pages_max = max_allocation >> PAGE_SHIFT;
62         info->num_pages_allocated = 0;
63
64         sema_init(&info->mutex, 1);
65
66         backend = kmalloc(sizeof(ump_memory_backend), GFP_KERNEL);
67         if (NULL == backend)
68         {
69                 kfree(info);
70                 return NULL;
71         }
72
73         backend->ctx = info;
74         backend->allocate = os_allocate;
75         backend->release = os_free;
76         backend->shutdown = os_memory_backend_destroy;
77         backend->stat = os_stat;
78         backend->pre_allocate_physical_check = NULL;
79         backend->adjust_to_mali_phys = NULL;
80         /* MALI_SEC */
81         backend->get = NULL;
82         backend->set = NULL;
83
84         return backend;
85 }
86
87
88
89 /*
90  * Destroy specified OS memory backend
91  */
92 static void os_memory_backend_destroy(ump_memory_backend * backend)
93 {
94         os_allocator * info = (os_allocator*)backend->ctx;
95
96         DBG_MSG_IF(1, 0 != info->num_pages_allocated, ("%d pages still in use during shutdown\n", info->num_pages_allocated));
97
98         kfree(info);
99         kfree(backend);
100 }
101
102
103
104 /*
105  * Allocate UMP memory
106  */
107 static int os_allocate(void* ctx, ump_dd_mem * descriptor)
108 {
109         u32 left;
110         os_allocator * info;
111         int pages_allocated = 0;
112         int is_cached;
113
114         BUG_ON(!descriptor);
115         BUG_ON(!ctx);
116
117         info = (os_allocator*)ctx;
118         left = descriptor->size_bytes;
119         is_cached = descriptor->is_cached;
120
121         if (down_interruptible(&info->mutex))
122         {
123                 DBG_MSG(1, ("Failed to get mutex in os_free\n"));
124                 return 0; /* failure */
125         }
126
127         descriptor->backend_info = NULL;
128         descriptor->nr_blocks = ((left + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT;
129
130         DBG_MSG(5, ("Allocating page array. Size: %lu\n", descriptor->nr_blocks * sizeof(ump_dd_physical_block)));
131
132         descriptor->block_array = (ump_dd_physical_block *)vmalloc(sizeof(ump_dd_physical_block) * descriptor->nr_blocks);
133         if (NULL == descriptor->block_array)
134         {
135                 up(&info->mutex);
136                 DBG_MSG(1, ("Block array could not be allocated\n"));
137                 return 0; /* failure */
138         }
139
140         while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max))
141         {
142                 struct page * new_page;
143
144                 if (is_cached)
145                 {
146                         new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN);
147                 } else
148                 {
149                         new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
150                 }
151                 if (NULL == new_page)
152                 {
153                         MSG_ERR(("UMP memory allocated: Out of Memory !!\n"));
154                         break;
155                 }
156
157                 /* Ensure page caches are flushed. */
158                 if ( is_cached )
159                 {
160                         descriptor->block_array[pages_allocated].addr = page_to_phys(new_page);
161                         descriptor->block_array[pages_allocated].size = PAGE_SIZE;
162                 } else
163                 {
164                         descriptor->block_array[pages_allocated].addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL );
165                         descriptor->block_array[pages_allocated].size = PAGE_SIZE;
166                 }
167
168                 DBG_MSG(5, ("Allocated page 0x%08lx cached: %d\n", descriptor->block_array[pages_allocated].addr, is_cached));
169
170                 if (left < PAGE_SIZE)
171                 {
172                         left = 0;
173                 }
174                 else
175                 {
176                         left -= PAGE_SIZE;
177                 }
178
179                 pages_allocated++;
180         }
181
182         DBG_MSG(5, ("Alloce for ID:%2d got %d pages, cached: %d\n", descriptor->secure_id,  pages_allocated));
183
184         if (left)
185         {
186                 DBG_MSG(1, ("Failed to allocate needed pages\n"));
187                 DBG_MSG(1, ("UMP memory allocated: %d kB  Configured maximum OS memory usage: %d kB\n",
188                                  (pages_allocated * _MALI_OSK_CPU_PAGE_SIZE)/1024, (info->num_pages_max* _MALI_OSK_CPU_PAGE_SIZE)/1024));
189
190                 while(pages_allocated)
191                 {
192                         pages_allocated--;
193                         if ( !is_cached )
194                         {
195                                 dma_unmap_page(NULL, descriptor->block_array[pages_allocated].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
196                         }
197                         __free_page(pfn_to_page(descriptor->block_array[pages_allocated].addr >> PAGE_SHIFT) );
198                 }
199
200                 up(&info->mutex);
201
202                 return 0; /* failure */
203         }
204
205         info->num_pages_allocated += pages_allocated;
206
207         DBG_MSG(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
208
209         up(&info->mutex);
210
211         return 1; /* success*/
212 }
213
214
215 /*
216  * Free specified UMP memory
217  */
218 static void os_free(void* ctx, ump_dd_mem * descriptor)
219 {
220         os_allocator * info;
221         int i;
222
223         BUG_ON(!ctx);
224         BUG_ON(!descriptor);
225
226         info = (os_allocator*)ctx;
227
228         BUG_ON(descriptor->nr_blocks > info->num_pages_allocated);
229
230         if (down_interruptible(&info->mutex))
231         {
232                 DBG_MSG(1, ("Failed to get mutex in os_free\n"));
233                 return;
234         }
235
236         DBG_MSG(5, ("Releasing %lu OS pages\n", descriptor->nr_blocks));
237
238         info->num_pages_allocated -= descriptor->nr_blocks;
239
240         up(&info->mutex);
241
242         for ( i = 0; i < descriptor->nr_blocks; i++)
243         {
244                 DBG_MSG(6, ("Freeing physical page. Address: 0x%08lx\n", descriptor->block_array[i].addr));
245                 if ( ! descriptor->is_cached)
246                 {
247                         dma_unmap_page(NULL, descriptor->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
248                 }
249                 __free_page(pfn_to_page(descriptor->block_array[i].addr>>PAGE_SHIFT) );
250         }
251
252         vfree(descriptor->block_array);
253 }
254
255
256 static u32 os_stat(struct ump_memory_backend *backend)
257 {
258         os_allocator *info;
259         info = (os_allocator*)backend->ctx;
260         return info->num_pages_allocated * _MALI_OSK_MALI_PAGE_SIZE;
261 }