drivers/gpu/arm/mali400/ump/linux/ump_kernel_memory_backend_dedicated.c
/*
 * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

/* needed to detect kernel version specific code */
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
#include <linux/semaphore.h>
#else /* pre 2.6.26 the file was in the arch specific location */
#include <asm/semaphore.h>
#endif

#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <linux/vmalloc.h>
#include "ump_kernel_common.h"
#include "ump_kernel_memory_backend.h"


#define UMP_BLOCK_SIZE (256UL * 1024UL)  /* 256kB, remember to keep the ()s */

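/*
 * Book-keeping entry for a single UMP_BLOCK_SIZE block. Free blocks are
 * chained through @next into a singly linked free list.
 */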
typedef struct block_info
{
	struct block_info * next;
} block_info;

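/*
 * State for one dedicated-memory backend: @all_blocks is an array holding one
 * block_info per block, @first_free heads the free list, @base is the physical
 * address of the block at index 0, and @mutex protects the free list and the
 * counters.
 */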
typedef struct block_allocator
{
	struct semaphore mutex;
	block_info * all_blocks;
	block_info * first_free;
	u32 base;
	u32 num_blocks;
	u32 num_free;
} block_allocator;


static void block_allocator_shutdown(ump_memory_backend * backend);
static int block_allocator_allocate(void* ctx, ump_dd_mem * mem);
static void block_allocator_release(void * ctx, ump_dd_mem * handle);
static inline u32 get_phys(block_allocator * allocator, block_info * block);
static u32 block_allocator_stat(struct ump_memory_backend *backend);


/*
 * Create dedicated memory backend
 */
ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size)
{
	ump_memory_backend * backend;
	block_allocator * allocator;
	u32 usable_size;
	u32 num_blocks;

	usable_size = (size + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1);
	num_blocks = usable_size / UMP_BLOCK_SIZE;

	if (0 == usable_size)
	{
		DBG_MSG(1, ("Memory block of size %u is unusable\n", size));
		return NULL;
	}

	DBG_MSG(5, ("Creating dedicated UMP memory backend. Base address: 0x%08x, size: 0x%08x\n", base_address, size));
	DBG_MSG(6, ("%u usable bytes which becomes %u blocks\n", usable_size, num_blocks));

	backend = kzalloc(sizeof(ump_memory_backend), GFP_KERNEL);
	if (NULL != backend)
	{
		allocator = kmalloc(sizeof(block_allocator), GFP_KERNEL);
		if (NULL != allocator)
		{
			/* one block_info book-keeping entry per block */
			allocator->all_blocks = kmalloc(sizeof(block_info) * num_blocks, GFP_KERNEL);
			if (NULL != allocator->all_blocks)
			{
				int i;

				allocator->first_free = NULL;
				allocator->num_blocks = num_blocks;
				allocator->num_free = num_blocks;
				allocator->base = base_address;
				sema_init(&allocator->mutex, 1);

				for (i = 0; i < num_blocks; i++)
				{
					allocator->all_blocks[i].next = allocator->first_free;
					allocator->first_free = &allocator->all_blocks[i];
				}

				backend->ctx = allocator;
				backend->allocate = block_allocator_allocate;
				backend->release = block_allocator_release;
				backend->shutdown = block_allocator_shutdown;
				backend->stat = block_allocator_stat;
				backend->pre_allocate_physical_check = NULL;
				backend->adjust_to_mali_phys = NULL;
				/* MALI_SEC */
				backend->get = NULL;
				backend->set = NULL;

				return backend;
			}
			kfree(allocator);
		}
		kfree(backend);
	}

	return NULL;
}
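/*
 * Illustrative use only (not taken from this file): a platform that has
 * reserved a dedicated physical region could hand it to this backend roughly
 * as
 *
 *     ump_memory_backend * backend;
 *     backend = ump_block_allocator_create(0x2F000000, 16UL * 1024UL * 1024UL);
 *
 * The base address and size above are hypothetical placeholders; the real
 * values come from the board/platform configuration.
 */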


/*
 * Destroy specified dedicated memory backend
 */
static void block_allocator_shutdown(ump_memory_backend * backend)
{
	block_allocator * allocator;

	BUG_ON(!backend);
	BUG_ON(!backend->ctx);

	allocator = (block_allocator*)backend->ctx;

	DBG_MSG_IF(1, allocator->num_free != allocator->num_blocks, ("%u blocks still in use during shutdown\n", allocator->num_blocks - allocator->num_free));

	kfree(allocator->all_blocks);
	kfree(allocator);
	kfree(backend);
}

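/*
 * Satisfy an allocation request: round the requested size up to whole
 * UMP_BLOCK_SIZE blocks, pop that many blocks off the free list and record
 * their physical address and size in mem->block_array. If the pool runs out
 * part-way through, every block taken so far is pushed back and the call
 * fails. Returns 1 on success, 0 on failure.
 */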
static int block_allocator_allocate(void* ctx, ump_dd_mem * mem)
{
	block_allocator * allocator;
	u32 left;
	block_info * last_allocated = NULL;
	int i = 0;

	BUG_ON(!ctx);
	BUG_ON(!mem);

	allocator = (block_allocator*)ctx;
	left = mem->size_bytes;

	BUG_ON(!left);
	BUG_ON(!&allocator->mutex);

	mem->nr_blocks = ((left + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1)) / UMP_BLOCK_SIZE;
	mem->block_array = (ump_dd_physical_block*)vmalloc(sizeof(ump_dd_physical_block) * mem->nr_blocks);
	if (NULL == mem->block_array)
	{
		MSG_ERR(("Failed to allocate block array\n"));
		return 0;
	}

	if (down_interruptible(&allocator->mutex))
	{
		MSG_ERR(("Could not get mutex to do block_allocate\n"));
		/* free the block array allocated above so it is not leaked */
		vfree(mem->block_array);
		mem->block_array = NULL;
		return 0;
	}

	mem->size_bytes = 0;

	while ((left > 0) && (allocator->first_free))
	{
		block_info * block;

		block = allocator->first_free;
		allocator->first_free = allocator->first_free->next;
		block->next = last_allocated;
		last_allocated = block;
		allocator->num_free--;

		mem->block_array[i].addr = get_phys(allocator, block);
		mem->block_array[i].size = UMP_BLOCK_SIZE;
		mem->size_bytes += UMP_BLOCK_SIZE;

		i++;

		if (left < UMP_BLOCK_SIZE) left = 0;
		else left -= UMP_BLOCK_SIZE;
	}

	if (left)
	{
		block_info * block;
		/* release all memory back to the pool */
		while (last_allocated)
		{
			block = last_allocated->next;
			last_allocated->next = allocator->first_free;
			allocator->first_free = last_allocated;
			last_allocated = block;
			allocator->num_free++;
		}

		vfree(mem->block_array);
		mem->backend_info = NULL;
		mem->block_array = NULL;

		DBG_MSG(4, ("Could not find a mem-block for the allocation.\n"));
		up(&allocator->mutex);

		return 0;
	}

	mem->backend_info = last_allocated;

	up(&allocator->mutex);
	mem->is_cached = 0;

	return 1;
}

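/*
 * Return an allocation to the pool: walk the chain of blocks stored in
 * handle->backend_info and push each one back onto the free list, then free
 * the block array describing the allocation.
 */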
static void block_allocator_release(void * ctx, ump_dd_mem * handle)
{
	block_allocator * allocator;
	block_info * block, * next;

	BUG_ON(!ctx);
	BUG_ON(!handle);

	allocator = (block_allocator*)ctx;
	block = (block_info*)handle->backend_info;
	BUG_ON(!block);

	if (down_interruptible(&allocator->mutex))
	{
		MSG_ERR(("Allocator release: Failed to get mutex - memory leak\n"));
		return;
	}

	while (block)
	{
		next = block->next;

		/* every block must point into the all_blocks array */
		BUG_ON((block < allocator->all_blocks) || (block >= (allocator->all_blocks + allocator->num_blocks)));

		block->next = allocator->first_free;
		allocator->first_free = block;
		allocator->num_free++;

		block = next;
	}
	DBG_MSG(3, ("%d blocks free after release call\n", allocator->num_free));
	up(&allocator->mutex);

	vfree(handle->block_array);
	handle->block_array = NULL;
}


/*
 * Helper function for calculating the physical base address of a memory block
 */
static inline u32 get_phys(block_allocator * allocator, block_info * block)
{
	return allocator->base + ((block - allocator->all_blocks) * UMP_BLOCK_SIZE);
}

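/* Report how many bytes this backend currently has handed out. */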
static u32 block_allocator_stat(struct ump_memory_backend *backend)
{
	block_allocator *allocator;
	BUG_ON(!backend);
	allocator = (block_allocator*)backend->ctx;
	BUG_ON(!allocator);

	return (allocator->num_blocks - allocator->num_free) * UMP_BLOCK_SIZE;
}