upload tizen1.0 source
[kernel/linux-2.6.36.git] / drivers / media / video / samsung / ump / linux / ump_kernel_memory_backend_os.c
1 /*
2  * Copyright (C) 2010 ARM Limited. All rights reserved.
3  * 
4  * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5  * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
6  * 
7  * A copy of the licence is included with the program, and can also be obtained from Free Software
8  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
9  */
10
11 /* needed to detect kernel version specific code */
12 #include <linux/version.h>
13
14 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
15 #include <linux/semaphore.h>
16 #else /* pre 2.6.26 the file was in the arch specific location */
17 #include <asm/semaphore.h>
18 #endif
19
20 #include <linux/dma-mapping.h>
21 #include <linux/mm.h>
22 #include <linux/slab.h>
23 #include <asm/atomic.h>
24 #include <linux/vmalloc.h>
25 #include <asm/cacheflush.h>
26 #include "ump_kernel_common.h"
27 #include "ump_kernel_memory_backend.h"
28
29
30
/**
 * Per-backend state for the OS (page-allocator) memory backend.
 *
 * A single instance is created by ump_os_memory_backend_create() and stored
 * in ump_memory_backend::ctx; it tracks a page budget so the backend never
 * hands out more than the configured maximum.
 */
typedef struct os_allocator
{
	struct semaphore mutex;  /**< Serializes updates to the page counters below */
	u32 num_pages_max;       /**< Maximum number of pages to allocate from the OS */
	u32 num_pages_allocated; /**< Number of pages allocated from the OS */
} os_allocator;
37
38
39
/* Backend operations; bound into the vtable by ump_os_memory_backend_create(). */
static void os_free(void* ctx, ump_dd_mem * descriptor);
static int os_allocate(void* ctx, ump_dd_mem * descriptor);
static void os_memory_backend_destroy(ump_memory_backend * backend);
static u32 os_stat(struct ump_memory_backend *backend);
44
45
46
47 /*
48  * Create OS memory backend
49  */
50 ump_memory_backend * ump_os_memory_backend_create(const int max_allocation)
51 {
52         ump_memory_backend * backend;
53         os_allocator * info;
54
55         info = kmalloc(sizeof(os_allocator), GFP_KERNEL);
56         if (NULL == info)
57         {
58                 return NULL;
59         }
60
61         info->num_pages_max = max_allocation >> PAGE_SHIFT;
62         info->num_pages_allocated = 0;
63
64         sema_init(&info->mutex, 1);
65
66         backend = kmalloc(sizeof(ump_memory_backend), GFP_KERNEL);
67         if (NULL == backend)
68         {
69                 kfree(info);
70                 return NULL;
71         }
72
73         backend->ctx = info;
74         backend->allocate = os_allocate;
75         backend->release = os_free;
76         backend->shutdown = os_memory_backend_destroy;
77         backend->stat = os_stat;
78         backend->pre_allocate_physical_check = NULL;
79         backend->adjust_to_mali_phys = NULL;
80         backend->get = NULL;
81         backend->set = NULL;
82
83         return backend;
84 }
85
86
87
88 /*
89  * Destroy specified OS memory backend
90  */
91 static void os_memory_backend_destroy(ump_memory_backend * backend)
92 {
93         os_allocator * info = (os_allocator*)backend->ctx;
94
95         DBG_MSG_IF(1, 0 != info->num_pages_allocated, ("%d pages still in use during shutdown\n", info->num_pages_allocated));
96
97         kfree(info);
98         kfree(backend);
99 }
100
101
102
103 /*
104  * Allocate UMP memory
105  */
106 static int os_allocate(void* ctx, ump_dd_mem * descriptor)
107 {
108         u32 left;
109         os_allocator * info;
110         int pages_allocated = 0;
111         int is_cached;
112
113         BUG_ON(!descriptor);
114         BUG_ON(!ctx);
115
116         info = (os_allocator*)ctx;
117         left = descriptor->size_bytes;
118         is_cached = descriptor->is_cached;
119
120         if (down_interruptible(&info->mutex))
121         {
122                 DBG_MSG(1, ("Failed to get mutex in os_free\n"));
123                 return 0; /* failure */
124         }
125
126         descriptor->backend_info = NULL;
127         descriptor->nr_blocks = ((left + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT;
128
129         DBG_MSG(5, ("Allocating page array. Size: %lu\n", descriptor->nr_blocks * sizeof(ump_dd_physical_block)));
130
131         descriptor->block_array = (ump_dd_physical_block *)vmalloc(sizeof(ump_dd_physical_block) * descriptor->nr_blocks);
132         if (NULL == descriptor->block_array)
133         {
134                 up(&info->mutex);
135                 DBG_MSG(1, ("Block array could not be allocated\n"));
136                 return 0; /* failure */
137         }
138
139         while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max))
140         {
141                 struct page * new_page;
142
143                 if (is_cached)
144                 {
145                         new_page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY | __GFP_NOWARN );
146                 } else
147                 {
148                         new_page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY | __GFP_NOWARN | __GFP_COLD);
149                 }
150                 if (NULL == new_page)
151                 {
152                         break;
153                 }
154
155                 /* Ensure page caches are flushed. */
156                 if ( is_cached )
157                 {
158                         descriptor->block_array[pages_allocated].addr = page_to_phys(new_page);
159                         descriptor->block_array[pages_allocated].size = PAGE_SIZE;
160                 } else
161                 {
162                         descriptor->block_array[pages_allocated].addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL );
163                         descriptor->block_array[pages_allocated].size = PAGE_SIZE;
164                 }
165
166                 DBG_MSG(5, ("Allocated page 0x%08lx cached: %d\n", descriptor->block_array[pages_allocated].addr, is_cached));
167
168                 if (left < PAGE_SIZE)
169                 {
170                         left = 0;
171                 }
172                 else
173                 {
174                         left -= PAGE_SIZE;
175                 }
176
177                 pages_allocated++;
178         }
179
180         DBG_MSG(5, ("Alloce for ID:%2d got %d pages, cached: %d\n", descriptor->secure_id,  pages_allocated));
181
182         if (left)
183         {
184                 DBG_MSG(1, ("Failed to allocate needed pages\n"));
185
186                 while(pages_allocated)
187                 {
188                         pages_allocated--;
189                         if ( !is_cached )
190                         {
191                                 dma_unmap_page(NULL, descriptor->block_array[pages_allocated].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
192                         }
193                         __free_page(pfn_to_page(descriptor->block_array[pages_allocated].addr >> PAGE_SHIFT) );
194                 }
195
196                 up(&info->mutex);
197
198                 return 0; /* failure */
199         }
200
201         info->num_pages_allocated += pages_allocated;
202
203         DBG_MSG(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
204
205         up(&info->mutex);
206
207         return 1; /* success*/
208 }
209
210
211 /*
212  * Free specified UMP memory
213  */
214 static void os_free(void* ctx, ump_dd_mem * descriptor)
215 {
216         os_allocator * info;
217         int i;
218
219         BUG_ON(!ctx);
220         BUG_ON(!descriptor);
221
222         info = (os_allocator*)ctx;
223
224         BUG_ON(descriptor->nr_blocks > info->num_pages_allocated);
225
226         if (down_interruptible(&info->mutex))
227         {
228                 DBG_MSG(1, ("Failed to get mutex in os_free\n"));
229                 return;
230         }
231
232         DBG_MSG(5, ("Releasing %lu OS pages\n", descriptor->nr_blocks));
233
234         info->num_pages_allocated -= descriptor->nr_blocks;
235
236         up(&info->mutex);
237
238         for ( i = 0; i < descriptor->nr_blocks; i++)
239         {
240                 DBG_MSG(6, ("Freeing physical page. Address: 0x%08lx\n", descriptor->block_array[i].addr));
241                 if ( ! descriptor->is_cached)
242                 {
243                         dma_unmap_page(NULL, descriptor->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
244                 }
245                 __free_page(pfn_to_page(descriptor->block_array[i].addr>>PAGE_SHIFT) );
246         }
247
248         vfree(descriptor->block_array);
249 }
250
251
252 static u32 os_stat(struct ump_memory_backend *backend)
253 {
254         os_allocator *info;
255         info = (os_allocator*)backend->ctx;
256         return info->num_pages_allocated * _MALI_OSK_MALI_PAGE_SIZE;
257 }