/*
 *      memrar_allocator 1.0:  An allocator for Intel RAR.
 *
 *      Copyright (C) 2010 Intel Corporation. All rights reserved.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of version 2 of the GNU General
 *      Public License as published by the Free Software Foundation.
 *
 *      This program is distributed in the hope that it will be
 *      useful, but WITHOUT ANY WARRANTY; without even the implied
 *      warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 *      PURPOSE.  See the GNU General Public License for more details.
 *      You should have received a copy of the GNU General Public
 *      License along with this program; if not, write to the Free
 *      Software Foundation, Inc., 59 Temple Place - Suite 330,
 *      Boston, MA  02111-1307, USA.
 *      The full GNU General Public License is included in this
 *      distribution in the file called COPYING.
 *
 *
 *  ------------------------------------------------------------------
 *
 *      This simple allocator implementation provides a
 *      malloc()/free()-like interface for reserving space within a
 *      previously reserved block of memory.  It is not specific to
 *      any hardware, nor is it coupled with the lower level paging
 *      mechanism.
 *
 *      The primary goal of this implementation is to provide a means
 *      to partition an arbitrary block of memory without actually
 *      accessing the memory or incurring any hardware side-effects
 *      (e.g. paging).  It is, in effect, a bookkeeping mechanism for
 *      buffers.
 */
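
/*
 *      Illustrative usage sketch (not part of the original driver).
 *      The base address, capacity, and block size below are made-up
 *      values rather than a real RAR configuration; a 100000-byte
 *      request against 4096-byte blocks is rounded up to 25 blocks
 *      (102400 bytes):
 *
 *              struct memrar_allocator *a;
 *              unsigned long buf;
 *
 *              a = memrar_create_allocator(0x80000000,
 *                                          4 * 1024 * 1024,
 *                                          4096);
 *              if (a != NULL) {
 *                      buf = memrar_allocator_alloc(a, 100000);
 *
 *                      if (buf != 0)
 *                              memrar_allocator_free(a, buf);
 *
 *                      memrar_destroy_allocator(a);
 *              }
 */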


#include "memrar_allocator.h"
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/kernel.h>


struct memrar_allocator *memrar_create_allocator(unsigned long base,
                                                 size_t capacity,
                                                 size_t block_size)
{
        struct memrar_allocator *allocator  = NULL;
        struct memrar_address_ranges *first_node = NULL;

        /*
         * Make sure the base address is aligned on a block_size
         * boundary.
         *
         * @todo Is this necessary?
         */
        /* base = ALIGN(base, block_size); */

        /* Validate parameters.
         *
         * Make sure the full range [base, base + capacity) fits in
         * an unsigned long.  A zero capacity or block size is
         * obviously invalid.
         */
        if (base == 0
            || capacity == 0
            || block_size == 0
            || ULONG_MAX - capacity < base
            || capacity < block_size)
                return allocator;

        /*
         * There isn't much point in creating a memory allocator that
         * can only hold a single block, but we'll allow it and issue
         * a diagnostic.
         */
        WARN(capacity < block_size * 2,
             "memrar: Only one block available to allocator.\n");

        allocator = kmalloc(sizeof(*allocator), GFP_KERNEL);

        if (allocator == NULL)
                return allocator;

        mutex_init(&allocator->lock);
        allocator->base = base;

        /* Round the capacity down to a multiple of block_size. */
        allocator->capacity = (capacity / block_size) * block_size;
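        /*
         * For example, a capacity of 4100 bytes with a 1024-byte
         * block_size results in 4096 usable bytes; the trailing
         * 4 bytes are never handed out.
         */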

        allocator->block_size = block_size;

        allocator->largest_free_area = allocator->capacity;

        /* Initialize the handle and free lists. */
        INIT_LIST_HEAD(&allocator->allocated_list.list);
        INIT_LIST_HEAD(&allocator->free_list.list);

        first_node = kmalloc(sizeof(*first_node), GFP_KERNEL);
        if (first_node == NULL) {
                kfree(allocator);
                allocator = NULL;
        } else {
                /* Full range of blocks is available. */
                first_node->range.begin = base;
                first_node->range.end   = base + allocator->capacity;
                list_add(&first_node->list,
                         &allocator->free_list.list);
        }

        return allocator;
}

void memrar_destroy_allocator(struct memrar_allocator *allocator)
{
        /*
         * The caller must ensure that the allocator lock is not held
         * when this function is called.
         */

        struct memrar_address_ranges *pos = NULL;
        struct memrar_address_ranges *n   = NULL;

        if (allocator == NULL)
                return;

        mutex_lock(&allocator->lock);

        /* Reclaim free list resources. */
        list_for_each_entry_safe(pos,
                                 n,
                                 &allocator->free_list.list,
                                 list) {
                list_del(&pos->list);
                kfree(pos);
        }

        mutex_unlock(&allocator->lock);

        kfree(allocator);
}

unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
                                     size_t size)
{
        struct memrar_address_ranges *pos = NULL;

        size_t num_blocks;
        unsigned long reserved_bytes;

        /*
         * Address of allocated buffer.  We assume that zero is not a
         * valid address.
         */
        unsigned long addr = 0;

        if (allocator == NULL || size == 0)
                return addr;

        /* Reserve enough blocks to hold the number of bytes requested. */
        num_blocks = DIV_ROUND_UP(size, allocator->block_size);

        reserved_bytes = num_blocks * allocator->block_size;
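        /*
         * For example, a 100-byte request with a 64-byte block_size
         * reserves DIV_ROUND_UP(100, 64) = 2 blocks, i.e. 128 bytes.
         */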

        mutex_lock(&allocator->lock);

        if (reserved_bytes > allocator->largest_free_area) {
                mutex_unlock(&allocator->lock);
                return addr;
        }

        /*
         * Iterate through the free list to find a suitably sized
         * range of free contiguous memory blocks.
         *
         * We also take the opportunity to update the largest free
         * area statistic.
         */
        list_for_each_entry(pos, &allocator->free_list.list, list) {
                struct memrar_address_range * const fr = &pos->range;
                size_t const curr_size = fr->end - fr->begin;

                if (curr_size >= reserved_bytes && addr == 0) {
                        struct memrar_address_range *range = NULL;
                        struct memrar_address_ranges * const new_node =
                                kmalloc(sizeof(*new_node), GFP_KERNEL);

                        if (new_node == NULL)
                                break;

                        list_add(&new_node->list,
                                 &allocator->allocated_list.list);

                        /*
                         * Carve out area of memory from end of free
                         * range.
                         */
                        range        = &new_node->range;
                        range->end   = fr->end;
                        fr->end     -= reserved_bytes;
                        range->begin = fr->end;
                        addr         = range->begin;
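
                        /*
                         * For example (hypothetical addresses),
                         * carving 0x800 bytes out of a free range
                         * [0x1000, 0x3000) shrinks it to
                         * [0x1000, 0x2800) and returns the new
                         * allocation [0x2800, 0x3000), i.e.
                         * addr == 0x2800.
                         */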

                        /*
                         * Check whether the largest free area has
                         * shrunk.  If it has, we'll need to continue
                         * scanning for the new largest area.
                         */
                        if (curr_size == allocator->largest_free_area)
                                allocator->largest_free_area -=
                                        reserved_bytes;
                        else
                                break;
                }

                /*
                 * Update the largest free area statistic as needed,
                 * but only if we've actually allocated memory.
                 */
                if (addr != 0
                    && curr_size > allocator->largest_free_area) {
                        allocator->largest_free_area = curr_size;
                        break;
                }
        }

        mutex_unlock(&allocator->lock);

        return addr;
}

long memrar_allocator_free(struct memrar_allocator *allocator,
                           unsigned long addr)
{
        struct list_head *pos = NULL;
        struct list_head *tmp = NULL;
        struct list_head *dst = NULL;

        struct memrar_address_ranges      *allocated = NULL;
        struct memrar_address_range const *handle    = NULL;

        unsigned long old_end        = 0;
        unsigned long new_chunk_size = 0;

        if (allocator == NULL)
                return -EINVAL;

        if (addr == 0)
                return 0;  /* Ignore "free(0)". */

        mutex_lock(&allocator->lock);

        /* Find the corresponding handle. */
        list_for_each_entry(allocated,
                            &allocator->allocated_list.list,
                            list) {
                if (allocated->range.begin == addr) {
                        handle = &allocated->range;
                        break;
                }
        }

        /* No such buffer created by this allocator. */
        if (handle == NULL) {
                mutex_unlock(&allocator->lock);
                return -EFAULT;
        }

        /*
         * Coalesce adjacent chunks of memory if possible.
         *
         * @note This isn't full-blown coalescing since we're only
         *       coalescing at most three chunks of memory.
         */
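
        /*
         * For example (hypothetical addresses), if the free list
         * holds [0x1000, 0x2000) and [0x3000, 0x4000) and the buffer
         * being freed is [0x2000, 0x3000), one of the two merge
         * branches below collapses all three into a single
         * [0x1000, 0x4000) chunk.
         */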
        list_for_each_safe(pos, tmp, &allocator->free_list.list) {
                /* @todo O(n) performance.  Optimize. */

                struct memrar_address_range * const chunk =
                        &list_entry(pos,
                                    struct memrar_address_ranges,
                                    list)->range;

                /* Extend size of existing free adjacent chunk. */
                if (chunk->end == handle->begin) {
                        /*
                         * The chunk "less than" the one we're
                         * freeing is adjacent.
                         *
                         * Before:
                         *
                         *   +-----+------+
                         *   |chunk|handle|
                         *   +-----+------+
                         *
                         * After:
                         *
                         *   +------------+
                         *   |   chunk    |
                         *   +------------+
                         */

                        struct memrar_address_ranges const * const next =
                                list_entry(pos->next,
                                           struct memrar_address_ranges,
                                           list);

                        chunk->end = handle->end;

                        /*
                         * Now check if next free chunk is adjacent to
                         * the current extended free chunk.
                         *
                         * Before:
                         *
                         *   +------------+----+
                         *   |   chunk    |next|
                         *   +------------+----+
                         *
                         * After:
                         *
                         *   +-----------------+
                         *   |      chunk      |
                         *   +-----------------+
                         */
                        if (!list_is_singular(pos)
                            && chunk->end == next->range.begin) {
                                chunk->end = next->range.end;
                                list_del(pos->next);
                                kfree(next);
                        }

                        list_del(&allocated->list);

                        new_chunk_size = chunk->end - chunk->begin;

                        goto exit_memrar_free;

                } else if (handle->end == chunk->begin) {
                        /*
                         * The chunk "greater than" the one we're
                         * freeing is adjacent.
                         *
                         * Before:
                         *
                         *   +------+-----+
                         *   |handle|chunk|
                         *   +------+-----+
                         *
                         * After:
                         *
                         *   +------------+
                         *   |   chunk    |
                         *   +------------+
                         */

                        struct memrar_address_ranges const * const prev =
                                list_entry(pos->prev,
                                           struct memrar_address_ranges,
                                           list);

                        chunk->begin = handle->begin;

                        /*
                         * Now check if previous free chunk is
                         * adjacent to the current extended free
                         * chunk.
                         *
                         * Before:
                         *
                         *   +----+------------+
                         *   |prev|   chunk    |
                         *   +----+------------+
                         *
                         * After:
                         *
                         *   +-----------------+
                         *   |      chunk      |
                         *   +-----------------+
                         */
                        if (!list_is_singular(pos)
                            && prev->range.end == chunk->begin) {
                                chunk->begin = prev->range.begin;
                                list_del(pos->prev);
                                kfree(prev);
                        }

                        list_del(&allocated->list);

                        new_chunk_size = chunk->end - chunk->begin;

                        goto exit_memrar_free;

                } else if (chunk->end < handle->begin
                           && chunk->end > old_end) {
                        /*
                         * Keep track of where the entry should be
                         * moved from the "allocated" list to the
                         * "free" list if no coalescing occurs, so
                         * that the "free" list remains sorted.
                         */
                        old_end = chunk->end;
                        dst = pos;
                }
        }

        /*
         * Nothing to coalesce.
         *
         * Move the entry from the "allocated" list to the "free"
         * list.
         */
        list_move(&allocated->list, dst);
        new_chunk_size = handle->end - handle->begin;
        allocated = NULL;

exit_memrar_free:

        if (new_chunk_size > allocator->largest_free_area)
                allocator->largest_free_area = new_chunk_size;

        mutex_unlock(&allocator->lock);

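        /*
         * If the entry was moved to the free list above, "allocated"
         * is NULL and this kfree() is a no-op.  Otherwise the entry's
         * range has been folded into an existing free chunk and the
         * node itself is no longer needed.
         */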
        kfree(allocated);

        return 0;
}



/*
  Local Variables:
    c-file-style: "linux"
  End:
*/