/*
 * Contiguous Memory Allocator framework: Best Fit allocator
 * Copyright (c) 2010 by Samsung Electronics.
 * Written by Michal Nazarewicz (m.nazarewicz@samsung.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "cma: bf: " fmt

#ifdef CONFIG_CMA_DEBUG
#  define DEBUG
#endif

#include <linux/errno.h>	/* Error numbers */
#include <linux/slab.h>		/* kmalloc() */
#include <linux/rbtree.h>	/* rb_node, rb_root and helpers */

#include <linux/cma.h>		/* CMA structures */

/************************* Data Types *************************/

struct cma_bf_item {
	struct cma_chunk ch;
	struct rb_node by_size;
};

struct cma_bf_private {
	struct rb_root by_start_root;
	struct rb_root by_size_root;
};

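/*
 * Every free hole lives in two red-black trees at once: by_start_root,
 * sorted by the hole's starting address (used when merging adjacent
 * holes), and by_size_root, sorted by the hole's size (used for the
 * best-fit lookup).  The by-start node is the one embedded in struct
 * cma_chunk (ch.by_start); the by_size node is private to this allocator.
 */
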
/************************* Prototypes *************************/

/*
 * These helpers operate on holes only.  They must be called whenever a
 * hole's properties change, but also whenever a chunk becomes a hole or
 * a hole becomes a chunk.
 */
static void __cma_bf_hole_insert_by_size(struct cma_bf_item *item);
static void __cma_bf_hole_erase_by_size(struct cma_bf_item *item);
static int __must_check
__cma_bf_hole_insert_by_start(struct cma_bf_item *item);
static void __cma_bf_hole_erase_by_start(struct cma_bf_item *item);

/**
 * __cma_bf_hole_take - takes a chunk of memory out of a hole.
 * @hole: hole to take the chunk from
 * @size: chunk's size
 * @alignment: chunk's starting address alignment (must be a power of two)
 *
 * Takes a @size bytes large chunk from hole @hole, which must be able
 * to hold the chunk.  "Must be able" includes the alignment constraint
 * as well.
 *
 * Returns the allocated item or NULL on error (if kmalloc() failed).
 */
static struct cma_bf_item *__must_check
__cma_bf_hole_take(struct cma_bf_item *hole, size_t size, dma_addr_t alignment);

/**
 * __cma_bf_hole_merge_maybe - tries to merge a hole with its neighbours.
 * @item: hole to try and merge
 *
 * Which of the involved items is preserved is undefined, so callers must
 * not rely on it.
 */
static void __cma_bf_hole_merge_maybe(struct cma_bf_item *item);

/************************* Device API *************************/

int cma_bf_init(struct cma_region *reg)
{
	struct cma_bf_private *prv;
	struct cma_bf_item *item;

	prv = kzalloc(sizeof *prv, GFP_KERNEL);
	if (unlikely(!prv))
		return -ENOMEM;

	item = kzalloc(sizeof *item, GFP_KERNEL);
	if (unlikely(!item)) {
		kfree(prv);
		return -ENOMEM;
	}

	/* Initially the whole region is a single free hole. */
	item->ch.start = reg->start;
	item->ch.size  = reg->size;
	item->ch.reg   = reg;

	rb_root_init(&prv->by_start_root, &item->ch.by_start);
	rb_root_init(&prv->by_size_root, &item->by_size);

	reg->private_data = prv;
	return 0;
}

void cma_bf_cleanup(struct cma_region *reg)
{
	struct cma_bf_private *prv = reg->private_data;
	struct cma_bf_item *item =
		rb_entry(prv->by_size_root.rb_node,
			 struct cma_bf_item, by_size);

	/* We can assume there is only a single hole left in the trees. */
	WARN_ON(item->by_size.rb_left || item->by_size.rb_right ||
		item->ch.by_start.rb_left || item->ch.by_start.rb_right);

	kfree(item);
	kfree(prv);
}

struct cma_chunk *cma_bf_alloc(struct cma_region *reg,
			       size_t size, dma_addr_t alignment)
{
	struct cma_bf_private *prv = reg->private_data;
	struct rb_node *node = prv->by_size_root.rb_node;
	struct cma_bf_item *item = NULL;

	/* First find the smallest hole that is large enough (best fit). */
	while (node) {
		struct cma_bf_item *i =
			rb_entry(node, struct cma_bf_item, by_size);

		if (i->ch.size < size) {
			node = node->rb_right;
		} else {
			item = i;
			node = node->rb_left;
		}
	}
	if (!item)
		return NULL;

	/* Now look for a hole which can also satisfy the alignment. */
	node = &item->by_size;
	for (;;) {
		dma_addr_t start = ALIGN(item->ch.start, alignment);
		dma_addr_t end   = item->ch.start + item->ch.size;
		if (start < end && end - start >= size) {
			item = __cma_bf_hole_take(item, size, alignment);
			return likely(item) ? &item->ch : NULL;
		}

		/* Try the next hole of equal or larger size. */
		node = rb_next(node);
		if (!node)
			return NULL;

		item = rb_entry(node, struct cma_bf_item, by_size);
	}
}

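/*
 * Example (hypothetical sizes): with free holes of 0x1000, 0x3000 and
 * 0x8000 bytes, a request for 0x2000 bytes descends the by-size tree to
 * the 0x3000 hole, the smallest one that is big enough.  If that hole
 * cannot satisfy the alignment, rb_next() moves on to the 0x8000 hole,
 * so the walk only ever visits holes of equal or larger size.
 */
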
void cma_bf_free(struct cma_chunk *chunk)
{
	struct cma_bf_item *item = container_of(chunk, struct cma_bf_item, ch);

	/* Add the chunk back as a hole */
	if (unlikely(__cma_bf_hole_insert_by_start(item))) {
		/*
		 * We're screwed...  Just free the item and forget
		 * about it.  Things are broken beyond repair so there
		 * is no sense in trying to recover.
		 */
		kfree(item);
	} else {
		__cma_bf_hole_insert_by_size(item);

		/* Merge with the previous and next siblings */
		__cma_bf_hole_merge_maybe(item);
	}
}

/************************* Basic Tree Manipulation *************************/

static void __cma_bf_hole_insert_by_size(struct cma_bf_item *item)
{
	struct cma_bf_private *prv = item->ch.reg->private_data;
	struct rb_node **link = &prv->by_size_root.rb_node, *parent = NULL;
	const typeof(item->ch.size) value = item->ch.size;

	while (*link) {
		struct cma_bf_item *i;
		parent = *link;
		i = rb_entry(parent, struct cma_bf_item, by_size);
		/* Equal sizes are allowed; ties descend to the left. */
		link = value <= i->ch.size
			? &parent->rb_left
			: &parent->rb_right;
	}

	rb_link_node(&item->by_size, parent, link);
	rb_insert_color(&item->by_size, &prv->by_size_root);
}

static void __cma_bf_hole_erase_by_size(struct cma_bf_item *item)
{
	struct cma_bf_private *prv = item->ch.reg->private_data;
	rb_erase(&item->by_size, &prv->by_size_root);
}

static int __must_check
__cma_bf_hole_insert_by_start(struct cma_bf_item *item)
{
	struct cma_bf_private *prv = item->ch.reg->private_data;
	struct rb_node **link = &prv->by_start_root.rb_node, *parent = NULL;
	const typeof(item->ch.start) value = item->ch.start;

	while (*link) {
		struct cma_bf_item *i;
		parent = *link;
		i = rb_entry(parent, struct cma_bf_item, ch.by_start);

		if (WARN_ON(value == i->ch.start))
			/*
			 * This should *never* happen.  And I mean
			 * *never*.  We could even BUG on it but
			 * hopefully things are only a bit broken,
			 * ie. the system can still run.  We produce
			 * a warning and return an error.
			 */
			return -EBUSY;

		link = value <= i->ch.start
			? &parent->rb_left
			: &parent->rb_right;
	}

	rb_link_node(&item->ch.by_start, parent, link);
	rb_insert_color(&item->ch.by_start, &prv->by_start_root);
	return 0;
}

static void __cma_bf_hole_erase_by_start(struct cma_bf_item *item)
{
	struct cma_bf_private *prv = item->ch.reg->private_data;
	rb_erase(&item->ch.by_start, &prv->by_start_root);
}

/************************* More Tree Manipulation *************************/

static struct cma_bf_item *__must_check
__cma_bf_hole_take(struct cma_bf_item *hole, size_t size, dma_addr_t alignment)
{
	struct cma_bf_item *item;

	/*
	 * There are three cases:
	 * 1. the chunk takes the whole hole,
	 * 2. the chunk is at the beginning or at the end of the hole, or
	 * 3. the chunk is in the middle of the hole.
	 */
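	/*
	 * A sketch of the three layouts (addresses grow to the right; the
	 * picture is purely illustrative):
	 *
	 *   case 1:  [########## chunk ##########]    chunk covers the hole
	 *   case 2:  [#### chunk ####|    hole   ]    (or mirrored) the hole
	 *                                             merely shrinks
	 *   case 3:  [ hole |#### chunk ####| tail]   a tail hole is split
	 *                                             off, reducing case 3
	 *                                             to case 2
	 */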
	/* Case 1, the chunk takes the whole hole */
	if (size == hole->ch.size) {
		__cma_bf_hole_erase_by_size(hole);
		__cma_bf_hole_erase_by_start(hole);
		return hole;
	}

	/* Allocate an item for the new chunk */
	item = kmalloc(sizeof *item, GFP_KERNEL);
	if (unlikely(!item))
		return NULL;

	item->ch.start = ALIGN(hole->ch.start, alignment);
	item->ch.size  = size;

	/* Case 3, the chunk is in the middle of the hole */
	if (item->ch.start != hole->ch.start
	 && item->ch.start + item->ch.size !=
	    hole->ch.start + hole->ch.size) {
		struct cma_bf_item *tail;

		/*
		 * Space between the end of the chunk and the end of
		 * the hole, ie. space left after the end of the
		 * chunk.  If this is divisible by alignment we can
		 * move the chunk to the end of the hole.
		 */
		size_t left =
			hole->ch.start + hole->ch.size -
			(item->ch.start + item->ch.size);
		if (left % alignment == 0) {
			item->ch.start += left;
			goto case_2;
		}

		/*
		 * We are going to add a hole at the end.  This way,
		 * we will reduce the problem to case 2 -- the chunk
		 * will be at the end of the hole.
		 */
		tail = kmalloc(sizeof *tail, GFP_KERNEL);
		if (unlikely(!tail)) {
			kfree(item);
			return NULL;
		}

		tail->ch.start = item->ch.start + item->ch.size;
		tail->ch.size  =
			hole->ch.start + hole->ch.size - tail->ch.start;
		tail->ch.reg   = hole->ch.reg;

		if (unlikely(__cma_bf_hole_insert_by_start(tail))) {
			/*
			 * Things are broken beyond repair...  Abort
			 * inserting the hole but still continue with
			 * the allocation (seems like the best we can do).
			 */
			hole->ch.size = tail->ch.start - hole->ch.start;
			kfree(tail);
		} else {
			__cma_bf_hole_insert_by_size(tail);
			/*
			 * It's important that we first insert the new
			 * hole in the tree sorted by size and only then
			 * reduce the size of the old hole.  The old
			 * hole's position in the by-size tree is fixed
			 * up by the code that handles case 2.
			 */
			hole->ch.size = tail->ch.start - hole->ch.start;
		}
	}

	/* Case 2, the chunk is at the beginning or at the end of the hole */
case_2:
	/* No need to update the by-start tree; the order is preserved. */
	if (item->ch.start == hole->ch.start)
		hole->ch.start += item->ch.size;

	/* Alter the hole's size and reposition it in the by-size tree */
	hole->ch.size -= size;
	__cma_bf_hole_erase_by_size(hole);
	__cma_bf_hole_insert_by_size(hole);

	return item;
}

static void __cma_bf_hole_merge_maybe(struct cma_bf_item *item)
{
	struct cma_bf_item *prev;
	struct rb_node *node;
	int twice = 2;

	/* Try the previous neighbour first, then the next one. */
	node = rb_prev(&item->ch.by_start);
	if (unlikely(!node))
		goto next;
	prev = rb_entry(node, struct cma_bf_item, ch.by_start);

	for (;;) {
		if (prev->ch.start + prev->ch.size == item->ch.start) {
			/* Remove the previous hole from both trees */
			__cma_bf_hole_erase_by_size(prev);
			__cma_bf_hole_erase_by_start(prev);
			/* Grow this hole to cover the previous one */
			item->ch.size += prev->ch.size;
			item->ch.start = prev->ch.start;
			__cma_bf_hole_erase_by_size(item);
			__cma_bf_hole_insert_by_size(item);
			/* The by-start order is unchanged; no update needed */
			kfree(prev);
		}

next:
		if (!--twice)
			break;
		node = rb_next(&item->ch.by_start);
		if (unlikely(!node))
			break;
		prev = item;
		item = rb_entry(node, struct cma_bf_item, ch.by_start);
	}
}

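/*
 * For example (hypothetical addresses): freeing a chunk spanning
 * 0x2000-0x3000 that sits between holes 0x1000-0x2000 and 0x3000-0x4000
 * first absorbs the previous hole (giving 0x1000-0x3000) and is then
 * absorbed by the next one, so a single 0x1000-0x4000 hole remains in
 * both trees.
 */
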
/************************* Register *************************/

static int cma_bf_module_init(void)
{
	static struct cma_allocator alloc = {
		.name    = "bf",
		.init    = cma_bf_init,
		.cleanup = cma_bf_cleanup,
		.alloc   = cma_bf_alloc,
		.free    = cma_bf_free,
	};
	return cma_allocator_register(&alloc);
}
subsys_initcall_sync(cma_bf_module_init);
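/*
 * Usage sketch (illustrative only; in practice the CMA core invokes the
 * hooks above through the registered cma_allocator, and the region below
 * is hypothetical):
 *
 *	static struct cma_region reg = {
 *		.start = 0x40000000,
 *		.size  = 16 << 20,
 *	};
 *	struct cma_chunk *chunk;
 *
 *	if (!cma_bf_init(&reg)) {
 *		chunk = cma_bf_alloc(&reg, 1 << 20, 1 << 16);
 *		if (chunk)
 *			cma_bf_free(chunk);
 *		cma_bf_cleanup(&reg);
 *	}
 */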