/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA.
 * All Rights Reserved.
 * Copyright 2009 VMware, Inc., Palo Alto, CA., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Note that this implementation is more or less identical to the drm core
 * manager in the Linux kernel.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "wsbm_mm.h"

#include <errno.h>
#include <stdlib.h>
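/*
 * Typical usage (a minimal sketch, not part of this file; the range,
 * sizes and alignment below are made-up example values):
 *
 *     struct _WsbmMM mm;
 *     struct _WsbmMMNode *node;
 *
 *     if (wsbmMMinit(&mm, 0, 1024 * 1024) != 0)
 *         return;
 *
 *     node = wsbmMMSearchFree(&mm, 4096, 4096, 0);
 *     if (node != NULL)
 *         node = wsbmMMGetBlock(node, 4096, 4096);
 *
 *     if (node != NULL) {
 *         ... use [node->start, node->start + node->size) ...
 *         wsbmMMPutBlock(node);
 *     }
 *
 *     wsbmMMtakedown(&mm);
 */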
/*
 * Return the size of the free block at the end of the managed range,
 * or zero if the last block is allocated.
 */
unsigned long
wsbmMMTailSpace(struct _WsbmMM *mm)
{
    struct _WsbmListHead *tail_node;
    struct _WsbmMMNode *entry;

    tail_node = mm->ml_entry.prev;
    entry = WSBMLISTENTRY(tail_node, struct _WsbmMMNode, ml_entry);

    if (!entry->free)
        return 0;

    return entry->size;
}
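/*
 * Shrink the managed range by taking "size" bytes off its tail. Fails
 * with -ENOMEM unless the tail block is free and strictly larger than
 * the amount to remove.
 */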
int
wsbmMMRemoveSpaceFromTail(struct _WsbmMM *mm, unsigned long size)
{
    struct _WsbmListHead *tail_node;
    struct _WsbmMMNode *entry;

    tail_node = mm->ml_entry.prev;
    entry = WSBMLISTENTRY(tail_node, struct _WsbmMMNode, ml_entry);

    if (!entry->free)
        return -ENOMEM;

    if (entry->size <= size)
        return -ENOMEM;

    entry->size -= size;
    return 0;
}
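/*
 * Append a new free node covering [start, start + size) to the end of
 * both the memory list and the free stack.
 */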
static int
wsbmMMCreateTailNode(struct _WsbmMM *mm,
                     unsigned long start, unsigned long size)
{
    struct _WsbmMMNode *child;

    child = (struct _WsbmMMNode *)malloc(sizeof(*child));
    if (!child)
        return -ENOMEM;

    child->free = 1;
    child->size = size;
    child->start = start;
    child->mm = mm;

    WSBMLISTADDTAIL(&child->ml_entry, &mm->ml_entry);
    WSBMLISTADDTAIL(&child->fl_entry, &mm->fl_entry);

    return 0;
}
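/*
 * Split "size" bytes off the start of "parent", returning the new
 * allocated node linked just before the parent in the memory list, or
 * NULL if allocating the node itself fails. The parent keeps the
 * remainder of the range.
 */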
static struct _WsbmMMNode *
wsbmMMSplitAtStart(struct _WsbmMMNode *parent, unsigned long size)
{
    struct _WsbmMMNode *child;

    child = (struct _WsbmMMNode *)malloc(sizeof(*child));
    if (!child)
        return NULL;

    WSBMINITLISTHEAD(&child->fl_entry);

    child->free = 0;
    child->size = size;
    child->start = parent->start;
    child->mm = parent->mm;

    WSBMLISTADDTAIL(&child->ml_entry, &parent->ml_entry);

    parent->size -= size;
    parent->start += size;
    return child;
}
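/*
 * Claim "size" bytes from the free node "parent" (normally found with
 * wsbmMMSearchFree), splitting off a misaligned head first if needed.
 * Returns the allocated node, or NULL on allocation failure.
 */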
struct _WsbmMMNode *
wsbmMMGetBlock(struct _WsbmMMNode *parent,
               unsigned long size, unsigned alignment)
{
    struct _WsbmMMNode *align_splitoff = NULL;
    struct _WsbmMMNode *child;
    unsigned tmp = 0;

    if (alignment)
        tmp = parent->start % alignment;

    if (tmp) {
        /* Split off the misaligned head so the returned block is aligned. */
        align_splitoff = wsbmMMSplitAtStart(parent, alignment - tmp);
        if (!align_splitoff)
            return NULL;
    }

    if (parent->size == size) {
        /* Exact fit: take the whole free block off the free stack. */
        WSBMLISTDELINIT(&parent->fl_entry);
        parent->free = 0;
        child = parent;
    } else {
        child = wsbmMMSplitAtStart(parent, size);
    }

    if (align_splitoff)
        wsbmMMPutBlock(align_splitoff);

    return child;
}
/*
 * Put a block. Merge with the previous and / or next block if they are free.
 * Otherwise add to the free stack.
 */
void
wsbmMMPutBlock(struct _WsbmMMNode *cur)
{
    struct _WsbmMM *mm = cur->mm;
    struct _WsbmListHead *cur_head = &cur->ml_entry;
    struct _WsbmListHead *root_head = &mm->ml_entry;
    struct _WsbmMMNode *prev_node = NULL;
    struct _WsbmMMNode *next_node;
    int merged = 0;

    if (cur_head->prev != root_head) {
        prev_node =
            WSBMLISTENTRY(cur_head->prev, struct _WsbmMMNode, ml_entry);
        if (prev_node->free) {
            /* Grow the free predecessor over this block. */
            prev_node->size += cur->size;
            merged = 1;
        }
    }
    if (cur_head->next != root_head) {
        next_node =
            WSBMLISTENTRY(cur_head->next, struct _WsbmMMNode, ml_entry);
        if (next_node->free) {
            if (merged) {
                /* Both neighbors free: fold the successor into the
                 * predecessor and drop it entirely. */
                prev_node->size += next_node->size;
                WSBMLISTDEL(&next_node->ml_entry);
                WSBMLISTDEL(&next_node->fl_entry);
                free(next_node);
            } else {
                /* Grow the free successor backwards over this block. */
                next_node->size += cur->size;
                next_node->start = cur->start;
                merged = 1;
            }
        }
    }
    if (!merged) {
        cur->free = 1;
        WSBMLISTADD(&cur->fl_entry, &mm->fl_entry);
    } else {
        WSBMLISTDEL(&cur->ml_entry);
        free(cur);
    }
}
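/*
 * Scan the free stack for a block that can hold "size" bytes at the
 * given alignment. With best_match == 0 the first fit is returned;
 * otherwise the smallest fitting block found. Pair with wsbmMMGetBlock
 * to actually claim the space.
 */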
struct _WsbmMMNode *
wsbmMMSearchFree(const struct _WsbmMM *mm,
                 unsigned long size, unsigned alignment, int best_match)
{
    struct _WsbmListHead *list;
    const struct _WsbmListHead *free_stack = &mm->fl_entry;
    struct _WsbmMMNode *entry;
    struct _WsbmMMNode *best;
    unsigned long best_size;
    unsigned wasted;

    best = NULL;
    best_size = ~0UL;

    WSBMLISTFOREACH(list, free_stack) {
        entry = WSBMLISTENTRY(list, struct _WsbmMMNode, fl_entry);

        wasted = 0;

        if (entry->size < size)
            continue;

        if (alignment) {
            register unsigned tmp = entry->start % alignment;

            if (tmp)
                wasted += alignment - tmp;
        }

        if (entry->size >= size + wasted) {
            if (!best_match)
                return entry;
            if (entry->size < best_size) {
                best = entry;
                best_size = entry->size;
            }
        }
    }

    return best;
}
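/*
 * Return nonzero if the manager holds exactly one node, i.e. every
 * allocation has been returned and merged back into a single free block.
 */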
int
wsbmMMclean(struct _WsbmMM *mm)
{
    struct _WsbmListHead *head = &mm->ml_entry;

    return (head->next->next == head);
}
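/*
 * Initialize the manager to cover [start, start + size) as one free
 * block. Returns 0 on success or -ENOMEM.
 */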
int
wsbmMMinit(struct _WsbmMM *mm, unsigned long start, unsigned long size)
{
    WSBMINITLISTHEAD(&mm->ml_entry);
    WSBMINITLISTHEAD(&mm->fl_entry);

    return wsbmMMCreateTailNode(mm, start, size);
}
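/*
 * Tear down a clean manager. If blocks are still allocated (the
 * remaining free node is not the only node on either list), bail out
 * without freeing anything.
 */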
void
wsbmMMtakedown(struct _WsbmMM *mm)
{
    struct _WsbmListHead *bnode = mm->fl_entry.next;
    struct _WsbmMMNode *entry;

    entry = WSBMLISTENTRY(bnode, struct _WsbmMMNode, fl_entry);

    if (entry->ml_entry.next != &mm->ml_entry ||
        entry->fl_entry.next != &mm->fl_entry) {
        return;
    }

    WSBMLISTDEL(&entry->fl_entry);
    WSBMLISTDEL(&entry->ml_entry);
    free(entry);
}