1 /* Memory allocator `malloc'.
2 Copyright 1990, 1991, 1992, 1993 Free Software Foundation
3 Written May 1989 by Mike Haertel.
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Library General Public License as
7 published by the Free Software Foundation; either version 2 of the
8 License, or (at your option) any later version.
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Library General Public License for more details.
15 You should have received a copy of the GNU Library General Public
16 License along with this library; see the file COPYING.LIB. If
17 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
18 Cambridge, MA 02139, USA.
20 The author may be reached (Email) at the address mike@ai.mit.edu,
21 or (US mail) as Mike Haertel c/o Free Software Foundation. */
/* NOTE(review): this is a fragmentary numbered listing — the printed line
   numbers skip, so several declarations are elided from this excerpt.
   Comments below flag the visible gaps; the code text is left untouched. */
23 #ifndef _MALLOC_INTERNAL
24 #define _MALLOC_INTERNAL
/* NOTE(review): printed lines 25-27 elided — presumably #include of the
   malloc internals header; confirm against the full source. */
28 /* How to really get more memory. */
/* Function pointer through which all core is obtained; defaults to the
   sbrk-style __default_morecore.  Takes a (possibly negative) size delta. */
29 __ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;
31 /* Debugging hook for `malloc'. */
/* When non-NULL, malloc() tail-calls this hook instead of allocating. */
32 __ptr_t (*__malloc_hook) __P ((size_t __size));
34 /* Pointer to the base of the first block. */
/* NOTE(review): the `char *_heapbase;' declaration itself (printed lines
   35-36) is elided here; initialize() below assigns to it. */
37 /* Block information table. Allocated with align/__free (not malloc/free). */
38 malloc_info *_heapinfo;
40 /* Number of info entries. */
41 static size_t heapsize;
43 /* Search index in the info table. */
/* NOTE(review): `static size_t _heapindex;' (printed lines 44-45) elided. */
46 /* Limit of valid info table indices. */
/* NOTE(review): `static size_t _heaplimit;' (printed lines 47-48) elided. */
49 /* Count of large blocks allocated for each fragment size. */
50 int _fragblocks[BLOCKLOG];
52 /* Free lists for each fragment size. */
/* _fraghead[log] heads a doubly linked list of free (1 << log)-byte
   fragments; see the fragment code in malloc() below. */
53 struct list _fraghead[BLOCKLOG];
55 /* Instrumentation. */
/* NOTE(review): the statistics counters (_chunks_used, _bytes_used,
   _chunks_free, _bytes_free — printed lines 56-60) are elided from this
   excerpt but are updated throughout malloc() below. */
61 /* Are you experienced? */
62 int __malloc_initialized;
/* Hook run after each call through __morecore (e.g. for relocating data). */
64 void (*__after_morecore_hook) __P ((void));
66 /* Aligned allocation. */
/* align (SIZE): obtain SIZE bytes from __morecore and round the result up
   to a BLOCKSIZE boundary, requesting the extra adjustment bytes from the
   system so the returned region is fully usable.  Runs the
   __after_morecore_hook afterwards.
   NOTE(review): the function header, the `__ptr_t result;' declaration
   (printed lines 68-72), the `if (adj != 0)' guard (line 74/78-79), and the
   final `return result;' (lines 83-89) are elided from this excerpt. */
67 static __ptr_t align __P ((size_t));
73 unsigned long int adj;
75 result = (*__morecore) (size);
/* Misalignment of RESULT within its BLOCKSIZE block, computed via a
   pointer-difference-from-NULL cast (pre-uintptr_t idiom of this era). */
76 adj = (unsigned long int) ((unsigned long int) ((char *) result -
77 (char *) NULL)) % BLOCKSIZE;
/* Presumably guarded by the elided `if (adj != 0)': grab the remaining
   bytes up to the next block boundary and advance RESULT past them. */
80 adj = BLOCKSIZE - adj;
81 (void) (*__morecore) (adj);
82 result = (char *) result + adj;
85 if (__after_morecore_hook)
86 (*__after_morecore_hook) ();
91 /* Set everything up and remember that we have. */
/* initialize (): one-time heap setup — allocate the initial info table
   (HEAP / BLOCKSIZE entries) with align(), zero it, and set entry 0 as an
   empty free-list anchor.  Sets __malloc_initialized on success.
   NOTE(review): the function header, braces, and return statements
   (printed lines 93-95, 98, 100-103, 107, 110-111) are elided; the
   `if (_heapinfo == NULL)' on line 102 presumably returns 0 (failure). */
92 static int initialize __P ((void));
96 heapsize = HEAP / BLOCKSIZE;
97 _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
/* The info table itself is counted as used bytes. */
99 _bytes_used = heapsize * sizeof (malloc_info);
102 if (_heapinfo == NULL)
104 memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
/* Entry 0 is the free-list header: size 0, circular links to itself. */
105 _heapinfo[0].free.size = 0;
106 _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
/* BLOCK()/ADDRESS() translate relative to _heapbase, so the base is the
   info table's own address. */
108 _heapbase = (char *) _heapinfo;
109 __malloc_initialized = 1;
113 /* Get neatly aligned memory, initializing or
114 growing the heap info table as necessary. */
/* morecore (SIZE): align()-allocate SIZE bytes of new core; if the new
   region extends past the current info table's coverage, allocate a larger
   table, copy the old one over, and mark the old table's blocks as a busy
   region so free() can later reclaim them.  Updates _heaplimit.
   NOTE(review): the function header, local declarations (`result',
   `newsize'), the doubling loop body, the NULL checks, the swap of
   _heapinfo/oldinfo, the free() of the old table, and the `return result;'
   (printed lines 116-120, 122-123, 125-127, 130-131, 133, 135-136,
   138-140, 142-143, 146, 150-154, 156-157) are elided. */
115 static __ptr_t morecore __P ((size_t));
121 malloc_info *newinfo, *oldinfo;
124 result = align (size);
128 /* Check if we need to grow the info table. */
129 if ((size_t) BLOCK ((char *) result + size) > heapsize)
/* Presumably doubles `newsize' until the table covers the new top block. */
132 while ((size_t) BLOCK ((char *) result + size) > newsize)
134 newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
/* On table-allocation failure: give the freshly obtained core back to the
   system by requesting a negative delta (sbrk-style shrink). */
137 (*__morecore) (-size);
141 _bytes_used += newsize * sizeof (malloc_info);
144 memset (newinfo, 0, newsize * sizeof (malloc_info));
145 memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
/* Record the OLD table's region as an ordinary busy block (type 0) in the
   NEW table, so it can be freed back into the heap. */
147 newinfo[BLOCK (oldinfo)].busy.type = 0;
148 newinfo[BLOCK (oldinfo)].busy.info.size
149 = BLOCKIFY (heapsize * sizeof (malloc_info));
/* New top of the valid info-table index range. */
155 _heaplimit = BLOCK ((char *) result + size);
159 /* Allocate memory from the heap. */
/* malloc (SIZE): two-policy allocator.  Requests of at most BLOCKSIZE/2
   are served as power-of-two fragments carved from a dedicated block;
   larger requests get whole blocks from a circular first-fit free list,
   extending core via morecore() when the list has no fit.
   NOTE(review): this excerpt is elided at many points (function header,
   local declarations of `result'/`next'/`i', braces, early returns, the
   NULL checks after malloc/morecore, and everything after printed line
   320 including the final `return result;'). */
165 size_t block, blocks, lastblocks, start;
/* Debugging hook takes over the whole allocation when installed. */
172 if (__malloc_hook != NULL)
173 return (*__malloc_hook) (size);
/* Presumably calls initialize() here and fails if it fails (elided). */
175 if (!__malloc_initialized)
/* Every chunk must be able to hold free-list links once freed. */
179 if (size < sizeof (struct list))
180 size = sizeof (struct list);
182 /* Determine the allocation policy based on the request size. */
183 if (size <= BLOCKSIZE / 2)
185 /* Small allocation to receive a fragment of a block.
186 Determine the logarithm to base two of the fragment size. */
187 register size_t log = 1;
/* After this loop, 1 << log is the smallest power of two >= request. */
189 while ((size /= 2) != 0)
192 /* Look in the fragment lists for a
193 free fragment of the desired size. */
194 next = _fraghead[log].next;
/* Presumably `if (next != NULL)' guards this branch (elided). */
197 /* There are free fragments of this size.
198 Pop a fragment out of the fragment list and return it.
199 Update the block's nfree and first counters. */
200 result = (__ptr_t) next;
201 next->prev->next = next->next;
202 if (next->next != NULL)
203 next->next->prev = next->prev;
204 block = BLOCK (result);
/* While fragments remain, `first' tracks a remaining free fragment's
   index within the block (offset / fragment size). */
205 if (--_heapinfo[block].busy.info.frag.nfree != 0)
206 _heapinfo[block].busy.info.frag.first = (unsigned long int)
207 ((unsigned long int) ((char *) next->next - (char *) NULL)
210 /* Update the statistics. */
212 _bytes_used += 1 << log;
214 _bytes_free -= 1 << log;
218 /* No free fragments of the desired size, so get a new block
219 and break it into fragments, returning the first. */
/* Recursive call: BLOCKSIZE > BLOCKSIZE/2, so this takes the large-
   allocation path below and cannot recurse again. */
220 result = malloc (BLOCKSIZE);
225 /* Link all fragments but the first into the free list. */
226 for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i)
228 next = (struct list *) ((char *) result + (i << log));
229 next->next = _fraghead[log].next;
230 next->prev = &_fraghead[log];
231 next->prev->next = next;
232 if (next->next != NULL)
233 next->next->prev = next;
236 /* Initialize the nfree and first counters for this block. */
237 block = BLOCK (result);
/* busy.type = log marks this block as fragmented into (1 << log)-byte
   pieces; type 0 means a whole-block allocation. */
238 _heapinfo[block].busy.type = log;
239 _heapinfo[block].busy.info.frag.nfree = i - 1;
240 _heapinfo[block].busy.info.frag.first = i - 1;
242 _chunks_free += (BLOCKSIZE >> log) - 1;
/* The whole block was counted as used by the recursive malloc; move the
   unreturned fragments' bytes back to the free tally. */
243 _bytes_free += BLOCKSIZE - (1 << log);
244 _bytes_used -= BLOCKSIZE - (1 << log);
249 /* Large allocation to receive one or more blocks.
250 Search the free list in a circle starting at the last place visited.
251 If we loop completely around without finding a large enough
252 space we will have to get more memory from the system. */
253 blocks = BLOCKIFY (size);
254 start = block = _heapindex;
255 while (_heapinfo[block].free.size < blocks)
257 block = _heapinfo[block].free.next;
/* Presumably `if (block == start)' (wrapped all the way around) enters
   the grow-the-heap branch below (guard elided). */
260 /* Need to get more from the system. Check to see if
261 the new core will be contiguous with the final free
262 block; if so we don't need to get as much. */
263 block = _heapinfo[0].free.prev;
264 lastblocks = _heapinfo[block].free.size;
/* (*__morecore)(0) returns the current break; if it equals the end of the
   final free block, new core will extend that block, so only request the
   shortfall rather than the full amount. */
265 if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
266 (*__morecore) (0) == ADDRESS (block + lastblocks) &&
267 (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
269 /* Note that morecore() can change the location of
270 the final block if it moves the info table and the
271 old one gets coalesced into the final block. */
272 block = _heapinfo[0].free.prev;
273 _heapinfo[block].free.size += blocks - lastblocks;
/* Non-contiguous case: get all the blocks at once and return the fresh
   region directly, bypassing the free list. */
276 result = morecore (blocks * BLOCKSIZE);
279 block = BLOCK (result);
280 _heapinfo[block].busy.type = 0;
281 _heapinfo[block].busy.info.size = blocks;
283 _bytes_used += blocks * BLOCKSIZE;
288 /* At this point we have found a suitable free list entry.
289 Figure out how to remove what we need from the list. */
290 result = ADDRESS (block);
291 if (_heapinfo[block].free.size > blocks)
293 /* The block we found has a bit left over,
294 so relink the tail end back into the free list. */
295 _heapinfo[block + blocks].free.size
296 = _heapinfo[block].free.size - blocks;
297 _heapinfo[block + blocks].free.next
298 = _heapinfo[block].free.next;
299 _heapinfo[block + blocks].free.prev
300 = _heapinfo[block].free.prev;
/* Splice the tail in where the old entry was, and resume the next search
   at the tail (_heapindex). */
301 _heapinfo[_heapinfo[block].free.prev].free.next
302 = _heapinfo[_heapinfo[block].free.next].free.prev
303 = _heapindex = block + blocks;
307 /* The block exactly matches our requirements,
308 so just remove it from the list. */
309 _heapinfo[_heapinfo[block].free.next].free.prev
310 = _heapinfo[block].free.prev;
311 _heapinfo[_heapinfo[block].free.prev].free.next
312 = _heapindex = _heapinfo[block].free.next;
/* Mark the claimed region busy (type 0 = whole-block run of `blocks'). */
316 _heapinfo[block].busy.type = 0;
317 _heapinfo[block].busy.info.size = blocks;
319 _bytes_used += blocks * BLOCKSIZE;
320 _bytes_free -= blocks * BLOCKSIZE;