/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.

   This file is part of GNU CC.

   GNU CC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GNU CC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GNU CC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */

#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP

#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP

#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif
/* This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
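
/* A worked example of the rounding, with numbers assumed purely for
   illustration: on a host with 4096-byte pages, a 10-byte request is
   rounded up to 16 bytes (order 4), and an order-4 page holds
   4096 / 16 == 256 objects, tracked by a 256-bit in-use bitmap (plus
   one sentinel bit; see below).  */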
/* Define GGC_POISON to poison memory marked unused by the collector.  */
#undef GGC_POISON

/* Define GGC_ALWAYS_COLLECT to perform collection every time
   ggc_collect is invoked.  Otherwise, collection is performed only
   when a significant amount of memory has been allocated since the
   last collection.  */
#undef GGC_ALWAYS_COLLECT

#ifdef ENABLE_GC_CHECKING
#define GGC_POISON
#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
#define GGC_ALWAYS_COLLECT
#endif
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

   msb +----------------+----+------+--------+ lsb
       |   high bits    | L1 |  L2  | offset |
       +----------------+----+------+--------+

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)
#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
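
/* A worked example of the decomposition, assuming a 32-bit host with
   4096-byte pages (G.lg_pagesize == 12, hence PAGE_L2_BITS == 12):

     p             == 0xdeadbeef
     LOOKUP_L1 (p) == (0xdeadbeef >> 24) & 0xff  == 0xde
     LOOKUP_L2 (p) == (0xdeadbeef >> 12) & 0xfff == 0xadb

   The low 12 bits (0xeef) are the object's offset within its page.  */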
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS \
  (sizeof (extra_order_size_table) / sizeof (extra_order_size_table[0]))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  sizeof (struct tree_decl),
  sizeof (struct tree_list)
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
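
/* For example, on a host with 32-bit pointers and the two extra orders
   above, NUM_ORDERS is 32 + 2 == 34.  */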
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
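
/* The offsetof trick works because the compiler must pad the leading
   char so that the union member U is suitably aligned for its most
   demanding member.  For instance, if doubles require 8-byte
   alignment, offsetof (struct max_alignment, u) evaluates to 8.  */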
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* Saved in-use bit vector for pages that aren't in the topmost
     context during collection.  */
  unsigned long *save_in_use_p;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of the size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two-level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two-level page tables plus a linked
   list that disambiguates the top 32 bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size, and its base-2 log.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The stream used for debugging output.  */
  FILE *debug_file;
} G;
/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
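
/* For example, a page holding 255 objects needs 255 + 1 == 256 bits
   (one per object plus the sentinel); on a host with 32-bit longs
   that is CEIL (256, 32) * sizeof (long) == 8 * 4 == 32 bytes.  */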
/* Skip garbage collection if the current allocation is not at least
   this factor times the allocation at the end of the last collection.
   In other words, total allocation must expand by (this factor minus
   one) before collection is performed.  */
#define GGC_MIN_EXPAND_FOR_GC (1.3)

/* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
   test from triggering too often when the heap is small.  */
#define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
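
/* For example, if the last collection ended with 10MB still
   allocated, the next ggc_collect call does nothing until allocation
   reaches 1.3 * 10MB == 13MB.  For small heaps the 4MB floor
   dominates: no collection happens before 1.3 * 4MB == 5.2MB.  */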
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  */
#define GGC_QUIRE_SIZE 16
static int ggc_allocated_p PARAMS ((const void *));
static page_entry *lookup_page_table_entry PARAMS ((const void *));
static void set_page_table_entry PARAMS ((void *, page_entry *));
#ifdef USING_MMAP
static char *alloc_anon PARAMS ((char *, size_t));
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index PARAMS ((char *, char *));
static void set_page_group_in_use PARAMS ((page_group *, char *));
static void clear_page_group_in_use PARAMS ((page_group *, char *));
#endif
static struct page_entry * alloc_page PARAMS ((unsigned));
static void free_page PARAMS ((struct page_entry *));
static void release_pages PARAMS ((void));
static void clear_marks PARAMS ((void));
static void sweep_pages PARAMS ((void));
static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
#ifdef GGC_POISON
static void poison_pages PARAMS ((void));
#endif

void debug_print_page_list PARAMS ((int));
/* Returns non-zero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table && table->high_bits != high_bits)
    table = table->next;
  if (table == NULL)
    return 0;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (p, entry)
     void *p;
     page_entry *entry;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof(*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (order)
     int order;
{
  page_entry *p;

  printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
	  (PTR) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

#ifdef USING_MMAP
static inline char *
alloc_anon (pref, size)
     char *pref ATTRIBUTE_UNUSED;
     size_t size;
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (allocation, page)
     char *allocation, *page;
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (group, page)
     page_group *group;
     char *page;
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (group, page)
     page_group *group;
     char *page;
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
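
/* A worked example, assuming 4096-byte pages: for the page starting
   at group->allocation + 3 * 4096, page_group_index returns 3, so
   set_page_group_in_use sets bit 3 (mask 0x8) in group->in_use, and
   clear_page_group_in_use clears it again when the page is freed.  */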
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (order)
     unsigned order;
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = (struct page_entry *) xcalloc (1, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
637 #ifdef USING_MALLOC_PAGE_GROUPS
640 /* Allocate a large block of memory and serve out the aligned
641 pages therein. This results in much less memory wastage
642 than the traditional implementation of valloc. */
644 char *allocation, *a, *enda;
645 size_t alloc_size, head_slop, tail_slop;
646 int multiple_pages = (entry_size == G.pagesize);
649 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
651 alloc_size = entry_size + G.pagesize - 1;
652 allocation = xmalloc (alloc_size);
654 page = (char *)(((size_t) allocation + G.pagesize - 1) & -G.pagesize);
655 head_slop = page - allocation;
657 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
659 tail_slop = alloc_size - entry_size - head_slop;
660 enda = allocation + alloc_size - tail_slop;
662 /* We allocated N pages, which are likely not aligned, leaving
663 us with N-1 usable pages. We plan to place the page_group
664 structure somewhere in the slop. */
665 if (head_slop >= sizeof (page_group))
666 group = (page_group *)page - 1;
669 /* We magically got an aligned allocation. Too bad, we have
670 to waste a page anyway. */
674 tail_slop += G.pagesize;
676 if (tail_slop < sizeof (page_group))
678 group = (page_group *)enda;
679 tail_slop -= sizeof (page_group);
682 /* Remember that we allocated this memory. */
683 group->next = G.page_groups;
684 group->allocation = allocation;
685 group->alloc_size = alloc_size;
687 G.page_groups = group;
688 G.bytes_mapped += alloc_size;
690 /* If we allocated multiple pages, put the rest on the free list. */
693 struct page_entry *e, *f = G.free_pages;
694 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
696 e = (struct page_entry *) xcalloc (1, page_entry_size);
698 e->bytes = G.pagesize;
  if (entry == NULL)
    entry = (struct page_entry *) xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentinel as
     we increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
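
  /* For example, if this page holds 512 objects and longs have 32
     bits, the sentinel is bit 512 % 32 == 0 of word 512 / 32 == 16,
     one word past the sixteen words that track the real objects.  */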
  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%d, data %p-%p\n",
	     (PTR) entry, (int) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (entry)
     page_entry *entry;
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (PTR) entry,
	     entry->page, entry->page + entry->bytes - 1);

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages ()
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;
  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  next = p->next;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages in free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	free (g->allocation);
	free (g);
      }
    else
      gp = &g->next;
#endif
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is four bytes.  */

static unsigned char size_lookup[257] =
{
  2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};
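
/* Reading the table: size_lookup[5] == 3, so a 5-byte request is
   served from order 3 (8-byte objects); size_lookup[100] == 7, so a
   100-byte request is served from order 7 (128-byte objects).  */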
/* Allocate a chunk of memory of SIZE bytes.  Its contents are
   undefined.  */

void *
ggc_alloc (size)
     size_t size;
{
  unsigned order, word, bit, object_offset;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      /* If this is the only entry, it's also the tail.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;

      /* Put new pages at the head of the page list.  */
      new_entry->next = entry;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * OBJECT_SIZE (order);
    }
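
  /* A hypothetical trace of the fallback scan above: if in_use_p[0]
     is 0xff and the stale hint was 3, bit 3 is found set, so the scan
     restarts at word 0, skips no full words, steps past bits 0-7, and
     settles on hint == 8; the object is then carved out at offset
     8 * OBJECT_SIZE (order) into the page.  */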
  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      G.pages[order] = entry->next;
      entry->next = NULL;
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;

#ifdef GGC_POISON
  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, OBJECT_SIZE (order));
#endif
  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += OBJECT_SIZE (order);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%d, actual=%d at %p on %p\n",
	     (int) size, (int) OBJECT_SIZE (order), result, (PTR) entry);

  return result;
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = (((const char *) p) - entry->page) / OBJECT_SIZE (entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Mark P, but check first that it was allocated by the collector.  */

void
ggc_mark_if_gcable (p)
     const void *p;
{
  if (p && ggc_allocated_p (p))
    ggc_set_mark (p);
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (p)
     const void *p;
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}
/* Initialize the ggc-mmap allocator.  */

void
init_ggc ()
{
  unsigned order;

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

  G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    struct page_entry *e;
    if ((size_t) p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */
	p = alloc_anon (NULL, G.pagesize);
	if ((size_t) p & (G.pagesize - 1))
	  abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif
  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
	 so that we're sure of getting aligned memory.  */
      s = CEIL (s, MAX_ALIGNMENT) * MAX_ALIGNMENT;
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page table.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
	objects_per_page_table[order] = 1;
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      o = size_lookup[OBJECT_SIZE (order)];
      for (i = OBJECT_SIZE (order); size_lookup[i] == o; --i)
	size_lookup[i] = order;
    }
}
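
/* A worked example of the size_lookup rewrite loop above, with an
   assumed (not actual) sizeof (struct tree_list) of 12 bytes and an
   extra order index of 33: size_lookup[12] was 4 (16-byte objects),
   so entries 12 down through 9 are rewritten to 33, and the loop
   stops at entry 8, which already maps to the exact power-of-two
   order 3.  */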
/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context ()
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth == 0)
    abort ();
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (p)
     page_entry *p;
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_PER_PAGE (p->order) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
		 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
	 context further down the context stack.  */
      p->in_use_p[i] |= p->save_in_use_p[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }

  if (p->num_free_objects >= num_objects)
    abort ();
}
/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

void
ggc_pop_context ()
{
  unsigned order, depth;

  depth = --G.context_depth;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  if (p->context_depth > depth)
	    p->context_depth = depth;

	  /* If this page is now in the topmost context, and we'd
	     saved its allocation state, restore it.  */
	  else if (p->context_depth == depth && p->save_in_use_p)
	    {
	      ggc_recalculate_in_use_p (p);
	      free (p->save_in_use_p);
	      p->save_in_use_p = 0;
	    }
	}
    }
}
/* Unmark all objects.  */

static inline void
clear_marks ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
#ifdef ENABLE_CHECKING
	  /* The data should be page-aligned.  */
	  if ((size_t) p->page & (G.pagesize - 1))
	    abort ();
#endif

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth)
	    {
	      if (! p->save_in_use_p)
		p->save_in_use_p = xmalloc (bitmap_size);
	      memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
	    }

	  /* Reset the number of free objects and clear the in-use
	     bits.  These will be adjusted by ggc_set_mark.  */
	  p->num_free_objects = num_objects;
	  memset (p->in_use_p, 0, bitmap_size);

	  /* Make sure the one-past-the-end bit is always set.  */
	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
	}
    }
}
/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static inline void
sweep_pages ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
	 placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  /* Add all live objects on this page to the count of
	     allocated memory.  */
	  live_objects = num_objects - p->num_free_objects;

	  G.allocated += OBJECT_SIZE (order) * live_objects;

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (live_objects == 0)
	    {
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;
		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;
	      p->next = G.pages[order];
	      G.pages[order] = p;

	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
		G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
	 other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
	if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}
#ifdef GGC_POISON
/* Clobber all free objects.  */

static inline void
poison_pages ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		memset (p->page + i * size, 0xa5, size);
	    }
	}
    }
}
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect ()
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
#ifndef GGC_ALWAYS_COLLECT
  if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
    return;
#endif

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  clear_marks ();
  ggc_mark_roots ();
  sweep_pages ();

#ifdef GGC_POISON
  poison_pages ();
#endif

  G.allocated_last_gc = G.allocated;
  if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
    G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
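
/* For example, 2048 bytes prints as "2048 ", 5*1024*1024 bytes as
   "5120k", and 64*1024*1024 bytes as "64M".  */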
void
ggc_print_statistics ()
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr, "\n%-5s %10s %10s %10s\n",
	   "Log", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_PER_PAGE (i) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
	}
      fprintf (stderr, "%-5d %10ld%c %10ld%c %10ld%c\n", i,
	       SCALE (allocated), LABEL (allocated),
	       SCALE (in_use), LABEL (in_use),
	       SCALE (overhead), LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10ld%c %10ld%c %10ld%c\n", "Total",
	   SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
	   SCALE (G.allocated), LABEL (G.allocated),
	   SCALE (total_overhead), LABEL (total_overhead));
}