1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 #include "coretypes.h"
33 #ifdef ENABLE_VALGRIND_CHECKING
34 # ifdef HAVE_MEMCHECK_H
35 # include <memcheck.h>
37 # include <valgrind.h>
40 /* Avoid #ifdefs when we can help it. */
41 #define VALGRIND_DISCARD(x)
44 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
45 file open. Prefer either to valloc. */
47 # undef HAVE_MMAP_DEV_ZERO
49 # include <sys/mman.h>
51 # define MAP_FAILED -1
53 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
54 # define MAP_ANONYMOUS MAP_ANON
60 #ifdef HAVE_MMAP_DEV_ZERO
62 # include <sys/mman.h>
64 # define MAP_FAILED -1
71 #define USING_MALLOC_PAGE_GROUPS
76 This garbage-collecting allocator allocates objects on one of a set
77 of pages. Each page can allocate objects of a single size only;
78 available sizes are powers of two starting at four bytes. The size
79 of an allocation request is rounded up to the next power of two
80 (`order'), and satisfied from the appropriate page.
82 Each page is recorded in a page-entry, which also maintains an
83 in-use bitmap of object positions on the page. This allows the
84 allocation state of a particular object to be flipped without
85 touching the page itself.
87 Each page-entry also has a context depth, which is used to track
88 pushing and popping of allocation contexts. Only objects allocated
89 in the current (highest-numbered) context may be collected.
91 Page entries are arranged in an array of singly-linked lists. The
92 array is indexed by the allocation size, in bits, of the pages on
93 it; i.e. all pages on a list allocate objects of the same size.
94 Pages are ordered on the list such that all non-full pages precede
95 all full pages, with non-full pages arranged in order of decreasing
98 Empty pages (of all orders) are kept on a single page cache list,
99 and are considered first when new pages are required; they are
100 deallocated at the start of the next collection if they haven't
101 been recycled by then. */
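/* Illustrative sketch, not part of the original file: the mapping from a
   request size to an allocation order described above is just the ceiling
   of log2, with a minimum bucket assumed here of 8 bytes (order 3) to
   match the size_lookup table later in this file; the real allocator also
   adds the extra non-power-of-two orders defined further below.  */
#if 0
static unsigned
example_size_to_order (size_t size)
{
  unsigned order = 3;                   /* Smallest bucket: 2^3 = 8 bytes.  */
  while (((size_t) 1 << order) < size)
    ++order;
  return order;                         /* A 20-byte request yields order 5,
                                           i.e. a 32-byte object.  */
}
#endif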
103 /* Define GGC_DEBUG_LEVEL to print debugging information.
104 0: No debugging output.
105 1: GC statistics only.
106 2: Page-entry allocations/deallocations as well.
107 3: Object allocations as well.
108 4: Object marks as well. */
109 #define GGC_DEBUG_LEVEL (0)
111 #ifndef HOST_BITS_PER_PTR
112 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
116 /* A two-level tree is used to look up the page-entry for a given
117 pointer. Two chunks of the pointer's bits are extracted to index
118 the first and second levels of the tree, as follows:
122 msb +----------------+----+------+------+ lsb
128 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
129 pages are aligned on system page boundaries. The next most
130 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
131 index values in the lookup table, respectively.
133 For 32-bit architectures and the settings below, there are no
134 leftover bits. For architectures with wider pointers, the lookup
135 tree points to a list of pages, which must be scanned to find the
138 #define PAGE_L1_BITS (8)
139 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
140 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
141 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
143 #define LOOKUP_L1(p) \
144 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
146 #define LOOKUP_L2(p) \
147 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
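/* Illustrative worked example, not part of the original file: how the
   macros above split a 32-bit address, assuming a 4K system page
   (G.lg_pagesize == 12, so PAGE_L2_BITS == 12).  For p == 0xdeadbeef:
     LOOKUP_L1 (p) == 0xde    (top PAGE_L1_BITS == 8 bits)
     LOOKUP_L2 (p) == 0xadb   (next PAGE_L2_BITS == 12 bits)
   and the low 12 bits (0xeef) are the offset within the page, which the
   lookup ignores.  */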
149 /* The number of objects per allocation page, for objects on a page of
150 the indicated ORDER. */
151 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
153 /* The number of objects in P. */
154 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
156 /* The size of an object on a page of the indicated ORDER. */
157 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
159 /* For speed, we avoid doing a general integer divide to locate the
160 offset in the allocation bitmap, by precalculating numbers M, S
161 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
162 within the page which is evenly divisible by the object size Z. */
163 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
164 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
165 #define OFFSET_TO_BIT(OFFSET, ORDER) \
166 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
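/* Worked example, not part of the original file: for a hypothetical
   object size Z == 24 == 3 * 2^3, the odd factor 3 has multiplicative
   inverse 0xaaaaaaab modulo 2^32, and the power-of-two factor gives a
   shift of 3.  For the in-page offset O == 48:
     (48 * 0xaaaaaaab) mod 2^32 == 16,   16 >> 3 == 2 == 48 / 24
   so OFFSET_TO_BIT recovers the object index with one multiply and one
   shift; compute_inverse below fills in the (mult, shift) pairs.  */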
168 /* The number of extra orders, not corresponding to power-of-two sized
171 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
173 #define RTL_SIZE(NSLOTS) \
174 (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
176 #define TREE_EXP_SIZE(OPS) \
177 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
179 /* The Ith entry is the maximum size of an object to be stored in the
180 Ith extra order. Adding a new entry to this array is the *only*
181 thing you need to do to add a new special allocation size. */
183 static const size_t extra_order_size_table[] = {
184 sizeof (struct tree_decl),
185 sizeof (struct tree_list),
187 RTL_SIZE (2), /* MEM, PLUS, etc. */
188 RTL_SIZE (9), /* INSN, CALL_INSN, JUMP_INSN */
191 /* The total number of orders. */
193 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
195 /* We use this structure to determine the alignment required for
196 allocations. For power-of-two sized allocations, that's not a
197 problem, but it does matter for odd-sized allocations. */
199 struct max_alignment {
207 /* The biggest alignment required. */
209 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
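/* Illustrative sketch, not part of the original file and using
   hypothetical member names: the idea is that a union of the most
   strictly aligned scalar types placed after a single char forces the
   compiler to insert padding equal to the strictest alignment, which
   offsetof then reads back.  */
#if 0
struct example_max_alignment {
  char c;
  union { long long i; long double d; } u;
};
#define EXAMPLE_MAX_ALIGNMENT (offsetof (struct example_max_alignment, u))
#endif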
211 /* Compute the smallest nonnegative number which when added to X gives
214 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
216 /* Compute the smallest multiple of F that is >= X. */
218 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
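/* Worked example, not part of the original file: with x == 10 and f == 8,
     ROUND_UP_VALUE (10, 8) == 7 - ((7 + 10) % 8) == 7 - 1 == 6
   and 10 + 6 == 16 is the next multiple of 8; likewise
     ROUND_UP (10, 8) == CEIL (10, 8) * 8 == 2 * 8 == 16.  */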
220 /* The Ith entry is the number of objects on a page of order I. */
222 static unsigned objects_per_page_table[NUM_ORDERS];
224 /* The Ith entry is the size of an object on a page of order I. */
226 static size_t object_size_table[NUM_ORDERS];
228 /* The Ith entry is a pair of numbers (mult, shift) such that
229 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
230 for all k evenly divisible by OBJECT_SIZE(I). */
237 inverse_table[NUM_ORDERS];
239 /* A page_entry records the status of an allocation page. This
240 structure is dynamically sized to fit the bitmap in_use_p. */
241 typedef struct page_entry
243 /* The next page-entry with objects of the same size, or NULL if
244 this is the last page-entry. */
245 struct page_entry *next;
247 /* The number of bytes allocated. (This will always be a multiple
248 of the host system page size.) */
251 /* The address at which the memory is allocated. */
254 #ifdef USING_MALLOC_PAGE_GROUPS
255 /* Back pointer to the page group this page came from. */
256 struct page_group *group;
259 /* This is the index in the by_depth varray where this page table
261 unsigned long index_by_depth;
263 /* Context depth of this page. */
264 unsigned short context_depth;
266 /* The number of free objects remaining on this page. */
267 unsigned short num_free_objects;
269 /* A likely candidate for the bit position of a free object for the
270 next allocation from this page. */
271 unsigned short next_bit_hint;
273 /* The lg of the size of objects allocated from this page. */
276 /* A bit vector indicating whether or not objects are in use. The
277 Nth bit is one if the Nth object on this page is allocated. This
278 array is dynamically sized. */
279 unsigned long in_use_p[1];
282 #ifdef USING_MALLOC_PAGE_GROUPS
283 /* A page_group describes a large allocation from malloc, from which
284 we parcel out aligned pages. */
285 typedef struct page_group
287 /* A linked list of all extant page groups. */
288 struct page_group *next;
290 /* The address we received from malloc. */
293 /* The size of the block. */
296 /* A bitmask of pages in use. */
301 #if HOST_BITS_PER_PTR <= 32
303 /* On 32-bit hosts, we use a two level page table, as pictured above. */
304 typedef page_entry **page_table[PAGE_L1_SIZE];
308 /* On 64-bit hosts, we use the same two level page tables plus a linked
309 list that disambiguates the top 32-bits. There will almost always be
310 exactly one entry in the list. */
311 typedef struct page_table_chain
313 struct page_table_chain *next;
315 page_entry **table[PAGE_L1_SIZE];
320 /* The rest of the global variables. */
321 static struct globals
323 /* The Nth element in this array is a page with objects of size 2^N.
324 If there are any pages with free objects, they will be at the
325 head of the list. NULL if there are no page-entries for this
327 page_entry *pages[NUM_ORDERS];
329 /* The Nth element in this array is the last page with objects of
330 size 2^N. NULL if there are no page-entries for this object
332 page_entry *page_tails[NUM_ORDERS];
334 /* Lookup table for associating allocation pages with object addresses. */
337 /* The system's page size. */
341 /* Bytes currently allocated. */
344 /* Bytes currently allocated at the end of the last collection. */
345 size_t allocated_last_gc;
347 /* Total amount of memory mapped. */
350 /* Bit N set if any allocations have been done at context depth N. */
351 unsigned long context_depth_allocations;
353 /* Bit N set if any collections have been done at context depth N. */
354 unsigned long context_depth_collections;
356 /* The current depth in the context stack. */
357 unsigned short context_depth;
359 /* A file descriptor open to /dev/zero for reading. */
360 #if defined (HAVE_MMAP_DEV_ZERO)
364 /* A cache of free system pages. */
365 page_entry *free_pages;
367 #ifdef USING_MALLOC_PAGE_GROUPS
368 page_group *page_groups;
371 /* The file descriptor for debugging output. */
374 /* Current number of elements in use in depth below. */
375 unsigned int depth_in_use;
377 /* Maximum number of elements that can be used before resizing. */
378 unsigned int depth_max;
380 /* Each element of this array is an index in by_depth where the given
381 depth starts. This structure is indexed by the depth we
382 are interested in. */
385 /* Current number of elements in use in by_depth below. */
386 unsigned int by_depth_in_use;
388 /* Maximum number of elements that can be used before resizing. */
389 unsigned int by_depth_max;
391 /* Each element of this array is a pointer to a page_entry; all
392 page_entries can be found in here by increasing depth.
393 index_by_depth in the page_entry is the index into this data
394 structure where that page_entry can be found. This is used to
395 speed up finding all page_entries at a particular depth. */
396 page_entry **by_depth;
398 /* Each element is a pointer to the saved in_use_p bits, if any,
399 zero otherwise. We allocate them all together, to enable a
400 better runtime data access pattern. */
401 unsigned long **save_in_use;
403 #ifdef GATHER_STATISTICS
406 /* Total memory allocated with ggc_alloc */
407 unsigned long long total_allocated;
408 /* Total overhead for memory to be allocated with ggc_alloc */
409 unsigned long long total_overhead;
411 /* Total allocations and overhead for sizes less than 32, 64 and 128.
412 These sizes are interesting because they are typical cache line
415 unsigned long long total_allocated_under32;
416 unsigned long long total_overhead_under32;
418 unsigned long long total_allocated_under64;
419 unsigned long long total_overhead_under64;
421 unsigned long long total_allocated_under128;
422 unsigned long long total_overhead_under128;
424 /* The overhead for each of the allocation orders. */
425 unsigned long long total_overhead_per_order[NUM_ORDERS];
430 /* The size in bytes required to maintain a bitmap for the objects
432 #define BITMAP_SIZE(Num_objects) \
433 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
435 /* Allocate pages in chunks of this size, to throttle calls to memory
436 allocation routines. The first page is used, the rest go onto the
437 free list. This cannot be larger than HOST_BITS_PER_INT for the
438 in_use bitmask for page_group. */
439 #define GGC_QUIRE_SIZE 16
441 /* Initial guess as to how many page table entries we might need. */
442 #define INITIAL_PTE_COUNT 128
444 static int ggc_allocated_p (const void *);
445 static page_entry *lookup_page_table_entry (const void *);
446 static void set_page_table_entry (void *, page_entry *);
448 static char *alloc_anon (char *, size_t);
450 #ifdef USING_MALLOC_PAGE_GROUPS
451 static size_t page_group_index (char *, char *);
452 static void set_page_group_in_use (page_group *, char *);
453 static void clear_page_group_in_use (page_group *, char *);
455 static struct page_entry * alloc_page (unsigned);
456 static void free_page (struct page_entry *);
457 static void release_pages (void);
458 static void clear_marks (void);
459 static void sweep_pages (void);
460 static void ggc_recalculate_in_use_p (page_entry *);
461 static void compute_inverse (unsigned);
462 static inline void adjust_depth (void);
463 static void move_ptes_to_front (int, int);
465 #ifdef ENABLE_GC_CHECKING
466 static void poison_pages (void);
469 void debug_print_page_list (int);
470 static void push_depth (unsigned int);
471 static void push_by_depth (page_entry *, unsigned long *);
472 struct alloc_zone *rtl_zone = NULL;
473 struct alloc_zone *tree_zone = NULL;
474 struct alloc_zone *garbage_zone = NULL;
476 /* Push an entry onto G.depth. */
479 push_depth (unsigned int i)
481 if (G.depth_in_use >= G.depth_max)
484 G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
486 G.depth[G.depth_in_use++] = i;
489 /* Push an entry onto G.by_depth and G.save_in_use. */
492 push_by_depth (page_entry *p, unsigned long *s)
494 if (G.by_depth_in_use >= G.by_depth_max)
497 G.by_depth = xrealloc (G.by_depth,
498 G.by_depth_max * sizeof (page_entry *));
499 G.save_in_use = xrealloc (G.save_in_use,
500 G.by_depth_max * sizeof (unsigned long *));
502 G.by_depth[G.by_depth_in_use] = p;
503 G.save_in_use[G.by_depth_in_use++] = s;
506 #if (GCC_VERSION < 3001)
507 #define prefetch(X) ((void) X)
509 #define prefetch(X) __builtin_prefetch (X)
512 #define save_in_use_p_i(__i) \
514 #define save_in_use_p(__p) \
515 (save_in_use_p_i (__p->index_by_depth))
517 /* Returns nonzero if P was allocated in GC'able memory. */
520 ggc_allocated_p (const void *p)
525 #if HOST_BITS_PER_PTR <= 32
528 page_table table = G.lookup;
529 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
534 if (table->high_bits == high_bits)
538 base = &table->table[0];
541 /* Extract the level 1 and 2 indices. */
545 return base[L1] && base[L1][L2];
548 /* Traverse the page table and find the entry for a page.
549 Die (probably) if the object wasn't allocated via GC. */
551 static inline page_entry *
552 lookup_page_table_entry (const void *p)
557 #if HOST_BITS_PER_PTR <= 32
560 page_table table = G.lookup;
561 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
562 while (table->high_bits != high_bits)
564 base = &table->table[0];
567 /* Extract the level 1 and 2 indices. */
574 /* Set the page table entry for a page. */
577 set_page_table_entry (void *p, page_entry *entry)
582 #if HOST_BITS_PER_PTR <= 32
586 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
587 for (table = G.lookup; table; table = table->next)
588 if (table->high_bits == high_bits)
591 /* Not found -- allocate a new table. */
592 table = xcalloc (1, sizeof(*table));
593 table->next = G.lookup;
594 table->high_bits = high_bits;
597 base = &table->table[0];
600 /* Extract the level 1 and 2 indices. */
604 if (base[L1] == NULL)
605 base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
607 base[L1][L2] = entry;
610 /* Prints the page-entry for object size ORDER, for debugging. */
613 debug_print_page_list (int order)
616 printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
617 (void *) G.page_tails[order]);
621 printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
622 p->num_free_objects);
630 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
631 (if non-null). The ifdef structure here is intended to cause a
632 compile error unless exactly one of the HAVE_* is defined. */
635 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
637 #ifdef HAVE_MMAP_ANON
638 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
639 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
641 #ifdef HAVE_MMAP_DEV_ZERO
642 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
643 MAP_PRIVATE, G.dev_zero_fd, 0);
646 if (page == (char *) MAP_FAILED)
648 perror ("virtual memory exhausted");
649 exit (FATAL_EXIT_CODE);
652 /* Remember that we allocated this memory. */
653 G.bytes_mapped += size;
655 /* Pretend we don't have access to the allocated pages. We'll enable
656 access to smaller pieces of the area in ggc_alloc. Discard the
657 handle to avoid handle leak. */
658 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
663 #ifdef USING_MALLOC_PAGE_GROUPS
664 /* Compute the index for this page into the page group. */
667 page_group_index (char *allocation, char *page)
669 return (size_t) (page - allocation) >> G.lg_pagesize;
672 /* Set and clear the in_use bit for this page in the page group. */
675 set_page_group_in_use (page_group *group, char *page)
677 group->in_use |= 1 << page_group_index (group->allocation, page);
681 clear_page_group_in_use (page_group *group, char *page)
683 group->in_use &= ~(1 << page_group_index (group->allocation, page));
687 /* Allocate a new page for allocating objects of size 2^ORDER,
688 and return an entry for it. The entry is not added to the
689 appropriate page_table list. */
691 static inline struct page_entry *
692 alloc_page (unsigned order)
694 struct page_entry *entry, *p, **pp;
698 size_t page_entry_size;
700 #ifdef USING_MALLOC_PAGE_GROUPS
704 num_objects = OBJECTS_PER_PAGE (order);
705 bitmap_size = BITMAP_SIZE (num_objects + 1);
706 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
707 entry_size = num_objects * OBJECT_SIZE (order);
708 if (entry_size < G.pagesize)
709 entry_size = G.pagesize;
714 /* Check the list of free pages for one we can use. */
715 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
716 if (p->bytes == entry_size)
721 /* Recycle the allocated memory from this page ... */
725 #ifdef USING_MALLOC_PAGE_GROUPS
729 /* ... and, if possible, the page entry itself. */
730 if (p->order == order)
733 memset (entry, 0, page_entry_size);
739 else if (entry_size == G.pagesize)
741 /* We want just one page. Allocate a bunch of them and put the
742 extras on the freelist. (Can only do this optimization with
743 mmap for backing store.) */
744 struct page_entry *e, *f = G.free_pages;
747 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
749 /* This loop counts down so that the chain will be in ascending
751 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
753 e = xcalloc (1, page_entry_size);
755 e->bytes = G.pagesize;
756 e->page = page + (i << G.lg_pagesize);
764 page = alloc_anon (NULL, entry_size);
766 #ifdef USING_MALLOC_PAGE_GROUPS
769 /* Allocate a large block of memory and serve out the aligned
770 pages therein. This results in much less memory wastage
771 than the traditional implementation of valloc. */
773 char *allocation, *a, *enda;
774 size_t alloc_size, head_slop, tail_slop;
775 int multiple_pages = (entry_size == G.pagesize);
778 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
780 alloc_size = entry_size + G.pagesize - 1;
781 allocation = xmalloc (alloc_size);
783 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
784 head_slop = page - allocation;
786 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
788 tail_slop = alloc_size - entry_size - head_slop;
789 enda = allocation + alloc_size - tail_slop;
791 /* We allocated N pages, which are likely not aligned, leaving
792 us with N-1 usable pages. We plan to place the page_group
793 structure somewhere in the slop. */
794 if (head_slop >= sizeof (page_group))
795 group = (page_group *)page - 1;
798 /* We magically got an aligned allocation. Too bad, we have
799 to waste a page anyway. */
803 tail_slop += G.pagesize;
805 if (tail_slop < sizeof (page_group))
807 group = (page_group *)enda;
808 tail_slop -= sizeof (page_group);
811 /* Remember that we allocated this memory. */
812 group->next = G.page_groups;
813 group->allocation = allocation;
814 group->alloc_size = alloc_size;
816 G.page_groups = group;
817 G.bytes_mapped += alloc_size;
819 /* If we allocated multiple pages, put the rest on the free list. */
822 struct page_entry *e, *f = G.free_pages;
823 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
825 e = xcalloc (1, page_entry_size);
827 e->bytes = G.pagesize;
839 entry = xcalloc (1, page_entry_size);
841 entry->bytes = entry_size;
843 entry->context_depth = G.context_depth;
844 entry->order = order;
845 entry->num_free_objects = num_objects;
846 entry->next_bit_hint = 1;
848 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
850 #ifdef USING_MALLOC_PAGE_GROUPS
851 entry->group = group;
852 set_page_group_in_use (group, page);
855 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
856 increment the hint. */
857 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
858 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
860 set_page_table_entry (page, entry);
862 if (GGC_DEBUG_LEVEL >= 2)
863 fprintf (G.debug_file,
864 "Allocating page at %p, object size=%lu, data %p-%p\n",
865 (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
866 page + entry_size - 1);
871 /* Adjust the size of G.depth so that no index greater than the one
872 used by the top of G.by_depth is used. */
879 if (G.by_depth_in_use)
881 top = G.by_depth[G.by_depth_in_use-1];
883 /* Peel back indices in depth that index into by_depth, so that
884 as new elements are added to by_depth, we note the indices
885 of those elements, if they are for new context depths. */
886 while (G.depth_in_use > (size_t)top->context_depth+1)
891 /* For a page that is no longer needed, put it on the free page list. */
894 free_page (page_entry *entry)
896 if (GGC_DEBUG_LEVEL >= 2)
897 fprintf (G.debug_file,
898 "Deallocating page at %p, data %p-%p\n", (void *) entry,
899 entry->page, entry->page + entry->bytes - 1);
901 /* Mark the page as inaccessible. Discard the handle to avoid handle
903 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
905 set_page_table_entry (entry->page, NULL);
907 #ifdef USING_MALLOC_PAGE_GROUPS
908 clear_page_group_in_use (entry->group, entry->page);
911 if (G.by_depth_in_use > 1)
913 page_entry *top = G.by_depth[G.by_depth_in_use-1];
915 /* If they are at the same depth, put top element into freed
917 if (entry->context_depth == top->context_depth)
919 int i = entry->index_by_depth;
921 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
922 top->index_by_depth = i;
926 /* We cannot free a page from a context deeper than the
935 entry->next = G.free_pages;
936 G.free_pages = entry;
939 /* Release the free page cache to the system. */
945 page_entry *p, *next;
949 /* Gather up adjacent pages so they are unmapped together. */
960 while (p && p->page == start + len)
969 G.bytes_mapped -= len;
974 #ifdef USING_MALLOC_PAGE_GROUPS
978 /* Remove all pages belonging to free page groups from the list. */
980 while ((p = *pp) != NULL)
981 if (p->group->in_use == 0)
989 /* Remove all free page groups, and release the storage. */
991 while ((g = *gp) != NULL)
995 G.bytes_mapped -= g->alloc_size;
996 free (g->allocation);
1003 /* This table provides a fast way to determine ceil(log_2(size)) for
1004 allocation requests. The minimum allocation size is eight bytes. */
1006 static unsigned char size_lookup[257] =
1008 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1009 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1010 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1011 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1012 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1013 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1014 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1015 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1016 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1017 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1018 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1019 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1020 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1021 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1022 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1023 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1027 /* Typed allocation function. Does nothing special in this collector. */
1030 ggc_alloc_typed (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size)
1032 return ggc_alloc (size);
1035 /* Zone allocation function. Does nothing special in this collector. */
1038 ggc_alloc_zone (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED)
1040 return ggc_alloc (size);
1043 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1046 ggc_alloc (size_t size)
1048 unsigned order, word, bit, object_offset;
1049 struct page_entry *entry;
1053 order = size_lookup[size];
1057 while (size > OBJECT_SIZE (order))
1061 /* If there are non-full pages for this size allocation, they are at
1062 the head of the list. */
1063 entry = G.pages[order];
1065 /* If there is no page for this object size, or all pages in this
1066 context are full, allocate a new page. */
1067 if (entry == NULL || entry->num_free_objects == 0)
1069 struct page_entry *new_entry;
1070 new_entry = alloc_page (order);
1072 new_entry->index_by_depth = G.by_depth_in_use;
1073 push_by_depth (new_entry, 0);
1075 /* We can skip context depths; if we do, make sure we go all the
1076 way to the new depth. */
1077 while (new_entry->context_depth >= G.depth_in_use)
1078 push_depth (G.by_depth_in_use-1);
1080 /* If this is the only entry, it's also the tail. */
1082 G.page_tails[order] = new_entry;
1084 /* Put new pages at the head of the page list. */
1085 new_entry->next = entry;
1087 G.pages[order] = new_entry;
1089 /* For a new page, we know the word and bit positions (in the
1090 in_use bitmap) of the first available object -- they're zero. */
1091 new_entry->next_bit_hint = 1;
1098 /* First try to use the hint left from the previous allocation
1099 to locate a clear bit in the in-use bitmap. We've made sure
1100 that the one-past-the-end bit is always set, so if the hint
1101 has run over, this test will fail. */
1102 unsigned hint = entry->next_bit_hint;
1103 word = hint / HOST_BITS_PER_LONG;
1104 bit = hint % HOST_BITS_PER_LONG;
1106 /* If the hint didn't work, scan the bitmap from the beginning. */
1107 if ((entry->in_use_p[word] >> bit) & 1)
1110 while (~entry->in_use_p[word] == 0)
1112 while ((entry->in_use_p[word] >> bit) & 1)
1114 hint = word * HOST_BITS_PER_LONG + bit;
1117 /* Next time, try the next bit. */
1118 entry->next_bit_hint = hint + 1;
1120 object_offset = hint * OBJECT_SIZE (order);
1123 /* Set the in-use bit. */
1124 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1126 /* Keep a running total of the number of free objects. If this page
1127 fills up, we may have to move it to the end of the list if the
1128 next page isn't full. If the next page is full, all subsequent
1129 pages are full, so there's no need to move it. */
1130 if (--entry->num_free_objects == 0
1131 && entry->next != NULL
1132 && entry->next->num_free_objects > 0)
1134 G.pages[order] = entry->next;
1136 G.page_tails[order]->next = entry;
1137 G.page_tails[order] = entry;
1140 /* Calculate the object's address. */
1141 result = entry->page + object_offset;
1143 #ifdef ENABLE_GC_CHECKING
1144 /* Keep poisoning the object by writing 0xaf, in an attempt to keep
1145 exactly the same semantics in the presence of memory bugs, regardless of
1146 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1147 handle to avoid handle leak. */
1148 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, OBJECT_SIZE (order)));
1150 /* `Poison' the entire allocated object, including any padding at
1152 memset (result, 0xaf, OBJECT_SIZE (order));
1154 /* Make the bytes after the end of the object inaccessible. Discard the
1155 handle to avoid handle leak. */
1156 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
1157 OBJECT_SIZE (order) - size));
1160 /* Tell Valgrind that the memory is there, but its content isn't
1161 defined. The bytes at the end of the object are still marked
1163 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
1165 /* Keep track of how many bytes are being allocated. This
1166 information is used in deciding when to collect. */
1167 G.allocated += OBJECT_SIZE (order);
1169 #ifdef GATHER_STATISTICS
1171 G.stats.total_overhead += OBJECT_SIZE (order) - size;
1172 G.stats.total_overhead_per_order[order] += OBJECT_SIZE (order) - size;
1173 G.stats.total_allocated += OBJECT_SIZE(order);
1176 G.stats.total_overhead_under32 += OBJECT_SIZE (order) - size;
1177 G.stats.total_allocated_under32 += OBJECT_SIZE(order);
1181 G.stats.total_overhead_under64 += OBJECT_SIZE (order) - size;
1182 G.stats.total_allocated_under64 += OBJECT_SIZE(order);
1186 G.stats.total_overhead_under128 += OBJECT_SIZE (order) - size;
1187 G.stats.total_allocated_under128 += OBJECT_SIZE(order);
1193 if (GGC_DEBUG_LEVEL >= 3)
1194 fprintf (G.debug_file,
1195 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1196 (unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
1202 /* If P is not marked, mark it and return false. Otherwise return true.
1203 P must have been allocated by the GC allocator; it mustn't point to
1204 static objects, stack variables, or memory allocated with malloc. */
1207 ggc_set_mark (const void *p)
1213 /* Look up the page on which the object is allocated. If the object
1214 wasn't allocated by the collector, we'll probably die. */
1215 entry = lookup_page_table_entry (p);
1216 #ifdef ENABLE_CHECKING
1221 /* Calculate the index of the object on the page; this is its bit
1222 position in the in_use_p bitmap. */
1223 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1224 word = bit / HOST_BITS_PER_LONG;
1225 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1227 /* If the bit was previously set, skip it. */
1228 if (entry->in_use_p[word] & mask)
1231 /* Otherwise set it, and decrement the free object count. */
1232 entry->in_use_p[word] |= mask;
1233 entry->num_free_objects -= 1;
1235 if (GGC_DEBUG_LEVEL >= 4)
1236 fprintf (G.debug_file, "Marking %p\n", p);
1241 /* Return 1 if P has been marked, zero otherwise.
1242 P must have been allocated by the GC allocator; it mustn't point to
1243 static objects, stack variables, or memory allocated with malloc. */
1246 ggc_marked_p (const void *p)
1252 /* Look up the page on which the object is allocated. If the object
1253 wasn't allocated by the collector, we'll probably die. */
1254 entry = lookup_page_table_entry (p);
1255 #ifdef ENABLE_CHECKING
1260 /* Calculate the index of the object on the page; this is its bit
1261 position in the in_use_p bitmap. */
1262 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1263 word = bit / HOST_BITS_PER_LONG;
1264 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1266 return (entry->in_use_p[word] & mask) != 0;
1269 /* Return the size of the gc-able object P. */
1272 ggc_get_size (const void *p)
1274 page_entry *pe = lookup_page_table_entry (p);
1275 return OBJECT_SIZE (pe->order);
1278 /* Subroutine of init_ggc which computes the pair of numbers used to
1279 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1281 This algorithm is taken from Granlund and Montgomery's paper
1282 "Division by Invariant Integers using Multiplication"
1283 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1287 compute_inverse (unsigned order)
1292 size = OBJECT_SIZE (order);
1294 while (size % 2 == 0)
1301 while (inv * size != 1)
1302 inv = inv * (2 - inv*size);
1304 DIV_MULT (order) = inv;
1305 DIV_SHIFT (order) = e;
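/* Self-contained sketch, not part of the original file, of the same
   Newton-Raphson iteration with explicit 32-bit arithmetic.  Starting
   from the odd factor itself, each step roughly doubles the number of
   correct low-order bits of the inverse, so only a few iterations are
   needed.  */
#if 0
#include <stdint.h>
static void
example_compute_inverse (uint32_t size, uint32_t *mult, unsigned *shift)
{
  unsigned e = 0;
  while (size % 2 == 0)         /* Strip the power-of-two factor...  */
    {
      e++;
      size >>= 1;
    }
  uint32_t inv = size;          /* ... and invert the odd factor mod 2^32.  */
  while (inv * size != 1)
    inv *= 2 - inv * size;
  *mult = inv;                  /* For size 24: inv == 0xaaaaaaab, e == 3,  */
  *shift = e;                   /* so (48 * inv) >> 3 == 2 == 48 / 24.  */
}
#endif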
1308 /* Initialize the ggc-mmap allocator. */
1314 G.pagesize = getpagesize();
1315 G.lg_pagesize = exact_log2 (G.pagesize);
1317 #ifdef HAVE_MMAP_DEV_ZERO
1318 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1319 if (G.dev_zero_fd == -1)
1320 internal_error ("open /dev/zero: %m");
1324 G.debug_file = fopen ("ggc-mmap.debug", "w");
1326 G.debug_file = stdout;
1330 /* StunOS has an amazing off-by-one error for the first mmap allocation
1331 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1332 believe, is an unaligned page allocation, which would cause us to
1333 hork badly if we tried to use it. */
1335 char *p = alloc_anon (NULL, G.pagesize);
1336 struct page_entry *e;
1337 if ((size_t)p & (G.pagesize - 1))
1339 /* How losing. Discard this one and try another. If we still
1340 can't get something useful, give up. */
1342 p = alloc_anon (NULL, G.pagesize);
1343 if ((size_t)p & (G.pagesize - 1))
1347 /* We have a good page, might as well hold onto it... */
1348 e = xcalloc (1, sizeof (struct page_entry));
1349 e->bytes = G.pagesize;
1351 e->next = G.free_pages;
1356 /* Initialize the object size table. */
1357 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1358 object_size_table[order] = (size_t) 1 << order;
1359 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1361 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1363 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1364 so that we're sure of getting aligned memory. */
1365 s = ROUND_UP (s, MAX_ALIGNMENT);
1366 object_size_table[order] = s;
1369 /* Initialize the objects-per-page and inverse tables. */
1370 for (order = 0; order < NUM_ORDERS; ++order)
1372 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1373 if (objects_per_page_table[order] == 0)
1374 objects_per_page_table[order] = 1;
1375 compute_inverse (order);
1378 /* Reset the size_lookup array to put appropriately sized objects in
1379 the special orders. All objects bigger than the previous power
1380 of two, but no greater than the special size, should go in the
1382 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1387 o = size_lookup[OBJECT_SIZE (order)];
1388 for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
1389 size_lookup[i] = order;
1394 G.depth = xmalloc (G.depth_max * sizeof (unsigned int));
1396 G.by_depth_in_use = 0;
1397 G.by_depth_max = INITIAL_PTE_COUNT;
1398 G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
1399 G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
1402 /* Increment the `GC context'. Objects allocated in an outer context
1403 are never freed, eliminating the need to register their roots. */
1406 ggc_push_context (void)
1411 if (G.context_depth >= HOST_BITS_PER_LONG)
1415 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1416 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1419 ggc_recalculate_in_use_p (page_entry *p)
1424 /* Because the past-the-end bit in in_use_p is always set, we
1425 pretend there is one additional object. */
1426 num_objects = OBJECTS_IN_PAGE (p) + 1;
1428 /* Reset the free object count. */
1429 p->num_free_objects = num_objects;
1431 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1433 i < CEIL (BITMAP_SIZE (num_objects),
1434 sizeof (*p->in_use_p));
1439 /* Something is in use if it is marked, or if it was in use in a
1440 context further down the context stack. */
1441 p->in_use_p[i] |= save_in_use_p (p)[i];
1443 /* Decrement the free object count for every object allocated. */
1444 for (j = p->in_use_p[i]; j; j >>= 1)
1445 p->num_free_objects -= (j & 1);
1448 if (p->num_free_objects >= num_objects)
1452 /* Decrement the `GC context'. All objects allocated since the
1453 previous ggc_push_context are migrated to the outer context. */
1456 ggc_pop_context (void)
1458 unsigned long omask;
1459 unsigned int depth, i, e;
1460 #ifdef ENABLE_CHECKING
1464 depth = --G.context_depth;
1465 omask = (unsigned long)1 << (depth + 1);
1467 if (!((G.context_depth_allocations | G.context_depth_collections) & omask))
1470 G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1;
1471 G.context_depth_allocations &= omask - 1;
1472 G.context_depth_collections &= omask - 1;
1474 /* The G.depth array is shortened so that the last index is the
1475 context_depth of the top element of by_depth. */
1476 if (depth+1 < G.depth_in_use)
1477 e = G.depth[depth+1];
1479 e = G.by_depth_in_use;
1481 /* We might not have any PTEs of depth depth. */
1482 if (depth < G.depth_in_use)
1485 /* First we go through all the pages at depth depth to
1486 recalculate the in use bits. */
1487 for (i = G.depth[depth]; i < e; ++i)
1491 #ifdef ENABLE_CHECKING
1494 /* Check that all of the pages really are at the depth that
1496 if (p->context_depth != depth)
1498 if (p->index_by_depth != i)
1502 prefetch (&save_in_use_p_i (i+8));
1503 prefetch (&save_in_use_p_i (i+16));
1504 if (save_in_use_p_i (i))
1507 ggc_recalculate_in_use_p (p);
1508 free (save_in_use_p_i (i));
1509 save_in_use_p_i (i) = 0;
1514 /* Then, we reset all page_entries with a depth greater than depth
1516 for (i = e; i < G.by_depth_in_use; ++i)
1518 page_entry *p = G.by_depth[i];
1520 /* Check that all of the pages really are at the depth we
1522 #ifdef ENABLE_CHECKING
1523 if (p->context_depth <= depth)
1525 if (p->index_by_depth != i)
1528 p->context_depth = depth;
1533 #ifdef ENABLE_CHECKING
1534 for (order = 2; order < NUM_ORDERS; order++)
1538 for (p = G.pages[order]; p != NULL; p = p->next)
1540 if (p->context_depth > depth)
1542 else if (p->context_depth == depth && save_in_use_p (p))
1549 /* Unmark all objects. */
1556 for (order = 2; order < NUM_ORDERS; order++)
1560 for (p = G.pages[order]; p != NULL; p = p->next)
1562 size_t num_objects = OBJECTS_IN_PAGE (p);
1563 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1565 #ifdef ENABLE_CHECKING
1566 /* The data should be page-aligned. */
1567 if ((size_t) p->page & (G.pagesize - 1))
1571 /* Pages that aren't in the topmost context are not collected;
1572 nevertheless, we need their in-use bit vectors to store GC
1573 marks. So, back them up first. */
1574 if (p->context_depth < G.context_depth)
1576 if (! save_in_use_p (p))
1577 save_in_use_p (p) = xmalloc (bitmap_size);
1578 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1581 /* Reset the number of free objects and clear the
1582 in-use bits. These will be adjusted by mark_obj. */
1583 p->num_free_objects = num_objects;
1584 memset (p->in_use_p, 0, bitmap_size);
1586 /* Make sure the one-past-the-end bit is always set. */
1587 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1588 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1593 /* Free all empty pages. Partially empty pages need no attention
1594 because the `mark' bit doubles as an `unused' bit. */
1601 for (order = 2; order < NUM_ORDERS; order++)
1603 /* The last page-entry to consider, regardless of entries
1604 placed at the end of the list. */
1605 page_entry * const last = G.page_tails[order];
1608 size_t live_objects;
1609 page_entry *p, *previous;
1619 page_entry *next = p->next;
1621 /* Loop until all entries have been examined. */
1624 num_objects = OBJECTS_IN_PAGE (p);
1626 /* Add all live objects on this page to the count of
1627 allocated memory. */
1628 live_objects = num_objects - p->num_free_objects;
1630 G.allocated += OBJECT_SIZE (order) * live_objects;
1632 /* Only objects on pages in the topmost context should get
1634 if (p->context_depth < G.context_depth)
1637 /* Remove the page if it's empty. */
1638 else if (live_objects == 0)
1641 G.pages[order] = next;
1643 previous->next = next;
1645 /* Are we removing the last element? */
1646 if (p == G.page_tails[order])
1647 G.page_tails[order] = previous;
1652 /* If the page is full, move it to the end. */
1653 else if (p->num_free_objects == 0)
1655 /* Don't move it if it's already at the end. */
1656 if (p != G.page_tails[order])
1658 /* Move p to the end of the list. */
1660 G.page_tails[order]->next = p;
1662 /* Update the tail pointer... */
1663 G.page_tails[order] = p;
1665 /* ... and the head pointer, if necessary. */
1667 G.pages[order] = next;
1669 previous->next = next;
1674 /* If we've fallen through to here, it's a page in the
1675 topmost context that is neither full nor empty. Such a
1676 page must precede pages at lesser context depth in the
1677 list, so move it to the head. */
1678 else if (p != G.pages[order])
1680 previous->next = p->next;
1681 p->next = G.pages[order];
1683 /* Are we moving the last element? */
1684 if (G.page_tails[order] == p)
1685 G.page_tails[order] = previous;
1694 /* Now, restore the in_use_p vectors for any pages from contexts
1695 other than the current one. */
1696 for (p = G.pages[order]; p; p = p->next)
1697 if (p->context_depth != G.context_depth)
1698 ggc_recalculate_in_use_p (p);
1702 #ifdef ENABLE_GC_CHECKING
1703 /* Clobber all free objects. */
1710 for (order = 2; order < NUM_ORDERS; order++)
1712 size_t size = OBJECT_SIZE (order);
1715 for (p = G.pages[order]; p != NULL; p = p->next)
1720 if (p->context_depth != G.context_depth)
1721 /* Since we don't do any collection for pages in pushed
1722 contexts, there's no need to do any poisoning. And
1723 besides, the IN_USE_P array isn't valid until we pop
1727 num_objects = OBJECTS_IN_PAGE (p);
1728 for (i = 0; i < num_objects; i++)
1731 word = i / HOST_BITS_PER_LONG;
1732 bit = i % HOST_BITS_PER_LONG;
1733 if (((p->in_use_p[word] >> bit) & 1) == 0)
1735 char *object = p->page + i * size;
1737 /* Keep poison-by-write when we expect to use Valgrind,
1738 so the exact same memory semantics is kept, in case
1739 there are memory errors. We override this request
1741 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
1742 memset (object, 0xa5, size);
1744 /* Drop the handle to avoid handle leak. */
1745 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
1753 /* Top level mark-and-sweep routine. */
1758 /* Avoid frequent unnecessary work by skipping collection if the
1759 total allocations haven't expanded much since the last
1761 float allocated_last_gc =
1762 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1764 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1766 if (G.allocated < allocated_last_gc + min_expand)
1769 timevar_push (TV_GC);
1771 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1773 /* Zero the total allocated bytes. This will be recalculated in the
1777 /* Release the pages we freed the last time we collected, but didn't
1778 reuse in the interim. */
1781 /* Indicate that we've seen collections at this context depth. */
1782 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
1787 #ifdef ENABLE_GC_CHECKING
1793 G.allocated_last_gc = G.allocated;
1795 timevar_pop (TV_GC);
1798 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1801 /* Print allocation statistics. */
1802 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1804 : ((x) < 1024*1024*10 \
1806 : (x) / (1024*1024))))
1807 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
1810 ggc_print_statistics (void)
1812 struct ggc_statistics stats;
1814 size_t total_overhead = 0;
1816 /* Clear the statistics. */
1817 memset (&stats, 0, sizeof (stats));
1819 /* Make sure collection will really occur. */
1820 G.allocated_last_gc = 0;
1822 /* Collect and print the statistics common across collectors. */
1823 ggc_print_common_statistics (stderr, &stats);
1825 /* Release free pages so that we will not count the bytes allocated
1826 there as part of the total allocated memory. */
1829 /* Collect some information about the various sizes of
1831 fprintf (stderr, "%-5s %10s %10s %10s\n",
1832 "Size", "Allocated", "Used", "Overhead");
1833 for (i = 0; i < NUM_ORDERS; ++i)
1840 /* Skip empty entries. */
1844 overhead = allocated = in_use = 0;
1846 /* Figure out the total number of bytes allocated for objects of
1847 this size, and how many of them are actually in use. Also figure
1848 out how much memory the page table is using. */
1849 for (p = G.pages[i]; p; p = p->next)
1851 allocated += p->bytes;
1853 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
1855 overhead += (sizeof (page_entry) - sizeof (long)
1856 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
1858 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
1859 (unsigned long) OBJECT_SIZE (i),
1860 SCALE (allocated), LABEL (allocated),
1861 SCALE (in_use), LABEL (in_use),
1862 SCALE (overhead), LABEL (overhead));
1863 total_overhead += overhead;
1865 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
1866 SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
1867 SCALE (G.allocated), LABEL(G.allocated),
1868 SCALE (total_overhead), LABEL (total_overhead));
1870 #ifdef GATHER_STATISTICS
1872 fprintf (stderr, "Total Overhead: %10lld\n",
1873 G.stats.total_overhead);
1874 fprintf (stderr, "Total Allocated: %10lld\n",
1875 G.stats.total_allocated);
1877 fprintf (stderr, "Total Overhead under 32B: %10lld\n",
1878 G.stats.total_overhead_under32);
1879 fprintf (stderr, "Total Allocated under 32B: %10lld\n",
1880 G.stats.total_allocated_under32);
1881 fprintf (stderr, "Total Overhead under 64B: %10lld\n",
1882 G.stats.total_overhead_under64);
1883 fprintf (stderr, "Total Allocated under 64B: %10lld\n",
1884 G.stats.total_allocated_under64);
1885 fprintf (stderr, "Total Overhead under 128B: %10lld\n",
1886 G.stats.total_overhead_under128);
1887 fprintf (stderr, "Total Allocated under 128B: %10lld\n",
1888 G.stats.total_allocated_under128);
1890 for (i = 0; i < NUM_ORDERS; i++)
1891 if (G.stats.total_overhead_per_order[i])
1892 fprintf (stderr, "Total Overhead page size %7d: %10lld\n",
1893 OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]);
1900 struct ggc_pch_ondisk
1902 unsigned totals[NUM_ORDERS];
1904 size_t base[NUM_ORDERS];
1905 size_t written[NUM_ORDERS];
1908 struct ggc_pch_data *
1911 return xcalloc (sizeof (struct ggc_pch_data), 1);
1915 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
1916 size_t size, bool is_string ATTRIBUTE_UNUSED)
1921 order = size_lookup[size];
1925 while (size > OBJECT_SIZE (order))
1929 d->d.totals[order]++;
1933 ggc_pch_total_size (struct ggc_pch_data *d)
1938 for (i = 0; i < NUM_ORDERS; i++)
1939 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
1944 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
1946 size_t a = (size_t) base;
1949 for (i = 0; i < NUM_ORDERS; i++)
1952 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
1958 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
1959 size_t size, bool is_string ATTRIBUTE_UNUSED)
1965 order = size_lookup[size];
1969 while (size > OBJECT_SIZE (order))
1973 result = (char *) d->base[order];
1974 d->base[order] += OBJECT_SIZE (order);
1979 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1980 FILE *f ATTRIBUTE_UNUSED)
1982 /* Nothing to do. */
1986 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1987 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
1988 size_t size, bool is_string ATTRIBUTE_UNUSED)
1991 static const char emptyBytes[256];
1994 order = size_lookup[size];
1998 while (size > OBJECT_SIZE (order))
2002 if (fwrite (x, size, 1, f) != 1)
2003 fatal_error ("can't write PCH file: %m");
2005 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2006 object out to OBJECT_SIZE(order). This happens for strings. */
2008 if (size != OBJECT_SIZE (order))
2010 unsigned padding = OBJECT_SIZE(order) - size;
2012 /* To speed small writes, we use a nulled-out array that's larger
2013 than most padding requests as the source for our null bytes. This
2014 permits us to do the padding with fwrite() rather than fseek(), and
2015 limits the chance that the OS may try to flush any outstanding
2017 if (padding <= sizeof(emptyBytes))
2019 if (fwrite (emptyBytes, 1, padding, f) != padding)
2020 fatal_error ("can't write PCH file");
2024 /* Larger than our buffer? Just default to fseek. */
2025 if (fseek (f, padding, SEEK_CUR) != 0)
2026 fatal_error ("can't write PCH file");
2030 d->written[order]++;
2031 if (d->written[order] == d->d.totals[order]
2032 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2035 fatal_error ("can't write PCH file: %m");
2039 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2041 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2042 fatal_error ("can't write PCH file: %m");
2046 /* Move the PCH PTE entries just added to the end of by_depth, to the
2050 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2054 /* First, we swap the new entries to the front of the varrays. */
2055 page_entry **new_by_depth;
2056 unsigned long **new_save_in_use;
2058 new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
2059 new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
2061 memcpy (&new_by_depth[0],
2062 &G.by_depth[count_old_page_tables],
2063 count_new_page_tables * sizeof (void *));
2064 memcpy (&new_by_depth[count_new_page_tables],
2066 count_old_page_tables * sizeof (void *));
2067 memcpy (&new_save_in_use[0],
2068 &G.save_in_use[count_old_page_tables],
2069 count_new_page_tables * sizeof (void *));
2070 memcpy (&new_save_in_use[count_new_page_tables],
2072 count_old_page_tables * sizeof (void *));
2075 free (G.save_in_use);
2077 G.by_depth = new_by_depth;
2078 G.save_in_use = new_save_in_use;
2080 /* Now update all the index_by_depth fields. */
2081 for (i = G.by_depth_in_use; i > 0; --i)
2083 page_entry *p = G.by_depth[i-1];
2084 p->index_by_depth = i-1;
2087 /* And last, we update the depth pointers in G.depth. The first
2088 entry is already 0, and context 0 entries always start at index
2089 0, so there is nothing to update in the first slot. We need a
2090 second slot only if we have old ptes, and if we do, they start
2091 at index count_new_page_tables. */
2092 if (count_old_page_tables)
2093 push_depth (count_new_page_tables);
2097 ggc_pch_read (FILE *f, void *addr)
2099 struct ggc_pch_ondisk d;
2102 unsigned long count_old_page_tables;
2103 unsigned long count_new_page_tables;
2105 count_old_page_tables = G.by_depth_in_use;
2107 /* We've just read in a PCH file. So, every object that used to be
2108 allocated is now free. */
2114 /* No object read from a PCH file should ever be freed. So, set the
2115 context depth to 1, and set the depth of all the currently-allocated
2116 pages to be 1 too. PCH pages will have depth 0. */
2117 if (G.context_depth != 0)
2119 G.context_depth = 1;
2120 for (i = 0; i < NUM_ORDERS; i++)
2123 for (p = G.pages[i]; p != NULL; p = p->next)
2124 p->context_depth = G.context_depth;
2127 /* Allocate the appropriate page-table entries for the pages read from
2129 if (fread (&d, sizeof (d), 1, f) != 1)
2130 fatal_error ("can't read PCH file: %m");
2132 for (i = 0; i < NUM_ORDERS; i++)
2134 struct page_entry *entry;
2140 if (d.totals[i] == 0)
2143 bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2144 num_objs = bytes / OBJECT_SIZE (i);
2145 entry = xcalloc (1, (sizeof (struct page_entry)
2147 + BITMAP_SIZE (num_objs + 1)));
2148 entry->bytes = bytes;
2150 entry->context_depth = 0;
2152 entry->num_free_objects = 0;
2156 j + HOST_BITS_PER_LONG <= num_objs + 1;
2157 j += HOST_BITS_PER_LONG)
2158 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2159 for (; j < num_objs + 1; j++)
2160 entry->in_use_p[j / HOST_BITS_PER_LONG]
2161 |= 1L << (j % HOST_BITS_PER_LONG);
2163 for (pte = entry->page;
2164 pte < entry->page + entry->bytes;
2166 set_page_table_entry (pte, entry);
2168 if (G.page_tails[i] != NULL)
2169 G.page_tails[i]->next = entry;
2172 G.page_tails[i] = entry;
2174 /* We start off by just adding all the new information to the
2175 end of the varrays; later, we will move the new information
2176 to the front of the varrays, as the PCH page tables are at
2178 push_by_depth (entry, 0);
2181 /* Now, we update the various data structures that speed page table
2183 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2185 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2187 /* Update the statistics. */
2188 G.allocated = G.allocated_last_gc = offs - (char *)addr;