/* Should return the same value as GC_large_free_bytes. */
GC_INNER word GC_compute_large_free_bytes(void)
{
- struct hblk * h;
- hdr * hhdr;
word total_free = 0;
unsigned i;
for (i = 0; i <= N_HBLK_FLS; ++i) {
+ struct hblk * h;
+ hdr * hhdr;
+
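+ /* Sum the sizes recorded in the header of every block on list i. */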
for (h = GC_hblkfreelist[i]; h != 0; h = hhdr->hb_next) {
hhdr = HDR(h);
total_free += hhdr->hb_sz;
# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist(void)
{
- struct hblk * h;
- hdr * hhdr;
unsigned i;
word total;
for (i = 0; i <= N_HBLK_FLS; ++i) {
- h = GC_hblkfreelist[i];
+ struct hblk * h = GC_hblkfreelist[i];
+
if (0 != h) GC_printf("Free list %u (total size %lu):\n",
i, (unsigned long)GC_free_bytes[i]);
while (h != 0) {
- hhdr = HDR(h);
+ hdr * hhdr = HDR(h);
+
GC_printf("\t%p size %lu %s black listed\n",
(void *)h, (unsigned long) hhdr -> hb_sz,
GC_is_black_listed(h, HBLKSIZE) != 0 ? "start" :
/* appears, or -1 if it appears nowhere. */
static int free_list_index_of(hdr *wanted)
{
- struct hblk * h;
- hdr * hhdr;
int i;
for (i = 0; i <= N_HBLK_FLS; ++i) {
- h = GC_hblkfreelist[i];
- while (h != 0) {
+ struct hblk * h;
+ hdr * hhdr;
+
+ for (h = GC_hblkfreelist[i]; h != 0; h = hhdr -> hb_next) {
hhdr = HDR(h);
if (hhdr == wanted) return i;
- h = hhdr -> hb_next;
}
}
return -1;
void GC_dump_regions(void)
{
unsigned i;
- ptr_t start, end;
- ptr_t p;
- size_t bytes;
- hdr *hhdr;
+
for (i = 0; i < GC_n_heap_sects; ++i) {
- start = GC_heap_sects[i].hs_start;
- bytes = GC_heap_sects[i].hs_bytes;
- end = start + bytes;
+ ptr_t start = GC_heap_sects[i].hs_start;
+ size_t bytes = GC_heap_sects[i].hs_bytes;
+ ptr_t end = start + bytes;
+ ptr_t p;
+
/* Merge in contiguous sections. */
while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
++i;
}
GC_printf("***Section from %p to %p\n", start, end);
for (p = start; (word)p < (word)end; ) {
- hhdr = HDR(p);
+ hdr *hhdr = HDR(p);
+
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
GC_printf("\t%p Missing header!!(%p)\n", p, (void *)hhdr);
p += HBLKSIZE;
{
int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
struct hblk *second = GC_hblkfreelist[index];
- hdr * second_hdr;
# if defined(GC_ASSERTIONS) && !defined(USE_MUNMAP)
struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
hdr * nexthdr = HDR(next);
struct hblk *prev = GC_free_block_ending_at(h);
hdr * prevhdr = HDR(prev);
+
GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr)
|| (signed_word)GC_heapsize < 0);
/* In the last case, blocks may be too large to merge. */
GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr)
|| (signed_word)GC_heapsize < 0);
# endif
-
GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
GC_hblkfreelist[index] = h;
GC_free_bytes[index] += hhdr -> hb_sz;
hhdr -> hb_next = second;
hhdr -> hb_prev = 0;
if (0 != second) {
+ hdr * second_hdr;
+
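+ /* Point the old list head back at the newly inserted block. */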
GET_HDR(second, second_hdr);
second_hdr -> hb_prev = h;
}
/* way blocks are ever unmapped. */
GC_INNER void GC_unmap_old(void)
{
- struct hblk * h;
- hdr * hhdr;
int i;
if (GC_unmap_threshold == 0)
return; /* unmapping disabled */
for (i = 0; i <= N_HBLK_FLS; ++i) {
+ struct hblk * h;
+ hdr * hhdr;
+
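+ /* Examine each free block; already-unmapped ones are skipped. */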
for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
hhdr = HDR(h);
if (!IS_MAPPED(hhdr)) continue;
hdr * hhdr; /* Header corr. to hbp */
struct hblk *thishbp;
hdr * thishdr; /* Header corr. to thishbp */
- signed_word size_needed; /* number of bytes in requested objects */
- signed_word size_avail; /* bytes available in this block */
-
- size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS_CHECKED(sz);
+ signed_word size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS_CHECKED(sz);
+ /* number of bytes in requested objects */
/* search for a big enough block in free list */
for (hbp = GC_hblkfreelist[n];; hbp = hhdr -> hb_next) {
+ signed_word size_avail; /* bytes available in this block */
+
if (NULL == hbp) return NULL;
GET_HDR(hbp, hhdr); /* set hhdr value */
size_avail = hhdr->hb_sz;
if (size_avail < size_needed) continue;
if (size_avail != size_needed) {
- signed_word next_size;
-
if (!may_split) continue;
/* If the next heap block is obviously better, go on. */
/* This prevents us from disassembling a single large */
/* block to get tiny blocks. */
thishbp = hhdr -> hb_next;
if (thishbp != 0) {
+ signed_word next_size;
+
GET_HDR(thishbp, thishdr);
next_size = (signed_word)(thishdr -> hb_sz);
if (next_size < size_avail
*/
STATIC void GC_maybe_gc(void)
{
- static int n_partial_gcs = 0;
-
GC_ASSERT(I_HOLD_LOCK());
ASSERT_CANCEL_DISABLED();
if (GC_should_collect()) {
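+ /* Counts partial collections since the last full one. */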
+ static int n_partial_gcs = 0;
+
if (!GC_incremental) {
/* FIXME: If possible, GC_default_stop_func should be used here */
GC_try_to_collect_inner(GC_never_stop_func);
GC_INNER void GC_collect_a_little_inner(int n)
{
- int i;
IF_CANCEL(int cancel_state;)
if (GC_dont_gc) return;
DISABLE_CANCEL(cancel_state);
if (GC_incremental && GC_collection_in_progress()) {
+ int i;
+
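+ /* Do a bounded number of mark steps, adjusted by the deficit. */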
for (i = GC_deficit; i < GC_RATE*n; i++) {
if (GC_mark_some((ptr_t)0)) {
/* Need to finish a collection */
/* Set all mark bits for the free list whose first entry is q */
GC_INNER void GC_set_fl_marks(ptr_t q)
{
- struct hblk *h, *last_h;
- hdr *hhdr;
- IF_PER_OBJ(size_t sz;)
- unsigned bit_no;
-
- if (q != NULL) {
- h = HBLKPTR(q);
- last_h = h;
- hhdr = HDR(h);
- IF_PER_OBJ(sz = hhdr->hb_sz;)
-
- for (;;) {
- bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
+ if (q != NULL) {
+ struct hblk *h = HBLKPTR(q);
+ struct hblk *last_h = h;
+ hdr *hhdr = HDR(h);
+ IF_PER_OBJ(size_t sz = hhdr->hb_sz;)
+
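+ /* Walk the free list, setting the mark bit for each entry. */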
+ for (;;) {
+ unsigned bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
+
if (!mark_bit_from_hdr(hhdr, bit_no)) {
set_mark_bit_from_hdr(hhdr, bit_no);
++hhdr -> hb_n_marks;
hhdr = HDR(h);
IF_PER_OBJ(sz = hhdr->hb_sz;)
}
- }
- }
+ }
+ }
}
#if defined(GC_ASSERTIONS) && defined(THREADS) && defined(THREAD_LOCAL_ALLOC)
/* Decrement GC_bytes_found by number of bytes on free list. */
STATIC void GC_clear_fl_marks(ptr_t q)
{
- struct hblk *h, *last_h;
- hdr *hhdr;
- size_t sz;
- unsigned bit_no;
-
- if (q != NULL) {
- h = HBLKPTR(q);
- last_h = h;
- hhdr = HDR(h);
- sz = hhdr->hb_sz; /* Normally set only once. */
-
- for (;;) {
- bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
+ if (q != NULL) {
+ struct hblk *h = HBLKPTR(q);
+ struct hblk *last_h = h;
+ hdr *hhdr = HDR(h);
+ size_t sz = hhdr->hb_sz; /* Normally set only once. */
+
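+ /* Walk the free list, clearing mark bits and adjusting the counts. */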
+ for (;;) {
+ unsigned bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
+
if (mark_bit_from_hdr(hhdr, bit_no)) {
size_t n_marks = hhdr -> hb_n_marks - 1;
clear_mark_bit_from_hdr(hhdr, bit_no);
hhdr = HDR(h);
sz = hhdr->hb_sz;
}
- }
- }
+ }
+ }
}
#if defined(GC_ASSERTIONS) && defined(THREADS) && defined(THREAD_LOCAL_ALLOC)
GC_INNER void GC_register_dynamic_libraries(void)
{
MEMORY_BASIC_INFORMATION buf;
- size_t result;
DWORD protect;
LPVOID p;
char * base;
# endif
base = limit = p = GC_sysinfo.lpMinimumApplicationAddress;
while ((word)p < (word)GC_sysinfo.lpMaximumApplicationAddress) {
- result = VirtualQuery(p, &buf, sizeof(buf));
+ size_t result = VirtualQuery(p, &buf, sizeof(buf));
+
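+ /* VirtualQuery returns the number of bytes placed in buf. */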
# ifdef MSWINCE
if (result == 0) {
/* Page is free; advance to the next possible allocation base */
hdr * hhdr = HDR(p);
word descr = hhdr -> hb_descr;
ptr_t q;
- word r;
ptr_t scan_limit;
ptr_t target_limit = p + hhdr -> hb_sz - 1;
scan_limit = target_limit + 1 - sizeof(word);
}
for (q = p; (word)q <= (word)scan_limit; q += ALIGNMENT) {
- r = *(word *)q;
+ word r = *(word *)q;
+
if (r < (word)p || r > (word)target_limit) {
GC_PUSH_ONE_HEAP(r, q, GC_mark_stack_top);
}
finalization_mark_proc mp)
{
ptr_t base;
- struct finalizable_object * curr_fo, * prev_fo;
+ struct finalizable_object * curr_fo;
size_t index;
struct finalizable_object *new_fo = 0;
hdr *hhdr = NULL; /* initialized to prevent warning. */
- GC_oom_func oom_fn;
DCL_LOCK_STATE;
LOCK();
/* in the THREADS case we hold allocation lock. */
base = (ptr_t)obj;
for (;;) {
+ struct finalizable_object *prev_fo = NULL;
+ GC_oom_func oom_fn;
+
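+ /* Recompute the hash index on each pass; the table may have grown. */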
index = HASH2(base, log_fo_table_size);
- prev_fo = 0;
curr_fo = GC_fnlz_roots.fo_head[index];
while (curr_fo != 0) {
GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
STATIC void GC_dump_finalization_links(
const struct dl_hashtbl_s *dl_hashtbl)
{
- struct disappearing_link *curr_dl;
- ptr_t real_ptr, real_link;
size_t dl_size = dl_hashtbl->log_size == -1 ? 0 :
(size_t)1 << dl_hashtbl->log_size;
size_t i;
for (i = 0; i < dl_size; i++) {
+ struct disappearing_link *curr_dl;
+
for (curr_dl = dl_hashtbl -> head[i]; curr_dl != 0;
curr_dl = dl_next(curr_dl)) {
- real_ptr = GC_REVEAL_POINTER(curr_dl -> dl_hidden_obj);
- real_link = GC_REVEAL_POINTER(curr_dl -> dl_hidden_link);
+ ptr_t real_ptr = GC_REVEAL_POINTER(curr_dl -> dl_hidden_obj);
+ ptr_t real_link = GC_REVEAL_POINTER(curr_dl -> dl_hidden_link);
+
GC_printf("Object: %p, link: %p\n", real_ptr, real_link);
}
}
struct finalizable_object * curr_fo;
size_t fo_size = log_fo_table_size == -1 ? 0 :
(size_t)1 << log_fo_table_size;
- ptr_t real_ptr;
size_t i;
GC_printf("Disappearing (short) links:\n");
for (i = 0; i < fo_size; i++) {
for (curr_fo = GC_fnlz_roots.fo_head[i];
curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
- real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
+ ptr_t real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
+
GC_printf("Finalizable object: %p\n", real_ptr);
}
}
size_t dl_size = dl_hashtbl->log_size == -1 ? 0 : \
(size_t)1 << dl_hashtbl->log_size; \
for (i = 0; i < dl_size; i++) { \
+ struct disappearing_link *prev_dl = NULL; \
curr_dl = dl_hashtbl -> head[i]; \
- prev_dl = NULL; \
while (curr_dl) {
#define ITERATE_DL_HASHTBL_END(curr_dl, prev_dl) \
GC_INLINE void GC_make_disappearing_links_disappear(
struct dl_hashtbl_s* dl_hashtbl)
{
- struct disappearing_link *curr, *prev, *next;
- ptr_t real_ptr, real_link;
+ struct disappearing_link *curr, *next;
ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr, prev)
- real_ptr = GC_REVEAL_POINTER(curr -> dl_hidden_obj);
- real_link = GC_REVEAL_POINTER(curr -> dl_hidden_link);
+ ptr_t real_ptr = GC_REVEAL_POINTER(curr -> dl_hidden_obj);
+ ptr_t real_link = GC_REVEAL_POINTER(curr -> dl_hidden_link);
+
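+ /* If the object is no longer reachable, zero the client's link. */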
if (!GC_is_marked(real_ptr)) {
*(word *)real_link = 0;
GC_clear_mark_bit(curr);
GC_INLINE void GC_remove_dangling_disappearing_links(
struct dl_hashtbl_s* dl_hashtbl)
{
- struct disappearing_link *curr, *prev, *next;
- ptr_t real_link;
+ struct disappearing_link *curr, *next;
ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr, prev)
- real_link = GC_base(GC_REVEAL_POINTER(curr -> dl_hidden_link));
+ ptr_t real_link = GC_base(GC_REVEAL_POINTER(curr -> dl_hidden_link));
+
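+ /* Remove entries whose link location is itself unreachable. */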
if (NULL != real_link && !GC_is_marked(real_link)) {
GC_clear_mark_bit(curr);
DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr, prev, next);
/* Should be called without allocation lock. */
GC_API int GC_CALL GC_invoke_finalizers(void)
{
- struct finalizable_object * curr_fo;
int count = 0;
word bytes_freed_before = 0; /* initialized to prevent warning. */
DCL_LOCK_STATE;
while (GC_fnlz_roots.finalize_now != NULL) {
+ struct finalizable_object * curr_fo;
+
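+ /* Remove one object at a time from the finalize_now list. */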
# ifdef THREADS
LOCK();
# endif
GC_INNER GC_bool GC_install_counts(struct hblk *h, size_t sz/* bytes */)
{
struct hblk * hbp;
- word i;
for (hbp = h; (word)hbp < (word)h + sz; hbp += BOTTOM_SZ) {
if (!get_index((word) hbp)) return(FALSE);
}
if (!get_index((word)h + sz - 1)) return(FALSE);
for (hbp = h + 1; (word)hbp < (word)h + sz; hbp += 1) {
- i = HBLK_PTR_DIFF(hbp, h);
+ word i = HBLK_PTR_DIFF(hbp, h);
+
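+ /* Store a back-displacement (capped at MAX_JUMP) in place of a header. */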
SET_HDR(hbp, (hdr *)(i > MAX_JUMP? MAX_JUMP : i));
}
return(TRUE);
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind_global(size_t lb, int k)
{
- void *op;
- void **opp;
- size_t lg;
- DCL_LOCK_STATE;
-
GC_ASSERT(k < MAXOBJKINDS);
if (SMALL_OBJ(lb)) {
+ void *op;
+ void **opp;
+ size_t lg;
+ DCL_LOCK_STATE;
+
GC_DBG_COLLECT_AT_MALLOC(lb);
lg = GC_size_map[lb];
LOCK();
opp = &GC_obj_kinds[k].ok_freelist[lg];
op = *opp;
size_t lb, int k)
{
void *op;
- void **opp;
- size_t lg;
DCL_LOCK_STATE;
GC_ASSERT(k < MAXOBJKINDS);
if (SMALL_OBJ(lb)) {
+ void **opp;
+ size_t lg;
+
GC_DBG_COLLECT_AT_MALLOC(lb);
if (EXTRA_BYTES != 0 && lb != 0) lb--;
/* We don't need the extra byte, since this won't be */
hdr *hhdr;
size_t sz; /* In bytes */
size_t ngranules; /* sz in granules */
- void **flh;
int knd;
struct obj_kind * ok;
DCL_LOCK_STATE;
knd = hhdr -> hb_obj_kind;
ok = &GC_obj_kinds[knd];
if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
+ void **flh;
+
LOCK();
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
UNLOCK();
} else {
size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
+
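+ /* Large object: the entire block run is returned to the allocator. */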
LOCK();
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
register word *p;
- register word q;
register word *lim;
register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
register ptr_t least_ha = GC_least_plausible_heap_addr;
lim = t - 1 /* longword */;
for (p = b; (word)p <= (word)lim;
p = (word *)(((ptr_t)p) + ALIGNMENT)) {
- q = *p;
+ register word q = *p;
+
GC_PUSH_ONE_STACK(q, p);
}
# undef GC_greatest_plausible_heap_addr
word * mark_word_addr = &(hhdr->hb_marks[0]);
word *p;
word *plim;
- word *q;
- word mark_word;
/* Allow registers to be used for some frequently accessed */
/* global variables. Otherwise aliasing issues are likely */
/* go through all words in block */
while ((word)p < (word)plim) {
- mark_word = *mark_word_addr++;
- q = p;
+ word mark_word = *mark_word_addr++;
+ word *q = p;
+
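+ /* Each set bit in mark_word marks one live granule to push. */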
while(mark_word != 0) {
if (mark_word & 1) {
PUSH_GRANULE(q);
word * mark_word_addr = &(hhdr->hb_marks[0]);
word *p;
word *plim;
- word *q;
- word mark_word;
ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
ptr_t least_ha = GC_least_plausible_heap_addr;
/* go through all words in block */
while ((word)p < (word)plim) {
- mark_word = *mark_word_addr++;
- q = p;
+ word mark_word = *mark_word_addr++;
+ word *q = p;
+
while(mark_word != 0) {
if (mark_word & 1) {
PUSH_GRANULE(q);
word * mark_word_addr = &(hhdr->hb_marks[0]);
word *p;
word *plim;
- word *q;
- word mark_word;
ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
ptr_t least_ha = GC_least_plausible_heap_addr;
/* go through all words in block */
while ((word)p < (word)plim) {
- mark_word = *mark_word_addr++;
- q = p;
+ word mark_word = *mark_word_addr++;
+ word *q = p;
+
while(mark_word != 0) {
if (mark_word & 1) {
PUSH_GRANULE(q);
{
size_t low = 0;
size_t high = GC_excl_table_entries - 1;
- size_t mid;
while (high > low) {
- mid = (low + high) >> 1;
+ size_t mid = (low + high) >> 1;
+
/* low <= mid < high */
if ((word) GC_excl_table[mid].e_end <= (word) start_addr) {
low = mid + 1;
GC_INNER void GC_exclude_static_roots_inner(void *start, void *finish)
{
struct exclusion * next;
- size_t next_index, i;
+ size_t next_index;
GC_ASSERT((word)start % sizeof(word) == 0);
GC_ASSERT((word)start < (word)finish);
next = GC_next_exclusion(start);
}
if (0 != next) {
+ size_t i;
+
if ((word)(next -> e_start) < (word) finish) {
/* incomplete error check. */
ABORT("Exclusion ranges overlap");
STATIC void GC_push_conditional_with_exclusions(ptr_t bottom, ptr_t top,
GC_bool all GC_ATTR_UNUSED)
{
- struct exclusion * next;
- ptr_t excl_start;
-
while ((word)bottom < (word)top) {
- next = GC_next_exclusion(bottom);
- if (0 == next || (word)(excl_start = next -> e_start) >= (word)top) {
- GC_PUSH_CONDITIONAL(bottom, top, all);
- return;
+ struct exclusion *next = GC_next_exclusion(bottom);
+ ptr_t excl_start;
+
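+ /* No exclusion starts below top: push the remainder and stop. */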
+ if (0 == next
+ || (word)(excl_start = next -> e_start) >= (word)top) {
+ GC_PUSH_CONDITIONAL(bottom, top, all);
+ break;
}
if ((word)excl_start > (word)bottom)
GC_PUSH_CONDITIONAL(bottom, excl_start, all);
struct hblk *h;
bottom_index *bi;
hdr *candidate_hdr;
- ptr_t limit;
r = p;
if (!EXPECT(GC_is_initialized, TRUE)) return 0;
size_t offset = HBLKDISPL(r);
word sz = candidate_hdr -> hb_sz;
size_t obj_displ = offset % sz;
+ ptr_t limit;
+
r -= obj_displ;
limit = r + sz;
STATIC word GC_parse_mem_size_arg(const char *str)
{
- char *endptr;
word result = 0; /* bad value */
- char ch;
if (*str != '\0') {
+ char *endptr;
+ char ch;
+
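+ /* Parse the digits; a single trailing character may be a unit suffix. */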
result = (word)STRTOULL(str, &endptr, 10);
ch = *endptr;
if (ch != '\0') {
return len;
# else
int bytes_written = 0;
- int result;
IF_CANCEL(int cancel_state;)
DISABLE_CANCEL(cancel_state);
while ((size_t)bytes_written < len) {
# ifdef GC_SOLARIS_THREADS
- result = syscall(SYS_write, fd, buf + bytes_written,
+ int result = syscall(SYS_write, fd, buf + bytes_written,
len - bytes_written);
# else
- result = write(fd, buf + bytes_written, len - bytes_written);
+ int result = write(fd, buf + bytes_written, len - bytes_written);
# endif
+
if (-1 == result) {
RESTORE_CANCEL(cancel_state);
return(result);
STATIC ssize_t GC_repeat_read(int fd, char *buf, size_t count)
{
size_t num_read = 0;
- ssize_t result;
ASSERT_CANCEL_DISABLED();
while (num_read < count) {
- result = READ(fd, buf + num_read, count - num_read);
+ ssize_t result = READ(fd, buf + num_read, count - num_read);
+
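+ /* A short read is retried; zero means EOF, negative means error. */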
if (result < 0) return result;
if (result == 0) break;
num_read += result;
/* of time. */
GC_INNER char * GC_get_maps(void)
{
- int f;
ssize_t result;
static char *maps_buf = NULL;
static size_t maps_buf_sz = 1;
/* Note that we may not allocate conventionally, and */
/* thus can't use stdio. */
do {
+ int f;
+
while (maps_size >= maps_buf_sz) {
/* Grow only by powers of 2, since we leak "too small" buffers.*/
while (maps_size >= maps_buf_sz) maps_buf_sz *= 2;
STATIC ptr_t GC_least_described_address(ptr_t start)
{
MEMORY_BASIC_INFORMATION buf;
- size_t result;
LPVOID limit;
ptr_t p;
- LPVOID q;
limit = GC_sysinfo.lpMinimumApplicationAddress;
p = (ptr_t)((word)start & ~(GC_page_size - 1));
for (;;) {
- q = (LPVOID)(p - GC_page_size);
+ size_t result;
+ LPVOID q = (LPVOID)(p - GC_page_size);
+
if ((word)q > (word)p /* underflow */ || (word)q < (word)limit) break;
result = VirtualQuery(q, &buf, sizeof(buf));
if (result != sizeof(buf) || buf.AllocationBase == 0) break;
STATIC void GC_register_root_section(ptr_t static_root)
{
MEMORY_BASIC_INFORMATION buf;
- size_t result;
DWORD protect;
LPVOID p;
char * base;
- char * limit, * new_limit;
+ char * limit;
if (!GC_no_win32_dlls) return;
p = base = limit = GC_least_described_address(static_root);
while ((word)p < (word)GC_sysinfo.lpMaximumApplicationAddress) {
- result = VirtualQuery(p, &buf, sizeof(buf));
+ size_t result = VirtualQuery(p, &buf, sizeof(buf));
+ char * new_limit;
+
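+ /* Stop at unqueryable memory or at the collector's own heap. */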
if (result != sizeof(buf) || buf.AllocationBase == 0
|| GC_is_heap_base(buf.AllocationBase)) break;
new_limit = (char *)p + buf.RegionSize;
void GC_register_data_segments(void)
{
ptr_t region_start = DATASTART;
- ptr_t region_end;
if ((word)region_start - 1U >= (word)DATAEND)
ABORT_ARG2("Wrong DATASTART/END pair",
": %p .. %p", region_start, DATAEND);
for (;;) {
- region_end = GC_find_limit_openbsd(region_start, DATAEND);
+ ptr_t region_end = GC_find_limit_openbsd(region_start, DATAEND);
+
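+ /* Register each accessible region until DATAEND is reached. */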
GC_add_roots_inner(region_start, region_end, FALSE);
if ((word)region_end >= (word)DATAEND)
break;
hdr * hhdr = HDR(hbp);
size_t sz = hhdr -> hb_sz; /* size of objects in current block */
struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
- struct hblk ** rlh;
if( sz > MAXOBJBYTES ) { /* 1 big object */
if( !mark_bit_from_hdr(hhdr, 0) ) {
}
} else if (GC_find_leak || !GC_block_nearly_full(hhdr)) {
/* group of smaller objects, enqueue the real work */
- rlh = &(ok -> ok_reclaim_list[BYTES_TO_GRANULES(sz)]);
+ struct hblk **rlh = ok -> ok_reclaim_list + BYTES_TO_GRANULES(sz);
+
hhdr -> hb_next = *rlh;
*rlh = hbp;
} /* else not worth salvaging. */
GC_atomic_in_use = 0;
/* Clear reclaim- and free-lists */
for (kind = 0; kind < GC_n_kinds; kind++) {
- void **fop;
- void **lim;
struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
GC_bool should_clobber = (GC_obj_kinds[kind].ok_descriptor != 0);
if (rlist == 0) continue; /* This kind not used. */
if (!report_if_found) {
- lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]);
+ void **fop;
+ void **lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]);
+
for (fop = GC_obj_kinds[kind].ok_freelist;
(word)fop < (word)lim; fop++) {
if (*fop != 0) {