/* Execute s once for each predecessor q of p in the points-to graph. */
/* s should be a bracketed statement.  We declare q.                  */
/* NOTE(review): the interior of this macro appears truncated in this */
/* chunk; only the leftover patch markers are resolved here.          */
#define FOR_EACH_PRED(q, p, s) \
  do { \
    ptr_t q = GET_OH_BG_PTR(p); \
    if (!((word)q & FLAG_MANY)) { \
      if (q && !((word)q & 1)) s \
      q = be_ -> edges[local_]; s \
    } \
  } \
  } while (0)
/* Ensure that p has a back_edges structure associated with it. */
static void ensure_struct(ptr_t p)
/* amount of code. */
/* Fast-path allocation of 'granules' granules from the thread-local */
/* free list 'tiny_fl'; falls back to 'default_expr' for large sizes. */
/* NOTE(review): interior elided in this chunk; markers resolved only. */
# define GC_FAST_MALLOC_GRANS(result,granules,tiny_fl,num_direct,\
                              kind,default_expr,init) \
  do { \
    if (GC_EXPECT((granules) >= GC_TINY_FREELISTS,0)) { \
      result = (default_expr); \
    } else { \
      void **my_fl = (tiny_fl) + (granules); \
      void *my_entry=*my_fl; \
      void *next; \
      \
      while (GC_EXPECT((GC_word)my_entry \
                       <= (num_direct) + GC_TINY_FREELISTS + 1, 0)) { \
        /* Entry contains counter or NULL */ \
        GC_ASSERT(GC_size(result) >= (granules)*GC_GRANULE_BYTES); \
        GC_ASSERT((kind) == PTRFREE || ((GC_word *)result)[1] == 0); \
      out: ; \
      } \
  } while (0)
# define GC_WORDS_TO_WHOLE_GRANULES(n) \
GC_WORDS_TO_GRANULES((n) + GC_GRANULE_WORDS - 1)
/* free list array. For single-threaded applications, this may be */
/* a global array. */
/* Allocate n words of collectable (pointer-containing) memory. */
# define GC_MALLOC_WORDS(result,n,tiny_fl) \
  do { \
    size_t grans = GC_WORDS_TO_WHOLE_GRANULES(n); \
    GC_FAST_MALLOC_GRANS(result, grans, tiny_fl, 0, \
                         NORMAL, GC_malloc(grans*GC_GRANULE_BYTES), \
                         *(void **)(result) = 0); \
  } while (0)
/* Allocate n words of pointer-free memory (contents not scanned). */
# define GC_MALLOC_ATOMIC_WORDS(result,n,tiny_fl) \
  do { \
    size_t grans = GC_WORDS_TO_WHOLE_GRANULES(n); \
    GC_FAST_MALLOC_GRANS(result, grans, tiny_fl, 0, \
                         PTRFREE, GC_malloc_atomic(grans*GC_GRANULE_BYTES), \
                         (void)0 /* no initialization */); \
  } while (0)
/* And once more for two word initialized objects: */
# define GC_CONS(result, first, second, tiny_fl) \
  do { \
    size_t grans = GC_WORDS_TO_WHOLE_GRANULES(2); \
    GC_FAST_MALLOC_GRANS(result, grans, tiny_fl, 0, \
                         NORMAL, GC_malloc(grans*GC_GRANULE_BYTES), \
                         *(void **)(result) = (void *)(first)); \
    ((void **)(result))[1] = (void *)(second); \
  } while (0)
#endif /* !GC_INLINE_H */
/* Returns zero if p points to somewhere other than the first page */
/* of an object, and it is not a valid pointer to the object. */
/* Look up the header for p via the header cache; on a miss that      */
/* yields no header, jump to exit_label.                              */
/* NOTE(review): the hit/miss branches look interleaved by chunking;  */
/* only the patch markers are resolved here.                          */
#define HC_GET_HDR(p, hhdr, source, exit_label) \
  do { \
    hdr_cache_entry * hce = HCE(p); \
    if (EXPECT(HCE_VALID_FOR(hce, p), TRUE)) { \
      HC_HIT(); \
      hhdr = HEADER_CACHE_MISS(p, hce, source); \
      if (0 == hhdr) goto exit_label; \
    } \
  } while (0)
typedef struct bi {
hdr * index[BOTTOM_SZ];
# else
# define HDR(p) HDR_INNER(p)
# endif
/* Expression-style accessors; the (void) cast makes each usable as a */
/* statement without a "computed value not used" warning.             */
# define GET_BI(p, bottom_indx) (void)((bottom_indx) = BI(p))
# define GET_HDR(p, hhdr) (void)((hhdr) = HDR(p))
# define SET_HDR(p, hhdr) (void)(HDR_INNER(p) = (hhdr))
# define GET_HDR_ADDR(p, ha) (void)((ha) = &HDR_INNER(p))
#else /* hash */
/* Hash function for tree top level */
# define TL_HASH(hi) ((hi) & (TOP_SZ - 1))
/* Set bottom_indx to point to the bottom index for address p */
# define GET_BI(p, bottom_indx) \
    do { \
      register word hi = \
              (word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE); \
      register bottom_index * _bi = GC_top_index[TL_HASH(hi)]; \
      while (_bi -> key != hi && _bi != GC_all_nils) \
        _bi = _bi -> hash_link; \
      (bottom_indx) = _bi; \
    } while (0)
/* Set ha to the address of the header slot for p. */
# define GET_HDR_ADDR(p, ha) \
    do { \
      register bottom_index * bi; \
      GET_BI(p, bi); \
      (ha) = &HDR_FROM_BI(bi, p); \
    } while (0)
# define GET_HDR(p, hhdr) \
    do { \
      register hdr ** _ha; \
      GET_HDR_ADDR(p, _ha); \
      (hhdr) = *_ha; \
    } while (0)
# define SET_HDR(p, hhdr) \
    do { \
      register hdr ** _ha; \
      GET_HDR_ADDR(p, _ha); \
      *_ha = (hhdr); \
    } while (0)
# define HDR(p) GC_find_header((ptr_t)(p))
#endif
GC_EXTERN GC_bool GC_need_to_lock;
# else /* !THREADS */
/* Single-threaded: locking is a no-op, but (void)0 keeps each macro  */
/* a valid statement (safe after "if" without braces).                */
# define LOCK() (void)0
# define UNLOCK() (void)0
# define SET_LOCK_HOLDER() (void)0
# define UNSET_LOCK_HOLDER() (void)0
# define I_HOLD_LOCK() TRUE
# define I_DONT_HOLD_LOCK() TRUE
    /* Used only in positive assertions or to test whether */
# define UNLOCK() UNCOND_UNLOCK()
# else
    /* At least two thread running; need to lock. */
# define LOCK() do { if (GC_need_to_lock) UNCOND_LOCK(); } while (0)
# define UNLOCK() do { if (GC_need_to_lock) UNCOND_UNLOCK(); } while (0)
# endif
#endif
/* Push the object obj with corresponding heap block header hhdr onto */
/* the mark stack. */
#define PUSH_OBJ(obj, hhdr, mark_stack_top, mark_stack_limit) \
  do { \
    register word _descr = (hhdr) -> hb_descr; \
    GC_ASSERT(!HBLK_IS_FREE(hhdr)); \
    if (_descr != 0) { \
      mark_stack_top -> mse_start = (obj); \
      mark_stack_top -> mse_descr.w = _descr; \
    } \
  } while (0)
/* Push the contents of current onto the mark stack if it is a valid */
/* ptr to a currently unmarked object.  Mark it.                     */
/* generate the exit_label transparently.                            */
#define PUSH_CONTENTS(current, mark_stack_top, mark_stack_limit, \
                      source, exit_label) \
  do { \
    hdr * my_hhdr; \
    HC_GET_HDR(current, my_hhdr, source, exit_label); \
    PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
                      source, exit_label, my_hhdr, TRUE); \
  exit_label: ; \
  } while (0)
/* Set mark bit, exit if it was already set. */
#ifdef USE_MARK_BYTES
/* the bit twice in the concurrent case. This can result in the */
/* object being pushed twice. But that's only a performance issue. */
/* Mark-byte variant: set the byte, or jump to exit_label if already set. */
# define SET_MARK_BIT_EXIT_IF_SET(hhdr,bit_no,exit_label) \
    do { \
      char * mark_byte_addr = (char *)hhdr -> hb_marks + (bit_no); \
      if (*mark_byte_addr) goto exit_label; \
      *mark_byte_addr = 1; \
    } while (0)
#else
# ifdef PARALLEL_MARK
    /* This is used only if we explicitly set USE_MARK_BITS.           */
    /* The following may fail to exit even if the bit was already set. */
    /* For our uses, that's benign:                                    */
#   define OR_WORD_EXIT_IF_SET(addr, bits, exit_label) \
      do { \
        if (!(*(addr) & (bits))) { \
          AO_or((volatile AO_t *)(addr), (AO_t)(bits)); \
        } else { \
          goto exit_label; \
        } \
      } while (0)
# else
#   define OR_WORD_EXIT_IF_SET(addr, bits, exit_label) \
      do { \
        word old = *(addr); \
        word my_bits = (bits); \
        if (old & my_bits) goto exit_label; \
        *(addr) = (old | my_bits); \
      } while (0)
# endif /* !PARALLEL_MARK */
# define SET_MARK_BIT_EXIT_IF_SET(hhdr,bit_no,exit_label) \
    do { \
      word * mark_word_addr = hhdr -> hb_marks + divWORDSZ(bit_no); \
      OR_WORD_EXIT_IF_SET(mark_word_addr, (word)1 << modWORDSZ(bit_no), \
                          exit_label); \
    } while (0)
#endif /* !USE_MARK_BYTES */
#ifdef PARALLEL_MARK
#endif
#if defined(I386) && defined(__GNUC__)
  /* LONG_MULT(hprod, lprod, x, y): full 32x32 -> 64 bit unsigned       */
  /* multiply; high half into hprod, low half into lprod.               */
# define LONG_MULT(hprod, lprod, x, y) \
    do { \
      __asm__ __volatile__("mull %2" : "=a"(lprod), "=d"(hprod) \
                           : "g"(y), "0"(x)); \
    } while (0)
#else
# define LONG_MULT(hprod, lprod, x, y) \
    do { \
      unsigned long long prod = (unsigned long long)(x) \
                                * (unsigned long long)(y); \
      hprod = prod >> 32; \
      lprod = (unsigned32)prod; \
    } while (0)
#endif /* !I386 */
/* If the mark bit corresponding to current is not set, set it, and */
#ifdef MARK_BIT_PER_GRANULE
  /* NOTE(review): interior elided in this chunk; markers resolved only. */
# define PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
                           source, exit_label, hhdr, do_offset_check) \
  do { \
    size_t displ = HBLKDISPL(current); /* Displacement in block; in bytes. */\
    /* displ is always within range. If current doesn't point to */ \
    /* first block, then we are in the all_interior_pointers case, and */ \
    INCR_MARKS(hhdr); \
    GC_STORE_BACK_PTR((ptr_t)source, base); \
    PUSH_OBJ(base, hhdr, mark_stack_top, mark_stack_limit); \
  } while (0)
#endif /* MARK_BIT_PER_GRANULE */
#ifdef MARK_BIT_PER_OBJ
  /* NOTE(review): interior elided in this chunk; markers resolved only. */
# define PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
                           source, exit_label, hhdr, do_offset_check) \
  do { \
    size_t displ = HBLKDISPL(current); /* Displacement in block; in bytes. */\
    unsigned32 low_prod, high_prod; \
    unsigned32 inv_sz = hhdr -> hb_inv_sz; \
    LONG_MULT(high_prod, low_prod, displ, inv_sz); \
    /* product is > and within sz_in_bytes of displ * sz_in_bytes * 2**32 */ \
    if (EXPECT(low_prod >> 16 != 0, FALSE)) { \
      /* FIXME: fails if offset is a multiple of HBLKSIZE which becomes 0 */ \
      if (inv_sz == LARGE_INV_SZ) { \
        size_t obj_displ; \
        base = (ptr_t)(hhdr -> hb_block); \
    INCR_MARKS(hhdr); \
    GC_STORE_BACK_PTR((ptr_t)source, base); \
    PUSH_OBJ(base, hhdr, mark_stack_top, mark_stack_limit); \
  } while (0)
#endif /* MARK_BIT_PER_OBJ */
#if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
#if NEED_FIXUP_POINTER
/* Try both the raw version and the fixed up one. */
/* NOTE(review): the fixed-up-pointer re-check appears truncated by    */
/* chunking (FIXUP_POINTER call between the two range tests is         */
/* missing); only the patch markers are resolved here.                 */
# define GC_PUSH_ONE_STACK(p, source) \
    do { \
      if ((word)(p) >= (word)GC_least_plausible_heap_addr \
          && (word)(p) < (word)GC_greatest_plausible_heap_addr) { \
        PUSH_ONE_CHECKED_STACK(p, source); \
      if ((word)(p) >= (word)GC_least_plausible_heap_addr \
          && (word)(p) < (word)GC_greatest_plausible_heap_addr) { \
        PUSH_ONE_CHECKED_STACK(p, source); \
      } \
    } while (0)
#else /* !NEED_FIXUP_POINTER */
# define GC_PUSH_ONE_STACK(p, source) \
    do { \
      if ((word)(p) >= (word)GC_least_plausible_heap_addr \
          && (word)(p) < (word)GC_greatest_plausible_heap_addr) { \
        PUSH_ONE_CHECKED_STACK(p, source); \
      } \
    } while (0)
#endif
/* As above, but interior pointer recognition as for normal heap pointers. */
#define GC_PUSH_ONE_HEAP(p,source,mark_stack_top) \
  do { \
    FIXUP_POINTER(p); \
    if ((word)(p) >= (word)GC_least_plausible_heap_addr \
        && (word)(p) < (word)GC_greatest_plausible_heap_addr) \
      mark_stack_top = GC_mark_and_push((void *)(p), mark_stack_top, \
                              GC_mark_stack_limit, (void * *)(source)); \
  } while (0)
/* Mark starting at mark stack entry top (incl.) down to */
/* mark stack entry bottom (incl.). Stop after performing */
* FIXME: Why do we need the GC_mark_state test below?
*/
/* Mark from a finalizable object via mark_proc, then drain the mark  */
/* stack; falls back to GC_mark_some if marking is still in progress. */
#define GC_MARK_FO(real_ptr, mark_proc) \
  do { \
    (*(mark_proc))(real_ptr); \
    while (!GC_mark_stack_empty()) MARK_FROM_MARK_STACK(); \
    if (GC_mark_state != MS_NONE) { \
      GC_set_mark_bit(real_ptr); \
      while (!GC_mark_some((ptr_t)0)) { /* empty */ } \
    } \
  } while (0)
GC_EXTERN GC_bool GC_mark_stack_too_small;
/* We need a larger mark stack. May be */
#endif /* __GNUC__ */
#ifdef HAVE_CONFIG_H
- /* The "inline" keyword is as determined by Autoconf's AC_C_INLINE. */
+ /* The "inline" keyword is determined by Autoconf AC_C_INLINE. */
# define GC_INLINE static inline
#elif defined(_MSC_VER) || defined(__INTEL_COMPILER) || defined(__DMC__) \
|| defined(__WATCOMC__)
# define COOLER_THAN >
# define HOTTER_THAN <
  /* Saturating adjustment: clamp to ONES on overflow. */
# define MAKE_COOLER(x,y) if ((word)((x) + (y)) > (word)(x)) {(x) += (y);} \
                          else (x) = (ptr_t)ONES
# define MAKE_HOTTER(x,y) (x) -= (y)
# else
# define COOLER_THAN <
# define HOTTER_THAN >
  /* Saturating adjustment: clamp to 0 on underflow. */
# define MAKE_COOLER(x,y) if ((word)((x) - (y)) < (word)(x)) {(x) -= (y);} \
                          else (x) = 0
# define MAKE_HOTTER(x,y) (x) += (y)
# endif
# undef GET_TIME
# undef MS_TIME_DIFF
# define CLOCK_TYPE struct timeval
  /* Store the process's user CPU time (a struct timeval) into x. */
# define GET_TIME(x) \
    do { \
      struct rusage rusage; \
      getrusage(RUSAGE_SELF, &rusage); \
      x = rusage.ru_utime; \
    } while (0)
# define MS_TIME_DIFF(a,b) ((unsigned long)(a.tv_sec - b.tv_sec) * 1000 \
                            + (unsigned long)(a.tv_usec - b.tv_usec) / 1000)
#elif defined(MSWIN32) || defined(MSWINCE)
# include <windows.h>
# include <winbase.h>
# define CLOCK_TYPE DWORD
# define GET_TIME(x) (void)(x = GetTickCount())
# define MS_TIME_DIFF(a,b) ((long)((a)-(b)))
#else /* !MSWIN32, !MSWINCE, !BSD_TIME */
# include <time.h>
  /* microseconds (which are not really clock ticks). */
# endif
# define CLOCK_TYPE clock_t
# define GET_TIME(x) (void)(x = clock())
# define MS_TIME_DIFF(a,b) (CLOCKS_PER_SEC % 1000 == 0 ? \
    (unsigned long)((a) - (b)) / (unsigned long)(CLOCKS_PER_SEC / 1000) \
    : ((unsigned long)((a) - (b)) * 1000) / (unsigned long)CLOCKS_PER_SEC)
  } while (0)
/* Same as ABORT but does not have 'no-return' attribute. */
/* ABORT on a dummy condition (which is always true). */
/* The if/else shape makes the macro a single statement that is safe */
/* in an unbraced if/else context (no dangling-else hazard).          */
#define ABORT_RET(msg) \
  if ((signed_word)GC_current_warn_proc == -1) {} else ABORT(msg)
/* Exit abnormally, but without making a mess (e.g. out of memory) */
# ifdef PCR
# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
if (GC_all_interior_pointers) { \
GC_add_to_black_list_stack((word)(bits), (source)); \
- } else { \
- GC_add_to_black_list_normal((word)(bits), (source)); \
- }
+ } else \
+ GC_add_to_black_list_normal((word)(bits), (source))
GC_INNER void GC_add_to_black_list_stack(word p, ptr_t source);
# define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \
GC_add_to_black_list_stack((word)(bits), (source))
# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
if (GC_all_interior_pointers) { \
GC_add_to_black_list_stack((word)(bits)); \
- } else { \
- GC_add_to_black_list_normal((word)(bits)); \
- }
+ } else \
+ GC_add_to_black_list_normal((word)(bits))
GC_INNER void GC_add_to_black_list_stack(word p);
# define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \
GC_add_to_black_list_stack((word)(bits))
#ifdef GC_ASSERTIONS
  /* Abort with a file/line message if expr is false. */
# define GC_ASSERT(expr) \
    do { \
      if (!(expr)) { \
        GC_err_printf("Assertion failure: %s:%d\n", \
                      __FILE__, __LINE__); \
        ABORT("assertion failure"); \
      } \
    } while (0)
GC_INNER word GC_compute_large_free_bytes(void);
GC_INNER word GC_compute_root_size(void);
#else
# define GC_STATIC_ASSERT(expr) (void)sizeof(char[(expr)? 1 : -1])
#endif
-#define COND_DUMP_CHECKS { \
- GC_ASSERT(GC_compute_large_free_bytes() == GC_large_free_bytes); \
- GC_ASSERT(GC_compute_root_size() == GC_root_size); }
+#define COND_DUMP_CHECKS \
+ do { \
+ GC_ASSERT(GC_compute_large_free_bytes() == GC_large_free_bytes); \
+ GC_ASSERT(GC_compute_root_size() == GC_root_size); \
+ } while (0)
#ifndef NO_DEBUGGING
GC_EXTERN GC_bool GC_dump_regularly;
/* Generate regular debugging dumps. */
-# define COND_DUMP { if (EXPECT(GC_dump_regularly, FALSE)) GC_dump(); \
- else COND_DUMP_CHECKS; }
+# define COND_DUMP if (EXPECT(GC_dump_regularly, FALSE)) GC_dump(); \
+ else COND_DUMP_CHECKS
#else
# define COND_DUMP COND_DUMP_CHECKS
#endif
# else
# define INCR_CANCEL_DISABLE()
# define DECR_CANCEL_DISABLE()
# define ASSERT_CANCEL_DISABLED() (void)0
# endif /* GC_ASSERTIONS & ... */
# define DISABLE_CANCEL(state) \
        do { pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &state); \
             INCR_CANCEL_DISABLE(); } while (0)
# define RESTORE_CANCEL(state) \
        do { ASSERT_CANCEL_DISABLED(); \
             pthread_setcancelstate(state, NULL); \
             DECR_CANCEL_DISABLE(); } while (0)
#else /* !CANCEL_SAFE */
# define DISABLE_CANCEL(state) (void)0
# define RESTORE_CANCEL(state) (void)0
# define ASSERT_CANCEL_DISABLED() (void)0
#endif /* !CANCEL_SAFE */
#endif /* GC_PRIVATE_H */
# include <unistd.h>
# define GETPAGESIZE() sysconf(_SC_PAGE_SIZE)
# ifndef __GNUC__
# define PREFETCH(x) do { \
      register long addr = (long)(x); \
      (void) _asm ("LDW", 0, 0, addr, 0); \
    } while (0)
# endif
# endif /* HPUX */
# ifdef LINUX
#endif
#ifndef PREFETCH
  /* No-op default; (void)0 keeps the macro a valid statement. */
# define PREFETCH(x) (void)0
# define NO_PREFETCH
#endif
#ifndef PREFETCH_FOR_WRITE
# define PREFETCH_FOR_WRITE(x) (void)0
# define NO_PREFETCH_FOR_WRITE
#endif
# if GC_GRANULE_WORDS == 1
# define USE_PUSH_MARKED_ACCELERATORS
# define PUSH_GRANULE(q) \
    do { \
      word qcontents = (q)[0]; \
      GC_PUSH_ONE_HEAP(qcontents, q, GC_mark_stack_top); \
    } while (0)
# elif GC_GRANULE_WORDS == 2
#   define USE_PUSH_MARKED_ACCELERATORS
#   define PUSH_GRANULE(q) \
      do { \
        word qcontents = (q)[0]; \
        GC_PUSH_ONE_HEAP(qcontents, q, GC_mark_stack_top); \
        qcontents = (q)[1]; \
        GC_PUSH_ONE_HEAP(qcontents, (q)+1, GC_mark_stack_top); \
      } while (0)
# elif GC_GRANULE_WORDS == 4
#   define USE_PUSH_MARKED_ACCELERATORS
#   define PUSH_GRANULE(q) \
      do { \
        word qcontents = (q)[0]; \
        GC_PUSH_ONE_HEAP(qcontents, q, GC_mark_stack_top); \
        qcontents = (q)[1]; \
        GC_PUSH_ONE_HEAP(qcontents, (q)+1, GC_mark_stack_top); \
        qcontents = (q)[2]; \
        GC_PUSH_ONE_HEAP(qcontents, (q)+2, GC_mark_stack_top); \
        qcontents = (q)[3]; \
        GC_PUSH_ONE_HEAP(qcontents, (q)+3, GC_mark_stack_top); \
      } while (0)
# endif
#endif /* !USE_MARK_BYTES && MARK_BIT_PER_GRANULE */
/* Floating point arguments and formats should be avoided, since FP */
/* conversion is more likely to allocate memory. */
/* Assumes that no more than BUFSZ-1 characters are written at once. */
/* Fill buf from a varargs format; a guard byte detects overflow of   */
/* the buffer by the (elided) vsnprintf call.                         */
#define GC_PRINTF_FILLBUF(buf, format) \
  do { \
    va_list args; \
    va_start(args, format); \
    (buf)[sizeof(buf) - 1] = 0x15; /* guard */ \
    va_end(args); \
    if ((buf)[sizeof(buf) - 1] != 0x15) \
      ABORT("GC_printf clobbered stack"); \
  } while (0)
void GC_printf(const char *format, ...)
{
# include <mach/vm_map.h>
STATIC mach_port_t GC_task_self = 0;
/* The if (...) {} else ABORT(...) shape keeps each macro a single    */
/* statement that composes safely with an unbraced if/else at the     */
/* call site.                                                         */
# define PROTECT(addr,len) \
    if (vm_protect(GC_task_self, (vm_address_t)(addr), (vm_size_t)(len), \
                   FALSE, VM_PROT_READ \
                   | (GC_pages_executable ? VM_PROT_EXECUTE : 0)) \
        == KERN_SUCCESS) {} else ABORT("vm_protect(PROTECT) failed")
# define UNPROTECT(addr,len) \
    if (vm_protect(GC_task_self, (vm_address_t)(addr), (vm_size_t)(len), \
                   FALSE, (VM_PROT_READ | VM_PROT_WRITE) \
                   | (GC_pages_executable ? VM_PROT_EXECUTE : 0)) \
        == KERN_SUCCESS) {} else ABORT("vm_protect(UNPROTECT) failed")
# elif !defined(USE_WINALLOC)
# include <sys/mman.h>
# define PROTECT(addr, len) \
    if (mprotect((caddr_t)(addr), (size_t)(len), \
                 PROT_READ \
                 | (GC_pages_executable ? PROT_EXEC : 0)) >= 0) { \
    } else ABORT("mprotect failed")
# define UNPROTECT(addr, len) \
    if (mprotect((caddr_t)(addr), (size_t)(len), \
                 (PROT_READ | PROT_WRITE) \
                 | (GC_pages_executable ? PROT_EXEC : 0)) >= 0) { \
    } else ABORT(GC_pages_executable ? \
                 "un-mprotect executable page failed" \
                 " (probably disabled by OS)" : \
                 "un-mprotect failed")
# undef IGNORE_PAGES_EXECUTABLE
# else /* USE_WINALLOC */
static DWORD protect_junk;
# define PROTECT(addr, len) \
    if (VirtualProtect((addr), (len), \
                       GC_pages_executable ? PAGE_EXECUTE_READ : \
                                             PAGE_READONLY, \
                       &protect_junk)) { \
    } else ABORT_ARG1("VirtualProtect failed", \
                      ": errcode= 0x%X", (unsigned)GetLastError())
# define UNPROTECT(addr, len) \
    if (VirtualProtect((addr), (len), \
                       GC_pages_executable ? PAGE_EXECUTE_READWRITE : \
                                             PAGE_READWRITE, \
                       &protect_junk)) { \
    } else ABORT("un-VirtualProtect failed")
# endif /* USE_WINALLOC */
# if defined(MSWIN32)
GC_syms_initialized = TRUE;
}
# define INIT_REAL_SYMS() if (EXPECT(GC_syms_initialized, TRUE)) {} \
                          else GC_init_real_syms()
#else
# define INIT_REAL_SYMS() (void)0
#endif
static GC_bool parallel_initialized = FALSE;
&& t != &first_thread) { \
GC_ASSERT(SMALL_OBJ(GC_size(t))); \
GC_remove_protection(HBLKPTR(t), 1, FALSE); \
- }
+ } else (void)0
#else
-# define UNPROTECT_THREAD(t)
+# define UNPROTECT_THREAD(t) (void)0
#endif
#ifdef CYGWIN32
((NUMERIC_THREAD_ID(pthread_id) >> 5) % PTHREAD_MAP_SIZE)
/* It appears pthread_t is really a pointer type ... */
/* The (void) cast makes the assignment usable as a statement without */
/* a "computed value not used" warning.                               */
# define SET_PTHREAD_MAP_CACHE(pthread_id, win32_id) \
    (void)(GC_pthread_map_cache[PTHREAD_MAP_INDEX(pthread_id)] = (win32_id))
# define GET_PTHREAD_MAP_CACHE(pthread_id) \
GC_pthread_map_cache[PTHREAD_MAP_INDEX(pthread_id)]
/* pointer registers are included in case client code was */
/* compiled with the 'omit frame pointer' optimisation. */
# define PUSH1(reg) GC_push_one((word)context.reg)
  /* Outer parentheses make each comma expression a single expression. */
# define PUSH2(r1,r2) (PUSH1(r1), PUSH1(r2))
# define PUSH4(r1,r2,r3,r4) (PUSH2(r1,r2), PUSH2(r3,r4))
# if defined(I386)
PUSH4(Edi,Esi,Ebx,Edx), PUSH2(Ecx,Eax), PUSH1(Ebp);
sp = (ptr_t)context.Esp;