if (sz > MAXOBJBYTES) {
/* Round it up to the next whole heap block */
- word descr;
- sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
- hhdr -> hb_sz = sz;
- descr = GC_obj_kinds[obj_kind].ok_descriptor;
- if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
- hhdr -> hb_descr = descr;
+ word descr = GC_obj_kinds[obj_kind].ok_descriptor;
+
+ sz = (sz + HBLKSIZE-1) & ~HBLKMASK;
+ if (GC_obj_kinds[obj_kind].ok_relocate_descr)
+ descr += sz;
+ /* GC_realloc might be changing the block size while */
+ /* GC_reclaim_block or GC_clear_hdr_marks is examining it. */
+ /* The change to the size field is benign, in that GC_reclaim */
+ /* (and GC_clear_hdr_marks) would work correctly with either */
+ /* value, since we are not changing the number of objects in */
+ /* the block. But seeing a half-updated value (though */
+ /* unlikely to occur in practice) would be bad. Unordered */
+ /* atomic accesses to the size and hb_descr fields avoid */
+ /* the problem. (An alternative would be to overallocate */
+ /* large objects initially, so that the size need not be */
+ /* adjusted in GC_realloc if the new size still fits; but */
+ /* that is probably more expensive, since we may end up */
+ /* scanning a bunch of zeros during GC.) */
+# ifdef AO_HAVE_store
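+ /* The assertion guarantees that a single AO_t store */
+ /* covers the whole hb_sz field. */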
+ GC_STATIC_ASSERT(sizeof(hhdr->hb_sz) == sizeof(AO_t));
+ AO_store((volatile AO_t *)&hhdr->hb_sz, (AO_t)sz);
+ AO_store((volatile AO_t *)&hhdr->hb_descr, (AO_t)descr);
+# else
+ {
+ DCL_LOCK_STATE;
+
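+ /* Without atomic primitives, take the allocator lock; */
+ /* readers of hb_sz and hb_descr run under the same lock, */
+ /* so they cannot see a half-updated field. */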
+ LOCK();
+ hhdr -> hb_sz = sz;
+ hhdr -> hb_descr = descr;
+ UNLOCK();
+ }
+# endif
# ifdef MARK_BIT_PER_OBJ
GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
# endif
/* Clear all mark bits in the header. */
GC_INNER void GC_clear_hdr_marks(hdr *hhdr)
{
- size_t last_bit = FINAL_MARK_BIT((size_t)hhdr->hb_sz);
+ size_t last_bit;
+
+# ifdef AO_HAVE_load
+ /* Atomic access is used to avoid racing with GC_realloc. */
+ last_bit = FINAL_MARK_BIT((size_t)AO_load((volatile AO_t *)&hhdr->hb_sz));
+# else
+ /* No race as GC_realloc holds the lock while updating hb_sz. */
+ last_bit = FINAL_MARK_BIT((size_t)hhdr->hb_sz);
+# endif
+
BZERO(hhdr -> hb_marks, sizeof(hhdr->hb_marks));
set_mark_bit_from_hdr(hhdr, last_bit);
hhdr -> hb_n_marks = 0;
}
+/* Is the block pointer-free? Same as the IS_PTRFREE macro (in */
+/* os_dep.c) but uses an unordered atomic access to avoid */
+/* racing with GC_realloc. */
+#ifdef AO_HAVE_load
+# define IS_PTRFREE_SAFE(hhdr) \
+ (AO_load((volatile AO_t *)&(hhdr)->hb_descr) == 0)
+#else
+ /* No race as GC_realloc holds the lock while updating hb_descr. */
+# define IS_PTRFREE_SAFE(hhdr) ((hhdr)->hb_descr == 0)
+#endif
+
/*
* Generic procedure to rebuild a free list in hbp.
 * Also called directly from GC_malloc_many.
 */
GC_ASSERT(GC_find_header((ptr_t)hbp) == hhdr);
# ifndef GC_DISABLE_INCREMENTAL
- GC_remove_protection(hbp, 1, (hhdr)->hb_descr == 0 /* Pointer-free? */);
+ GC_remove_protection(hbp, 1, IS_PTRFREE_SAFE(hhdr));
# endif
# ifdef ENABLE_DISCLAIM
if ((hhdr -> hb_flags & HAS_DISCLAIM) != 0) {
/* else */ if (init || GC_debugging_started) {
result = GC_reclaim_clear(hbp, hhdr, sz, list, count);
} else {
- GC_ASSERT((hhdr)->hb_descr == 0 /* Pointer-free block */);
+ GC_ASSERT(IS_PTRFREE_SAFE(hhdr));
result = GC_reclaim_uninit(hbp, hhdr, sz, list, count);
}
if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) GC_set_hdr_marks(hhdr);
# ifdef ENABLE_DISCLAIM
in_use:
# endif
- if (hhdr -> hb_descr != 0) {
- GC_composite_in_use += sz;
- } else {
+ if (IS_PTRFREE_SAFE(hhdr)) {
GC_atomic_in_use += sz;
+ } else {
+ GC_composite_in_use += sz;
}
}
} else {
/* already have the right cache context here. Also */
/* doing it here avoids some silly lock contention in */
/* GC_malloc_many. */
-
- if (hhdr -> hb_descr != 0) {
- GC_composite_in_use += sz * hhdr -> hb_n_marks;
- } else {
+ if (IS_PTRFREE_SAFE(hhdr)) {
GC_atomic_in_use += sz * hhdr -> hb_n_marks;
+ } else {
+ GC_composite_in_use += sz * hhdr -> hb_n_marks;
}
}
}
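
For readers unfamiliar with the idiom this patch relies on, below is a minimal standalone sketch of the same writer/reader pairing. It uses C11 relaxed atomics as a stand-in for libatomic_ops' AO_store/AO_load (which are likewise unordered), and the struct and function names are illustrative only, not bdwgc's. The point is freedom from torn values, not ordering: a reader sees either the old or the new contents of each field, never a half-updated mix.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the header fields touched above; */
/* this is not bdwgc's actual hdr layout.                     */
struct blk_hdr {
    atomic_size_t hb_sz;        /* block size in bytes */
    atomic_uintptr_t hb_descr;  /* object descriptor   */
};

/* Writer side (the role GC_realloc plays for a large block). */
static void publish_resize(struct blk_hdr *h, size_t sz, uintptr_t descr)
{
    /* Relaxed stores: each field is updated atomically, so a */
    /* concurrent reader cannot observe a torn value.         */
    atomic_store_explicit(&h->hb_sz, sz, memory_order_relaxed);
    atomic_store_explicit(&h->hb_descr, descr, memory_order_relaxed);
}

/* Reader side (the role GC_clear_hdr_marks plays). */
static size_t read_size(struct blk_hdr *h)
{
    /* Pairs with the relaxed store above; no cross-field */
    /* ordering is required here.                         */
    return atomic_load_explicit(&h->hb_sz, memory_order_relaxed);
}

int main(void)
{
    struct blk_hdr h;

    atomic_init(&h.hb_sz, 4096);
    atomic_init(&h.hb_descr, 0);
    publish_resize(&h, 8192, 8192);
    printf("block size is now %zu\n", read_size(&h));
    return 0;
}

On typical platforms such unordered atomic accesses compile to ordinary word-sized loads and stores, which is why the patch prefers them over taking the allocator lock whenever the AO primitives are available.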