typedef struct {
hb_atomic_int_t ref_count;
+#define HB_REFERENCE_COUNT_INVALID_VALUE ((hb_atomic_int_t) -1)
+#define HB_REFERENCE_COUNT_INVALID {HB_REFERENCE_COUNT_INVALID_VALUE}
+
inline void init (int v) { ref_count = v; /* non-atomic is fine */ }
inline int inc (void) { return hb_atomic_int_fetch_and_add (ref_count, 1); }
inline int dec (void) { return hb_atomic_int_fetch_and_add (ref_count, -1); }
inline void set (int v) { return hb_atomic_int_set (ref_count, v); }
+
+ /* XXX
+ *
+ * One thing I'm not sure about: the following two methods should be declared
+ * const. However, that assumes that hb_atomic_int_get() is const. I have
+ * a vague memory of hearing from Chris Wilson or Jeff Muizelaar that atomic get
+ * is implemented as a fetch_and_add(0), in which case it does write to the
+ * memory, and hence cannot be called on a .rodata section. But that's how we
+ * use it.
+ *
+ * If that is indeed the case, then perhaps is_invalid() should do a
+ * non-protected read of the location.
+ */
inline int get (void) { return hb_atomic_int_get (ref_count); }
+ inline bool is_invalid (void) { return get () == HB_REFERENCE_COUNT_INVALID_VALUE; }
} hb_reference_count_t;
-#define HB_REFERENCE_COUNT_INVALID_VALUE ((hb_atomic_int_t) -1)
-#define HB_REFERENCE_COUNT_INVALID {HB_REFERENCE_COUNT_INVALID_VALUE}
-
-#define HB_REFERENCE_COUNT_IS_INVALID(RC) ((RC).get () == HB_REFERENCE_COUNT_INVALID_VALUE)
/* Debug */
/* Object allocation and lifecycle management macros */
#define HB_OBJECT_IS_INERT(obj) \
- (unlikely (HB_REFERENCE_COUNT_IS_INVALID ((obj)->ref_count)))
+ (unlikely ((obj)->ref_count.is_invalid ()))
#define HB_OBJECT_DO_INIT_EXPR(obj) \
obj->ref_count.init (1)
#include <glib.h>
-typedef int hb_atomic_int_t;
+typedef volatile int hb_atomic_int_t;
#define hb_atomic_int_fetch_and_add(AI, V) g_atomic_int_exchange_and_add (&(AI), V)
#define hb_atomic_int_get(AI) g_atomic_int_get (&(AI))
#define hb_atomic_int_set(AI, V) g_atomic_int_set (&(AI), V)
#warning "Could not find any system to define platform macros, library will NOT be thread-safe"
#endif
-typedef int hb_atomic_int_t;
+typedef volatile int hb_atomic_int_t;
#define hb_atomic_int_fetch_and_add(AI, V) ((AI) += (V), (AI) - (V))
#define hb_atomic_int_get(AI) (AI)
#define hb_atomic_int_set(AI, V) HB_STMT_START { (AI) = (V); } HB_STMT_END
-typedef int hb_mutex_t;
+typedef volatile int hb_mutex_t;
#define HB_MUTEX_INIT 0
#define hb_mutex_init(M) HB_STMT_START { (M) = 0; } HB_STMT_END
#define hb_mutex_lock(M) HB_STMT_START { (M) = 1; } HB_STMT_END