#include "util/u_atomic.h"
-/**
- * Helper macro to get around c++ being cranky about an enum that is a bitmask
- */
#ifdef __cplusplus
-#define or_mask(d, mask) \
- do { \
- decltype(mask) _d = (d); \
- d = (decltype(mask))(_d | (mask)); \
- } while (0)
+template<typename E>
+struct BitmaskEnum {
+ E value;
+
+ using underlying = typename std::underlying_type_t<E>;
+
+#define FOREACH_TYPE(M, ...) \
+ M(E, ##__VA_ARGS__) \
+ M(bool, ##__VA_ARGS__) \
+ M(uint8_t, ##__VA_ARGS__) \
+ M(int8_t, ##__VA_ARGS__) \
+ M(uint16_t, ##__VA_ARGS__) \
+ M(int16_t, ##__VA_ARGS__) \
+ M(uint32_t, ##__VA_ARGS__) \
+ M(int32_t, ##__VA_ARGS__)
+
+#define CONSTRUCTOR(T) BitmaskEnum(T value) : value(static_cast<E>(value)) {}
+ FOREACH_TYPE(CONSTRUCTOR)
+#undef CONSTRUCTOR
+
+#define CAST(T) inline operator T() const { return static_cast<T>(value); }
+ FOREACH_TYPE(CAST)
+#undef CAST
+
+#define BOP(T, OP) \
+ inline E operator OP(T rhs) const { \
+ return static_cast<E> ( \
+ static_cast<underlying>(value) OP \
+ static_cast<underlying>(rhs) \
+ ); \
+ }
+ FOREACH_TYPE(BOP, |)
+ FOREACH_TYPE(BOP, &)
+#undef BOP
+
+#if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ <= 10)
+/*
+ * Silence:
+ *
+ * ../src/freedreno/common/freedreno_common.h: In instantiation of 'E& BitmaskEnum<E>::operator|=(BitmaskEnum<E>::underlying) [with E = fd_dirty_3d_state; BitmaskEnum<E>::underlying = unsigned int]':
+ * ../src/gallium/drivers/freedreno/freedreno_context.h:620:16: required from here
+ * ../src/freedreno/common/freedreno_common.h:68:39: error: dereferencing type-punned pointer will break strict-aliasing rules [-Werror=strict-aliasing]
+ * 68 | reinterpret_cast<underlying&>(value) OP static_cast<underlying>(rhs) ); \
+ * | ^~~~~
+ *
+ * I cannot reproduce on gcc 12.2.1 or with clang 14.0.5 so I'm going to assume
+ * this is a bug with gcc 10.x
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wstrict-aliasing"
+#endif
+
+#define UOP(T, OP) \
+ inline E& operator OP(T rhs) { \
+ return reinterpret_cast<E&>( \
+ reinterpret_cast<underlying&>(value) OP static_cast<underlying>(rhs) ); \
+ }
+ UOP(underlying, |=)
+ UOP(underlying, &=)
+#undef UOP
+
+#if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ <= 10)
+#pragma GCC diagnostic pop
+#endif
+
+ inline E operator ~() const {
+ static_assert(sizeof(E) == sizeof(BitmaskEnum<E>));
+ return static_cast<E> (
+ ~static_cast<underlying>(value)
+ );
+ }
+#undef FOREACH_TYPE
+};
+#define BITMASK_ENUM(E) BitmaskEnum<E>
#else
-#define or_mask(d, mask) \
- do { \
- d |= (mask); \
- } while (0)
+#define BITMASK_ENUM(E) enum E
#endif
/*
* color_logic_Op (since those functions are disabled when by-
* passing GMEM.
*/
- enum fd_gmem_reason gmem_reason;
+ BITMASK_ENUM(fd_gmem_reason) gmem_reason;
/* At submit time, once we've decided that this batch will use GMEM
* rendering, the appropriate gmem state is looked up:
uint32_t gen_dirty;
/* which state objects need to be re-emit'd: */
- enum fd_dirty_3d_state dirty dt;
+ BITMASK_ENUM(fd_dirty_3d_state) dirty dt;
/* per shader-stage dirty status: */
- enum fd_dirty_shader_state dirty_shader[PIPE_SHADER_TYPES] dt;
+ BITMASK_ENUM(fd_dirty_shader_state) dirty_shader[PIPE_SHADER_TYPES] dt;
void *compute dt;
struct pipe_blend_state *blend dt;
/* Mark specified non-shader-stage related state as dirty: */
static inline void
-fd_context_dirty(struct fd_context *ctx, enum fd_dirty_3d_state dirty) assert_dt
+fd_context_dirty(struct fd_context *ctx, BITMASK_ENUM(fd_dirty_3d_state) dirty)
+ assert_dt
{
assert(util_is_power_of_two_nonzero(dirty));
assert(ffs(dirty) <= ARRAY_SIZE(ctx->gen_dirty_map));
ctx->gen_dirty |= ctx->gen_dirty_map[ffs(dirty) - 1];
if (fd_context_dirty_resource(dirty))
- or_mask(dirty, FD_DIRTY_RESOURCE);
+ dirty |= FD_DIRTY_RESOURCE;
- or_mask(ctx->dirty, dirty);
+ ctx->dirty |= dirty;
}
static inline void
fd_context_dirty_shader(struct fd_context *ctx, enum pipe_shader_type shader,
- enum fd_dirty_shader_state dirty) assert_dt
+ BITMASK_ENUM(fd_dirty_shader_state) dirty)
+ assert_dt
{
const enum fd_dirty_3d_state map[] = {
FD_DIRTY_PROG, FD_DIRTY_CONST, FD_DIRTY_TEX,
ctx->gen_dirty |= ctx->gen_dirty_shader_map[shader][ffs(dirty) - 1];
- or_mask(ctx->dirty_shader[shader], dirty);
+ ctx->dirty_shader[shader] |= dirty;
fd_context_dirty(ctx, map[ffs(dirty) - 1]);
}
*/
static inline void
fd_context_add_shader_map(struct fd_context *ctx, enum pipe_shader_type shader,
- enum fd_dirty_shader_state dirty, uint32_t gen_dirty)
+ BITMASK_ENUM(fd_dirty_shader_state) dirty, uint32_t gen_dirty)
{
u_foreach_bit (b, dirty) {
ctx->gen_dirty_shader_map[shader][b] |= gen_dirty;
/* bitmask of state this resource could potentially dirty when rebound,
* see rebind_resource()
*/
- enum fd_dirty_3d_state dirty;
+ BITMASK_ENUM(fd_dirty_3d_state) dirty;
/* Sequence # incremented each time bo changes: */
uint16_t seqno;
if (likely(rsc->dirty & usage))
return;
fd_resource_lock(rsc);
- or_mask(rsc->dirty, usage);
+ rsc->dirty |= usage;
fd_resource_unlock(rsc);
}