unsigned i;
/* The number of dwords all the dirty states would take. */
- for (i = 0; i < R600_NUM_ATOMS; i++) {
- if (ctx->atoms[i] && ctx->atoms[i]->dirty) {
- num_dw += ctx->atoms[i]->num_dw;
- if (ctx->screen->b.trace_bo) {
- num_dw += R600_TRACE_CS_DWORDS;
- }
+ i = r600_next_dirty_atom(ctx, 0);
+ while (i < R600_NUM_ATOMS) {
+ num_dw += ctx->atoms[i]->num_dw;
+ if (ctx->screen->b.trace_bo) {
+ num_dw += R600_TRACE_CS_DWORDS;
}
+ i = r600_next_dirty_atom(ctx, i + 1);
}
/* The upper-bound of how much space a draw command would take. */
#define R600_BIG_ENDIAN 0
#endif
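+ /* Width in bits of one dirty_atoms[] word, and the number of words
+ * needed to give each of the R600_NUM_ATOMS atom ids its own bit. */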
+#define R600_DIRTY_ATOM_WORD_BITS (sizeof(unsigned long) * 8)
+#define R600_DIRTY_ATOM_ARRAY_LEN DIV_ROUND_UP(R600_NUM_ATOMS, R600_DIRTY_ATOM_WORD_BITS)
+
struct r600_context;
struct r600_bytecode;
struct r600_shader_key;
/* State binding slots are here. */
struct r600_atom *atoms[R600_NUM_ATOMS];
+ /* Bitmask of dirty atoms, indexed by atom id and kept in sync with
+ * each atom's dirty flag, for fast dirty tests and iteration */
+ unsigned long dirty_atoms[R600_DIRTY_ATOM_ARRAY_LEN];
/* States for CS initialization. */
struct r600_command_buffer start_cs_cmd; /* invariant state mostly */
/** Compute specific registers initializations. The start_cs_cmd atom
struct r600_atom *atom,
bool dirty)
{
+ unsigned long mask;
+ unsigned int w;
+
atom->dirty = dirty;
+
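+ /* An id of 0 would mean the atom was never registered through
+ * r600_init_atom, which presumably never hands out id 0. */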
+ assert(atom->id != 0);
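+ /* Select the bitmask word and bit corresponding to this atom's id. */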
+ w = atom->id / R600_DIRTY_ATOM_WORD_BITS;
+ mask = 1ul << (atom->id % R600_DIRTY_ATOM_WORD_BITS);
+ if (dirty)
+ rctx->dirty_atoms[w] |= mask;
+ else
+ rctx->dirty_atoms[w] &= ~mask;
}
static inline void r600_mark_atom_dirty(struct r600_context *rctx,
r600_set_atom_dirty(rctx, atom, true);
}
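+ /* Returns the id of the first dirty atom at or after 'id', or
+ * R600_NUM_ATOMS when no atom with id >= 'id' is dirty. The intended
+ * iteration pattern (see the r600_need_cs_space and draw_vbo hunks in
+ * this patch):
+ *
+ * i = r600_next_dirty_atom(rctx, 0);
+ * while (i < R600_NUM_ATOMS) {
+ * ... handle rctx->atoms[i] ...
+ * i = r600_next_dirty_atom(rctx, i + 1);
+ * }
+ */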
+static inline unsigned int r600_next_dirty_atom(struct r600_context *rctx,
+ unsigned int id)
+{
+#if !defined(DEBUG) && defined(HAVE___BUILTIN_CTZ)
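+ /* Fast path for release builds: scan the bitmask one word at a time
+ * and use ctz to jump to the lowest set bit. This assumes compilers
+ * that define HAVE___BUILTIN_CTZ also provide the __builtin_ctzl
+ * variant used here, which holds for gcc and clang. */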
+ unsigned int w = id / R600_DIRTY_ATOM_WORD_BITS;
+ unsigned int bit = id % R600_DIRTY_ATOM_WORD_BITS;
+ unsigned long bits, mask = (1ul << bit) - 1;
+
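+ /* 'mask' hides the bits below the starting id; it is reset to 0 after
+ * the first word so the remaining words are scanned in full. */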
+ for (; w < R600_DIRTY_ATOM_ARRAY_LEN; w++, mask = 0ul) {
+ bits = rctx->dirty_atoms[w] & ~mask;
+ if (bits == 0)
+ continue;
+ return w * R600_DIRTY_ATOM_WORD_BITS + __builtin_ctzl(bits);
+ }
+
+ return R600_NUM_ATOMS;
+#else
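+ /* Debug (or no-ctz) fallback: walk the ids linearly and cross-check
+ * the bitmask against each atom's dirty flag. */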
+ for (; id < R600_NUM_ATOMS; id++) {
+ bool dirty = !!(rctx->dirty_atoms[id / R600_DIRTY_ATOM_WORD_BITS] &
+ (1ul << (id % R600_DIRTY_ATOM_WORD_BITS)));
+ assert(dirty == (rctx->atoms[id] && rctx->atoms[id]->dirty));
+ if (dirty)
+ break;
+ }
+
+ return id;
+#endif
+}
+
void r600_trace_emit(struct r600_context *rctx);
static inline void r600_emit_atom(struct r600_context *rctx, struct r600_atom *atom)
assert(id < R600_NUM_ATOMS);
assert(rctx->atoms[id] == NULL);
rctx->atoms[id] = atom;
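+ /* Remember the id so r600_set_atom_dirty can find this atom's bit. */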
+ atom->id = id;
atom->dirty = false;
}
r600_need_cs_space(rctx, ib.user_buffer ? 5 : 0, TRUE);
r600_flush_emit(rctx);
- for (i = 0; i < R600_NUM_ATOMS; i++) {
- if (rctx->atoms[i] == NULL || !rctx->atoms[i]->dirty) {
- continue;
- }
+ i = r600_next_dirty_atom(rctx, 0);
+ while (i < R600_NUM_ATOMS) {
r600_emit_atom(rctx, rctx->atoms[i]);
+ i = r600_next_dirty_atom(rctx, i + 1);
}
if (rctx->b.chip_class == CAYMAN) {