    OUT_RING(ring, num_textures);
 }
+/**
+ * Handle invalidates potentially coming from other contexts. By
+ * flagging the invalidate rather than handling it immediately, we
+ * avoid needing to refcnt (with its associated atomics) the tex
+ * state. This also avoids cross-ctx (thread) sharing of
+ * fd_ringbuffer's, which would otherwise need atomic refcnts.
+ */
+static void
+handle_invalidates(struct fd_context *ctx)
+   assert_dt
+{
+   struct fd6_context *fd6_ctx = fd6_context(ctx);
+
+   fd_screen_assert_locked(ctx->screen);
+
+   hash_table_foreach (fd6_ctx->tex_cache, entry) {
+      struct fd6_texture_state *state = entry->data;
+
+      if (state->invalidate)
+         remove_tex_entry(fd6_ctx, entry);
+   }
+
+   fd6_ctx->tex_cache_needs_invalidate = false;
+}
+
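To illustrate the pattern outside of the driver: below is a minimal standalone sketch of the same flag-and-sweep idea, using hypothetical names (struct cache, cache_flag_invalidate(), cache_lookup()) rather than the real freedreno types. Another context marks matching entries under the shared lock, and only the owning context frees them, lazily, on its next lookup, so no per-entry refcount (and thus no atomics) is needed.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct cache_entry {
   int key;
   bool invalidate;          /* set by other contexts, under the lock */
   struct cache_entry *next;
};

struct cache {
   pthread_mutex_t lock;     /* stands in for the screen lock */
   bool needs_invalidate;    /* cheap "sweep pending" hint */
   struct cache_entry *entries;
};

/* Flagging side: another context marks matching entries instead of
 * freeing them (freeing here would require refcounting the state).
 */
static void
cache_flag_invalidate(struct cache *c, int key)
{
   pthread_mutex_lock(&c->lock);
   for (struct cache_entry *e = c->entries; e; e = e->next) {
      if (e->key == key) {
         e->invalidate = true;
         c->needs_invalidate = true;
      }
   }
   pthread_mutex_unlock(&c->lock);
}

/* Owning side: flagged entries are swept lazily on the next lookup,
 * mirroring handle_invalidates() above, so only the owning context
 * ever frees an entry.
 */
static struct cache_entry *
cache_lookup(struct cache *c, int key)
{
   pthread_mutex_lock(&c->lock);

   if (c->needs_invalidate) {
      struct cache_entry **p = &c->entries;
      while (*p) {
         struct cache_entry *e = *p;
         if (e->invalidate) {
            *p = e->next;   /* unlink and free the flagged entry */
            free(e);
         } else {
            p = &e->next;
         }
      }
      c->needs_invalidate = false;
   }

   struct cache_entry *found = NULL;
   for (struct cache_entry *e = c->entries; e; e = e->next) {
      if (e->key == key) {
         found = e;
         break;
      }
   }

   pthread_mutex_unlock(&c->lock);
   return found;
}
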
 struct fd6_texture_state *
 fd6_texture_state(struct fd_context *ctx, enum pipe_shader_type type,
                    struct fd_texture_stateobj *tex)
    uint32_t hash = tex_key_hash(&key);
    fd_screen_lock(ctx->screen);
+
+   if (unlikely(fd6_ctx->tex_cache_needs_invalidate))
+      handle_invalidates(ctx);
+
    struct hash_entry *entry =
       _mesa_hash_table_search_pre_hashed(fd6_ctx->tex_cache, hash, &key);
       for (unsigned i = 0; i < ARRAY_SIZE(state->key.view); i++) {
          if (rsc->seqno == state->key.view[i].rsc_seqno) {
-            remove_tex_entry(fd6_ctx, entry);
+            state->invalidate = true;
+            fd6_ctx->tex_cache_needs_invalidate = true;
             break;
          }
       }
    }
 }
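Both the flagging side and the sweep run under the screen lock (note the fd_screen_assert_locked() in handle_invalidates() and the fd_screen_lock() around the lookup), so plain bools suffice and no memory barriers are needed, while the unlikely() hint keeps the sweep off the hot lookup path. Continuing the hypothetical sketch above, usage might look like this:

#include <assert.h>
#include <stdio.h>

int
main(void)
{
   struct cache c = {
      .lock = PTHREAD_MUTEX_INITIALIZER,
   };

   struct cache_entry *e = calloc(1, sizeof(*e));
   e->key = 42;
   c.entries = e;

   /* "Other context" flags the entry... */
   cache_flag_invalidate(&c, 42);

   /* ...and the owning context sweeps it on its next lookup. */
   struct cache_entry *hit = cache_lookup(&c, 42);
   assert(hit == NULL);
   (void)hit;

   printf("flagged entry was swept lazily\n");
   return 0;
}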