FD_DIRTY_BLEND_DUAL = BIT(26),
FD_DIRTY_BLEND_COHERENT = BIT(27),
#define NUM_DIRTY_BITS 28
-
- /* additional flag for state requires updated resource tracking: */
- FD_DIRTY_RESOURCE = BIT(31),
};
/* per shader-stage dirty state: */
/* which state objects need to be re-emit'd: */
BITMASK_ENUM(fd_dirty_3d_state) dirty dt;
+ /* As above, but also needs draw time resource tracking: */
+ BITMASK_ENUM(fd_dirty_3d_state) dirty_resource dt;
+
/* per shader-stage dirty status: */
BITMASK_ENUM(fd_dirty_shader_state) dirty_shader[PIPE_SHADER_TYPES] dt;
+ /* As above, but also needs draw time resource tracking: */
+ BITMASK_ENUM(fd_dirty_shader_state) dirty_shader_resource[PIPE_SHADER_TYPES] dt;
+
void *compute dt;
struct pipe_blend_state *blend dt;
struct pipe_rasterizer_state *rasterizer dt;
return (struct fd_stream_output_target *)target;
}
-/**
- * Does the dirty state require resource tracking, ie. in general
- * does it reference some resource. There are some special cases:
- *
- * - FD_DIRTY_CONST can reference a resource, but cb0 is handled
- * specially as if it is not a user-buffer, we expect it to be
- * coming from const_uploader, so we can make some assumptions
- * that future transfer_map will be UNSYNCRONIZED
- * - FD_DIRTY_ZSA controls how the framebuffer is accessed
- * - FD_DIRTY_BLEND needs to update GMEM reason
- *
- * TODO if we can make assumptions that framebuffer state is bound
- * first, before blend/zsa/etc state we can move some of the ZSA/
- * BLEND state handling from draw time to bind time. I think this
- * is true of mesa/st, perhaps we can just document it to be a
- * frontend requirement?
- */
-static inline bool
-fd_context_dirty_resource(enum fd_dirty_3d_state dirty)
-{
- return dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_ZSA |
- FD_DIRTY_SSBO | FD_DIRTY_IMAGE | FD_DIRTY_VTXBUF |
- FD_DIRTY_TEX | FD_DIRTY_STREAMOUT | FD_DIRTY_QUERY);
-}
-
/* Mark specified non-shader-stage related state as dirty: */
static inline void
fd_context_dirty(struct fd_context *ctx, BITMASK_ENUM(fd_dirty_3d_state) dirty)
assert(ffs(dirty) <= ARRAY_SIZE(ctx->gen_dirty_map));
ctx->gen_dirty |= ctx->gen_dirty_map[ffs(dirty) - 1];
-
- if (fd_context_dirty_resource(dirty))
- dirty |= FD_DIRTY_RESOURCE;
-
ctx->dirty |= dirty;
+
+ /* These are still not handled at bind time: */
+ if (dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_QUERY | FD_DIRTY_ZSA))
+ ctx->dirty_resource |= dirty;
}
static inline enum fd_dirty_3d_state
{
ctx->last.dirty = true;
ctx->dirty = (enum fd_dirty_3d_state) ~0;
+ ctx->dirty_resource = (enum fd_dirty_3d_state) ~0;
/* NOTE: don't use ~0 for gen_dirty, because the gen specific
* emit code will loop over all the bits:
*/
ctx->gen_dirty = ctx->gen_all_dirty;
- for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++)
+ for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) {
ctx->dirty_shader[i] = (enum fd_dirty_shader_state) ~0;
+ ctx->dirty_shader_resource[i] = (enum fd_dirty_shader_state) ~0;
+ }
}
static inline void
{
ctx->last.dirty = false;
ctx->dirty = (enum fd_dirty_3d_state)0;
+ ctx->dirty_resource = (enum fd_dirty_3d_state)0;
ctx->gen_dirty = 0;
for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) {
ctx->dirty_shader[i] = (enum fd_dirty_shader_state)0;
+ ctx->dirty_shader_resource[i] = (enum fd_dirty_shader_state)0;
}
}
{
struct fd_context *ctx = batch->ctx;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
- enum fd_dirty_3d_state dirty = ctx->dirty;
+ enum fd_dirty_3d_state dirty = ctx->dirty_resource;
unsigned buffers = 0, restore_buffers = 0;
if (dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_ZSA)) {
}
u_foreach_bit (s, ctx->bound_shader_stages) {
- enum fd_dirty_shader_state dirty_shader = ctx->dirty_shader[s];
+ enum fd_dirty_shader_state dirty_shader = ctx->dirty_shader_resource[s];
/* Mark constbuf as being read: */
if (dirty_shader & FD_DIRTY_SHADER_CONST) {
{
struct fd_context *ctx = batch->ctx;
- if (ctx->dirty & FD_DIRTY_RESOURCE)
+ if (ctx->dirty_resource)
return true;
if (info->index_size && !batch_references_resource(batch, info->index.resource))
fd_screen_lock(ctx->screen);
- if (ctx->dirty & FD_DIRTY_RESOURCE)
+ if (ctx->dirty_resource)
batch_draw_tracking_for_dirty_bits(batch);
/* Mark index buffer as being read */
fd_batch_resource_read_slowpath(batch, rsc);
}
+/* Returns true if binding prsc requires the corresponding dirty_resource
+ * tracking bit to be set, ie. if the current batch does not already track
+ * this resource for the requested access mode:
+ *  - write: the batch is not the resource's current write_batch
+ *  - read:  the batch does not reference the resource at all
+ * A NULL resource, or no current batch, needs no tracking.
+ */
+static inline bool
+needs_dirty_resource(struct fd_context *ctx, struct pipe_resource *prsc, bool write)
+   assert_dt
+{
+   if (!prsc)
+      return false;
+
+   struct fd_resource *rsc = fd_resource(prsc);
+
+   /* Switching between draw and non_draw will dirty all state, so if
+    * we pick the wrong one, all the bits in the dirty_resource state
+    * will be set anyways.. so no harm, no foul.
+    */
+   struct fd_batch *batch = ctx->batch_nondraw ? ctx->batch_nondraw : ctx->batch;
+
+   if (!batch)
+      return false;
+
+   if (write)
+      return rsc->track->write_batch != batch;
+
+   return !fd_batch_references_resource(batch, rsc);
+}
+
+/* Mark non-shader-stage state dirty for draw-time resource tracking, if
+ * (and only if) the bound resource is not already tracked by the current
+ * batch.  'write' indicates the resource will be written.
+ */
+static inline void
+fd_dirty_resource(struct fd_context *ctx, struct pipe_resource *prsc,
+                  BITMASK_ENUM(fd_dirty_3d_state) dirty, bool write)
+   assert_dt
+{
+   /* Already flagged for resource tracking, skip the batch lookup: */
+   if (ctx->dirty_resource & dirty)
+      return;
+
+   if (!needs_dirty_resource(ctx, prsc, write))
+      return;
+
+   ctx->dirty_resource |= dirty;
+}
+
+/* Per-shader-stage counterpart of fd_dirty_resource(): marks the stage's
+ * dirty_shader_resource bits, and also propagates the equivalent 3d-state
+ * bits into ctx->dirty_resource (via dirty_shader_to_dirty_state()) so
+ * draw-time tracking sees a single combined mask.
+ */
+static inline void
+fd_dirty_shader_resource(struct fd_context *ctx, struct pipe_resource *prsc,
+                         enum pipe_shader_type shader,
+                         BITMASK_ENUM(fd_dirty_shader_state) dirty,
+                         bool write)
+   assert_dt
+{
+   /* Already flagged for this stage, skip the batch lookup: */
+   if (ctx->dirty_shader_resource[shader] & dirty)
+      return;
+
+   if (!needs_dirty_resource(ctx, prsc, write))
+      return;
+
+   ctx->dirty_shader_resource[shader] |= dirty;
+   ctx->dirty_resource |= dirty_shader_to_dirty_state(dirty);
+}
+
static inline enum fdl_view_type
fdl_type_from_pipe_target(enum pipe_texture_target target) {
switch (target) {
fd_context_dirty_shader(ctx, shader, FD_DIRTY_SHADER_CONST);
fd_resource_set_usage(cb->buffer, FD_DIRTY_CONST);
-
- if (index > 0) {
- assert(!cb->user_buffer);
- ctx->dirty |= FD_DIRTY_RESOURCE;
- }
+ fd_dirty_shader_resource(ctx, cb->buffer, shader, FD_DIRTY_SHADER_CONST, false);
}
void
buf->buffer_size = buffers[i].buffer_size;
pipe_resource_reference(&buf->buffer, buffers[i].buffer);
+ bool write = writable_bitmask & BIT(i);
+
fd_resource_set_usage(buffers[i].buffer, FD_DIRTY_SSBO);
+ fd_dirty_shader_resource(ctx, buffers[i].buffer, shader,
+ FD_DIRTY_SHADER_SSBO, write);
so->enabled_mask |= BIT(n);
- if (writable_bitmask & BIT(i)) {
+ if (write) {
struct fd_resource *rsc = fd_resource(buf->buffer);
util_range_add(&rsc->b.b, &rsc->valid_buffer_range,
buf->buffer_offset,
util_copy_image_view(buf, &images[i]);
if (buf->resource) {
+ bool write = buf->access & PIPE_IMAGE_ACCESS_WRITE;
+
fd_resource_set_usage(buf->resource, FD_DIRTY_IMAGE);
+ fd_dirty_shader_resource(ctx, buf->resource, shader,
+ FD_DIRTY_SHADER_IMAGE, write);
so->enabled_mask |= BIT(n);
- if ((buf->access & PIPE_IMAGE_ACCESS_WRITE) &&
- (buf->resource->target == PIPE_BUFFER)) {
-
+ if (write && (buf->resource->target == PIPE_BUFFER)) {
struct fd_resource *rsc = fd_resource(buf->resource);
util_range_add(&rsc->b.b, &rsc->valid_buffer_range,
buf->u.buf.offset,
for (unsigned i = 0; i < count; i++) {
assert(!vb[i].is_user_buffer);
fd_resource_set_usage(vb[i].buffer.resource, FD_DIRTY_VTXBUF);
+ fd_dirty_resource(ctx, vb[i].buffer.resource, FD_DIRTY_VTXBUF, false);
/* Robust buffer access: Return undefined data (the start of the buffer)
* instead of process termination or a GPU hang in case of overflow.
so->reset |= (reset << i);
+ if (targets[i]) {
+ fd_resource_set_usage(targets[i]->buffer, FD_DIRTY_STREAMOUT);
+ fd_dirty_resource(ctx, targets[i]->buffer, FD_DIRTY_STREAMOUT, true);
+ }
+
if (!changed && !reset)
continue;
ctx->streamout.verts_written = 0;
}
- if (so->targets[i])
- fd_resource_set_usage(so->targets[i]->buffer, FD_DIRTY_STREAMOUT);
pipe_so_target_reference(&so->targets[i], targets[i]);
}
if (tex->textures[p]) {
fd_resource_set_usage(tex->textures[p]->texture, FD_DIRTY_TEX);
+ fd_dirty_shader_resource(ctx, tex->textures[p]->texture,
+ shader, FD_DIRTY_SHADER_TEX, false);
tex->valid_textures |= (1 << p);
} else {
tex->valid_textures &= ~(1 << p);