return true;
}
+/* Replace the storage of dst with the storage of src. */
+void r600_replace_buffer_storage(struct pipe_context *ctx,
+ struct pipe_resource *dst,
+ struct pipe_resource *src)
+{
+ struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ struct r600_resource *rdst = r600_resource(dst);
+ struct r600_resource *rsrc = r600_resource(src);
+ uint64_t old_gpu_address = rdst->gpu_address;
+
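+ /* Drop the reference to dst's old buffer and take a reference to
+ * src's, then adopt its GPU address. old_gpu_address is kept so that
+ * stale bindings can be found and updated below. */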
+ pb_reference(&rdst->buf, rsrc->buf);
+ rdst->gpu_address = rsrc->gpu_address;
+
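+ /* dst and src must be interchangeable: same size, alignment, memory
+ * domains and flags, so no other bookkeeping has to change. */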
+ assert(rdst->vram_usage == rsrc->vram_usage);
+ assert(rdst->gart_usage == rsrc->gart_usage);
+ assert(rdst->bo_size == rsrc->bo_size);
+ assert(rdst->bo_alignment == rsrc->bo_alignment);
+ assert(rdst->domains == rsrc->domains);
+ assert(rdst->flags == rsrc->flags);
+
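+ /* Update every binding that still points at the old GPU address. */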
+ rctx->rebind_buffer(ctx, dst, old_gpu_address);
+}
+
void r600_invalidate_resource(struct pipe_context *ctx,
struct pipe_resource *resource)
{
* the buffer is bound, including all resource descriptors. */
void (*invalidate_buffer)(struct pipe_context *ctx, struct pipe_resource *buf);
+ /* Update all resource bindings where the buffer is bound, including
+ * all resource descriptors. This is invalidate_buffer without
+ * the invalidation. */
+ void (*rebind_buffer)(struct pipe_context *ctx, struct pipe_resource *buf,
+ uint64_t old_gpu_address);
+
/* Enable or disable occlusion queries. */
void (*set_occlusion_query_state)(struct pipe_context *ctx, bool enable);
enum ring_type ring);
};
-/* r600_buffer.c */
+/* r600_buffer_common.c */
bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
struct pb_buffer *buf,
enum radeon_bo_usage usage);
void
r600_invalidate_resource(struct pipe_context *ctx,
struct pipe_resource *resource);
+void r600_replace_buffer_storage(struct pipe_context *ctx,
+ struct pipe_resource *dst,
+ struct pipe_resource *src);
/* r600_common_pipe.c */
void r600_gfx_write_event_eop(struct r600_common_context *ctx,
}
}
-/* Reallocate a buffer a update all resource bindings where the buffer is
- * bound.
- *
- * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
- * idle by discarding its contents. Apps usually tell us when to do this using
- * map_buffer flags, for example.
- */
-static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
+static void si_rebind_buffer(struct pipe_context *ctx, struct pipe_resource *buf,
+ uint64_t old_va)
{
struct si_context *sctx = (struct si_context*)ctx;
struct r600_resource *rbuffer = r600_resource(buf);
unsigned i, shader;
- uint64_t old_va = rbuffer->gpu_address;
unsigned num_elems = sctx->vertex_elements ?
sctx->vertex_elements->count : 0;
- /* Reallocate the buffer in the same pipe_resource. */
- r600_alloc_resource(&sctx->screen->b, rbuffer);
-
/* We changed the buffer, now we need to bind it where the old one
* was bound. This consists of 2 things:
* 1) Updating the resource descriptor and dirtying it.
}
}
+/* Reallocate a buffer and update all resource bindings where the buffer is
+ * bound.
+ *
+ * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
+ * idle by discarding its contents. Apps usually tell us when to do this using
+ * map_buffer flags, for example.
+ */
+static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
+{
+ struct si_context *sctx = (struct si_context*)ctx;
+ struct r600_resource *rbuffer = r600_resource(buf);
+ uint64_t old_va = rbuffer->gpu_address;
+
+ /* Reallocate the buffer in the same pipe_resource. */
+ r600_alloc_resource(&sctx->screen->b, rbuffer);
+
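+ /* Update all bindings to point at the newly allocated storage. */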
+ si_rebind_buffer(ctx, buf, old_va);
+}
+
/* Update mutable image descriptor fields of all bound textures. */
void si_update_all_texture_descriptors(struct si_context *sctx)
{
sctx->b.b.set_sampler_views = si_set_sampler_views;
sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
sctx->b.invalidate_buffer = si_invalidate_buffer;
+ sctx->b.rebind_buffer = si_rebind_buffer;
/* Shader user data. */
si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,