   case nir_intrinsic_load_blend_const_color_a_float:
      ptr = &u->fs.blend_constant[3];
      break;
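+   /* The SSBO cases below take the buffer index in src[0], which must be
+    * a compile-time constant here: nir_src_as_uint asserts on non-constant
+    * sources, so dynamic indexing is not handled by this lowering.
+    */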
+   case nir_intrinsic_load_ssbo_address:
+      ptr = &u->ssbo_base[nir_src_as_uint(intr->src[0])];
+      break;
+   case nir_intrinsic_get_ssbo_size:
+      ptr = &u->ssbo_size[nir_src_as_uint(intr->src[0])];
+      break;
   default:
      return false;
   }
   /* Uniform buffer objects */
   uint64_t ubo_base[PIPE_MAX_CONSTANT_BUFFERS];
+   /* Shader storage buffer objects: GPU base address and size in bytes
+    * of each bound buffer
+    */
+   uint64_t ssbo_base[PIPE_MAX_SHADER_BUFFERS];
+   uint32_t ssbo_size[PIPE_MAX_SHADER_BUFFERS];
+
   union {
      struct {
         /* Vertex buffer object bases, if present */
}
static uint64_t
+agx_shader_buffer_ptr(struct agx_batch *batch, struct pipe_shader_buffer *sb)
+{
+   if (sb->buffer) {
+      struct agx_resource *rsrc = agx_resource(sb->buffer);
+
+      /* Assume SSBOs are written. TODO: Optimize read-only SSBOs */
+      agx_batch_writes(batch, rsrc);
+
+      return rsrc->bo->ptr.gpu + sb->buffer_offset;
+   } else {
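+      /* Unbound slot: hand the shader a null address */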
+      return 0;
+   }
+}
+
+static uint64_t
agx_vertex_buffer_ptr(struct agx_batch *batch, unsigned vbo)
{
   struct pipe_vertex_buffer vb = batch->ctx->vertex_buffers[vbo];
      uniforms.ubo_base[cb] = agx_const_buffer_ptr(batch, &st->cb[cb]);
   }
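+   /* Push the base address and size in bytes of each bound SSBO */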
+   u_foreach_bit(sb, st->ssbo_mask) {
+      uniforms.ssbo_base[sb] = agx_shader_buffer_ptr(batch, &st->ssbo[sb]);
+      uniforms.ssbo_size[sb] = st->ssbo[sb].buffer_size;
+   }
+
   if (stage == PIPE_SHADER_VERTEX) {
      u_foreach_bit(vbo, ctx->vb_mask) {
         uniforms.vs.vbo_base[vbo] = agx_vertex_buffer_ptr(batch, vbo);