From c086f2770bd9f8714aef7b1c5814e5a38f4b55b5 Mon Sep 17 00:00:00 2001
From: Alyssa Rosenzweig
Date: Sat, 25 Feb 2023 11:44:36 -0500
Subject: [PATCH] asahi: Rework system value lowering

The previous lowering was insufficient in two areas:

* No support for indirection. This is required for dynamically indexing
  into UBOs, SSBOs, etc. in OpenGL ES 3.2.

* Only a single table was supported. Multiple tables are required to
  implement indirect dispatch/draws efficiently, in order to bind the
  indirect buffer as uniforms.

The first problem is addressed here by reworking the lowering of system
values to happen in NIR, decoupled from the uniform register assignment
details, so that we can handle 1:n lowerings in a straightforward way.
Namely, indirect sysvals are lowered to indirect memory loads relative
to the base address of the sysval table, where the table address is
itself pushed as a (direct) sysval.

The second problem is addressed in this patch by generalizing to
multiple uniform tables.

Signed-off-by: Alyssa Rosenzweig
Part-of:
---
 docs/features.txt                                 |   4 +-
 src/gallium/drivers/asahi/agx_nir_lower_sysvals.c | 254 +++++++++++++++-------
 src/gallium/drivers/asahi/agx_state.c             |   6 +-
 src/gallium/drivers/asahi/agx_state.h             |  13 +-
 src/gallium/drivers/asahi/agx_uniforms.c          |  14 +-
 5 files changed, 205 insertions(+), 86 deletions(-)

diff --git a/docs/features.txt b/docs/features.txt
index 3e56e6b..9f9c52d 100644
--- a/docs/features.txt
+++ b/docs/features.txt
@@ -117,8 +117,8 @@ GL 4.0, GLSL 4.00 --- all DONE: freedreno/a6xx, i965/gen7+, nvc0, r600, radeonsi
   GL_ARB_draw_indirect                                  DONE (freedreno, i965/gen7+, softpipe, v3d, asahi)
   GL_ARB_gpu_shader5                                    DONE (freedreno/a6xx, i965/gen7+)
   - 'precise' qualifier                                 DONE (softpipe)
-  - Dynamically uniform sampler array indices           DONE (softpipe)
-  - Dynamically uniform UBO array indices               DONE (freedreno, softpipe)
+  - Dynamically uniform sampler array indices           DONE (softpipe, asahi)
+  - Dynamically uniform UBO array indices               DONE (freedreno, softpipe, asahi)
   - Implicit signed -> unsigned conversions             DONE (softpipe, asahi)
   - Fused multiply-add                                  DONE (softpipe, asahi)
   - Packing/bitfield/conversion functions               DONE (freedreno, softpipe, panfrost, asahi)
diff --git a/src/gallium/drivers/asahi/agx_nir_lower_sysvals.c b/src/gallium/drivers/asahi/agx_nir_lower_sysvals.c
index ce2015c..1d6ff1b 100644
--- a/src/gallium/drivers/asahi/agx_nir_lower_sysvals.c
+++ b/src/gallium/drivers/asahi/agx_nir_lower_sysvals.c
@@ -11,120 +11,197 @@
 /*
  * Lower all system values to uniform loads. This pass tries to compact ranges
  * of contiguous uploaded uniforms to reduce the draw-time overhead of uploading
- * many tiny ranges. To do so, it works in 3 steps:
+ * many tiny ranges. To do so, it works in 4 steps:
  *
- * 1. Walk the NIR, converting system values to placeholder load_preambles.
- * 2. Walk the ranges of uniforms needed, compacting into contiguous ranges.
- * 3. Fill in the load_preamble instructions with the real uniforms.
+ * 1. Lower NIR sysvals to loads from the system value buffers (as placeholder
+ *    load_preambles)
+ * 2. Walk the NIR, recording the placeholder load_preambles.
+ * 3. Walk the ranges of uniforms needed, compacting into contiguous ranges.
+ * 4. Fill in the load_preamble instructions with the real uniforms.
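+ *
+ * Between these steps, a load is identified by a 32-bit key packed into the
+ * load_preamble base: ((table << 16) | offset), i.e. the sysval table index
+ * in the upper 16 bits and the byte offset within that table in the lower
+ * 16 bits. Sysvals indexed by a non-constant value cannot be pushed ahead
+ * of time; they are instead lowered to memory loads relative to the table's
+ * base address, which is itself pushed as a (direct) sysval.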
 */

-struct state {
-   /* Array of load_preamble nir_intrinsic_instr's to fix up at the end */
-   struct util_dynarray load_preambles;
+#define MAX_TABLE_SIZE sizeof(struct agx_draw_uniforms)

+struct table_state {
    /* Bitset of 16-bit uniforms pushed */
-   BITSET_DECLARE(pushed, sizeof(struct agx_draw_uniforms) / 2);
+   BITSET_DECLARE(pushed, MAX_TABLE_SIZE / 2);

    /* Element size in 16-bit units, so we may split ranges of different sizes
     * to guarantee natural alignment.
     */
-   uint8_t element_size[sizeof(struct agx_draw_uniforms) / 2];
+   uint8_t element_size[MAX_TABLE_SIZE / 2];
 };

-static bool
-pass(struct nir_builder *b, nir_instr *instr, void *data)
+struct state {
+   /* Array of load_preamble nir_intrinsic_instr's to fix up at the end */
+   struct util_dynarray load_preambles;
+
+   struct table_state tables[AGX_NUM_SYSVAL_TABLES];
+};
+
+static nir_ssa_def *
+load_sysval(nir_builder *b, unsigned dim, unsigned bitsize, uint8_t table,
+            uint16_t offset)
 {
-   b->cursor = nir_before_instr(instr);
-   struct state *state = data;
+   /* Encode as a sideband */
+   uint32_t packed = (((uint32_t)table) << 16) | ((uint32_t)offset);
+   return nir_load_preamble(b, dim, bitsize, .base = packed);
+}
+
+static nir_ssa_def *
+load_sysval_root(nir_builder *b, unsigned dim, unsigned bitsize, void *ptr)
+{
+   return load_sysval(b, dim, bitsize, AGX_SYSVAL_TABLE_ROOT, (uintptr_t)ptr);
+}
+
+static nir_ssa_def *
+load_sysval_indirect(nir_builder *b, unsigned dim, unsigned bitsize,
+                     uint8_t table, void *base, nir_ssa_def *offset_el)
+{
+   nir_ssa_scalar scalar = {offset_el, 0};
+   unsigned stride = (dim * bitsize) / 8;
+
+   if (nir_ssa_scalar_is_const(scalar)) {
+      /* Load the sysval directly */
+      return load_sysval(
+         b, dim, bitsize, table,
+         (uintptr_t)base + (nir_ssa_scalar_as_uint(scalar) * stride));
+   } else {
+      /* Load the base address of the table */
+      struct agx_draw_uniforms *u = NULL;
+      nir_ssa_def *table_base = load_sysval_root(b, 1, 64, &u->tables[table]);
+
+      /* Load address of the array in the table */
+      nir_ssa_def *array_base = nir_iadd_imm(b, table_base, (uintptr_t)base);

-   /* For offsetof with dynamic array elements */
+      /* Index into the table and load */
+      nir_ssa_def *address = nir_iadd(
+         b, array_base, nir_u2u64(b, nir_imul_imm(b, offset_el, stride)));
+      return nir_load_global_constant(b, address, bitsize / 8, dim, bitsize);
+   }
+}
+
+static nir_ssa_def *
+lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intr)
+{
    struct agx_draw_uniforms *u = NULL;
-   void *ptr = NULL;

+   switch (intr->intrinsic) {
+   case nir_intrinsic_load_vbo_base_agx:
+      return load_sysval_indirect(b, 1, 64, AGX_SYSVAL_TABLE_ROOT,
+                                  &u->vs.vbo_base, intr->src[0].ssa);
+   case nir_intrinsic_load_ubo_base_agx:
+      return load_sysval_indirect(b, 1, 64, AGX_SYSVAL_TABLE_ROOT, u->ubo_base,
+                                  intr->src[0].ssa);
+   case nir_intrinsic_load_texture_base_agx:
+      return load_sysval_root(b, 1, 64, &u->texture_base);
+   case nir_intrinsic_load_blend_const_color_r_float:
+      return load_sysval_root(b, 1, 32, &u->fs.blend_constant[0]);
+   case nir_intrinsic_load_blend_const_color_g_float:
+      return load_sysval_root(b, 1, 32, &u->fs.blend_constant[1]);
+   case nir_intrinsic_load_blend_const_color_b_float:
+      return load_sysval_root(b, 1, 32, &u->fs.blend_constant[2]);
+   case nir_intrinsic_load_blend_const_color_a_float:
+      return load_sysval_root(b, 1, 32, &u->fs.blend_constant[3]);
+   case nir_intrinsic_load_ssbo_address:
+      return load_sysval_indirect(b, 1, 64, AGX_SYSVAL_TABLE_ROOT,
+                                  &u->ssbo_base, intr->src[0].ssa);
+   case nir_intrinsic_get_ssbo_size:
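+      /* Like SSBO addresses, sizes are stored per-buffer in the root table;
+       * a non-constant buffer index takes the indirect load path above. */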
+      return load_sysval_indirect(b, 1, 32, AGX_SYSVAL_TABLE_ROOT,
+                                  &u->ssbo_size, intr->src[0].ssa);
+   default:
+      return NULL;
+   }
+}
+
+/* Step 1: Lower NIR sysvals */
+static bool
+lower_sysvals(nir_builder *b, nir_instr *instr, void *data)
+{
+   b->cursor = nir_before_instr(instr);

    nir_dest *dest;
+   nir_ssa_def *replacement = NULL;

    if (instr->type == nir_instr_type_intrinsic) {
       nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
       dest = &intr->dest;
-
-      switch (intr->intrinsic) {
-      case nir_intrinsic_load_vbo_base_agx:
-         ptr = &u->vs.vbo_base[nir_src_as_uint(intr->src[0])];
-         break;
-      case nir_intrinsic_load_ubo_base_agx:
-         ptr = &u->ubo_base[nir_src_as_uint(intr->src[0])];
-         break;
-      case nir_intrinsic_load_texture_base_agx:
-         ptr = &u->texture_base;
-         break;
-      case nir_intrinsic_load_blend_const_color_r_float:
-         ptr = &u->fs.blend_constant[0];
-         break;
-      case nir_intrinsic_load_blend_const_color_g_float:
-         ptr = &u->fs.blend_constant[1];
-         break;
-      case nir_intrinsic_load_blend_const_color_b_float:
-         ptr = &u->fs.blend_constant[2];
-         break;
-      case nir_intrinsic_load_blend_const_color_a_float:
-         ptr = &u->fs.blend_constant[3];
-         break;
-      case nir_intrinsic_load_ssbo_address:
-         ptr = &u->ssbo_base[nir_src_as_uint(intr->src[0])];
-         break;
-      case nir_intrinsic_get_ssbo_size:
-         ptr = &u->ssbo_size[nir_src_as_uint(intr->src[0])];
-         break;
-      default:
-         return false;
-      }
+      replacement = lower_intrinsic(b, intr);
    } else if (instr->type == nir_instr_type_tex) {
       nir_tex_instr *tex = nir_instr_as_tex(instr);
       dest = &tex->dest;

-      if (tex->op == nir_texop_lod_bias_agx) {
-         /* TODO: Dynamic indexing samplers? */
-         ptr = &u->lod_bias[tex->sampler_index];
-      } else {
+      if (tex->op != nir_texop_lod_bias_agx)
         return false;
+
+      struct agx_draw_uniforms *u = NULL;
+
+      int src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_offset);
+      if (src_idx >= 0) {
+         replacement =
+            load_sysval_indirect(b, 1, 16, AGX_SYSVAL_TABLE_ROOT, u->lod_bias,
+                                 tex->src[src_idx].src.ssa);
+      } else {
+         replacement =
+            load_sysval_root(b, 1, 16, &u->lod_bias[tex->sampler_index]);
       }
+   }
+
+   if (replacement != NULL) {
+      nir_ssa_def_rewrite_uses(&dest->ssa, replacement);
+      return true;
    } else {
       return false;
    }
+}

+/* Step 2: Record system value loads */
+static bool
+record_loads(nir_builder *b, nir_instr *instr, void *data)
+{
+   if (instr->type != nir_instr_type_intrinsic)
+      return false;

-   assert(nir_dest_bit_size(*dest) >= 16 && "no 8-bit sysvals");
+   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+   if (intr->intrinsic != nir_intrinsic_load_preamble)
+      return false;

-   unsigned dim = nir_dest_num_components(*dest);
-   unsigned element_size = nir_dest_bit_size(*dest) / 16;
+   assert(nir_dest_bit_size(intr->dest) >= 16 && "no 8-bit sysvals");
+
+   unsigned dim = nir_dest_num_components(intr->dest);
+   unsigned element_size = nir_dest_bit_size(intr->dest) / 16;
    unsigned length = dim * element_size;
-   unsigned offset = (uintptr_t)ptr;
+
+   struct state *state = data;
+   unsigned base = nir_intrinsic_base(intr);
+   struct table_state *table = &state->tables[base >> 16];
+   unsigned offset = base & 0xFFFF;

    assert((offset % 2) == 0 && "all entries are aligned by ABI");

-   nir_ssa_def *value =
-      nir_load_preamble(b, dim, nir_dest_bit_size(*dest), .base = offset);
-   nir_ssa_def_rewrite_uses(&dest->ssa, value);
-
-   BITSET_SET_RANGE(state->pushed, (offset / 2), (offset / 2) + length - 1);
+   BITSET_SET_RANGE(table->pushed, (offset / 2), (offset / 2) + length - 1);

    for (unsigned i = 0; i < length; ++i) {
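+      /* Every use of a 16-bit slot must agree on element size, since ranges
+       * are split by element size to keep each push naturally aligned. */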
-      if (state->element_size[(offset / 2) + i])
-         assert((state->element_size[(offset / 2) + i]) == element_size);
+      if (table->element_size[(offset / 2) + i])
+         assert((table->element_size[(offset / 2) + i]) == element_size);
       else
-         state->element_size[(offset / 2) + i] = element_size;
+         table->element_size[(offset / 2) + i] = element_size;
    }

-   util_dynarray_append(&state->load_preambles, nir_intrinsic_instr *,
-                        nir_instr_as_intrinsic(value->parent_instr));
-   return true;
+   util_dynarray_append(&state->load_preambles, nir_intrinsic_instr *, intr);
+   return false;
 }

+/* Step 3: Decide where to push the system values */
 static struct agx_push_range *
-find_push_range_containing(struct agx_compiled_shader *shader, unsigned offset)
+find_push_range_containing(struct agx_compiled_shader *shader, uint8_t table,
+                           uint16_t offset)
 {
    for (unsigned i = 0; i < shader->push_range_count; ++i) {
       struct agx_push_range *range = &shader->push[i];

+      if (range->table != table)
+         continue;
+
       /* range->length is 16-bit words, need to convert. offset is bytes. */
-      unsigned length_B = range->length * 2;
+      uint16_t length_B = range->length * 2;

       if (range->offset <= offset && offset < (range->offset + length_B))
          return range;
@@ -134,10 +211,9 @@ find_push_range_containing(struct agx_compiled_shader *shader, unsigned offset)
 }

 static unsigned
-lay_out_uniforms(struct agx_compiled_shader *shader, struct state *state)
+lay_out_table(struct agx_compiled_shader *shader, struct table_state *state,
+              unsigned table_index, unsigned uniform)
 {
-   unsigned uniform = 0;
-
    unsigned start, end;
    BITSET_FOREACH_RANGE(start, end, state->pushed, sizeof(state->pushed) * 8) {
       unsigned range_start = start;
@@ -168,6 +244,7 @@ lay_out_uniforms(struct agx_compiled_shader *shader, struct state *state)

          shader->push[shader->push_range_count++] = (struct agx_push_range){
             .uniform = uniform,
+            .table = table_index,
             .offset = range_start * 2 /* bytes, not elements */,
             .length = (range_end - range_start),
          };
@@ -177,9 +254,26 @@ lay_out_uniforms(struct agx_compiled_shader *shader, struct state *state)
       } while (range_start < end);
    }

+   return uniform;
+}
+
+static unsigned
+lay_out_uniforms(struct agx_compiled_shader *shader, struct state *state)
+{
+   unsigned uniform = 0;
+
+   /* Lay out each system value table */
+   for (uint8_t t = 0; t < AGX_NUM_SYSVAL_TABLES; ++t)
+      uniform = lay_out_table(shader, &state->tables[t], t, uniform);
+
+   /* Step 4: Fill in the loads */
    util_dynarray_foreach(&state->load_preambles, nir_intrinsic_instr *, intr) {
-      unsigned offset = nir_intrinsic_base(*intr);
-      struct agx_push_range *range = find_push_range_containing(shader, offset);
+      uint32_t base = nir_intrinsic_base(*intr);
+      uint8_t table = base >> 16;
+      uint16_t offset = base & 0xFFFF;
+
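+      /* Map the packed (table, offset) key back to the push range that now
+       * contains it and rewrite the base as a plain 16-bit uniform index. */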
+      struct agx_push_range *range =
+         find_push_range_containing(shader, table, offset);

       nir_intrinsic_set_base(*intr,
                              range->uniform + ((offset - range->offset) / 2));
@@ -192,16 +286,20 @@ bool
 agx_nir_lower_sysvals(nir_shader *shader, struct agx_compiled_shader *compiled,
                       unsigned *push_size)
 {
-   struct state state = {0};
-
    bool progress = nir_shader_instructions_pass(
-      shader, pass, nir_metadata_block_index | nir_metadata_dominance, &state);
+      shader, lower_sysvals, nir_metadata_block_index | nir_metadata_dominance,
+      NULL);

-   if (progress) {
-      *push_size = lay_out_uniforms(compiled, &state);
-   } else {
+   if (!progress) {
       *push_size = 0;
+      return false;
    }

-   return progress;
+   struct state state = {0};
+   nir_shader_instructions_pass(
+      shader, record_loads, nir_metadata_block_index | nir_metadata_dominance,
+      &state);
+
+   *push_size = lay_out_uniforms(compiled, &state);
+   return true;
 }
diff --git a/src/gallium/drivers/asahi/agx_state.c b/src/gallium/drivers/asahi/agx_state.c
index b8f8c9e..bb5b2c0 100644
--- a/src/gallium/drivers/asahi/agx_state.c
+++ b/src/gallium/drivers/asahi/agx_state.c
@@ -1838,11 +1838,13 @@ agx_build_pipeline(struct agx_batch *batch, struct agx_compiled_shader *cs,
    /* Must only upload uniforms after uploading textures so we can implement the
     * AGX_PUSH_TEXTURE_BASE sysval correctly.
     */
-   uint64_t uniforms = agx_upload_uniforms(batch, T_tex.gpu, stage);
+   uint64_t uniform_tables[AGX_NUM_SYSVAL_TABLES] = {
+      agx_upload_uniforms(batch, T_tex.gpu, stage),
+   };

    for (unsigned i = 0; i < cs->push_range_count; ++i) {
       agx_usc_uniform(&b, cs->push[i].uniform, cs->push[i].length,
-                      uniforms + cs->push[i].offset);
+                      uniform_tables[cs->push[i].table] + cs->push[i].offset);
    }

    if (stage == PIPE_SHADER_FRAGMENT)
diff --git a/src/gallium/drivers/asahi/agx_state.h b/src/gallium/drivers/asahi/agx_state.h
index 7cab7d2..6443a2a 100644
--- a/src/gallium/drivers/asahi/agx_state.h
+++ b/src/gallium/drivers/asahi/agx_state.h
@@ -68,7 +68,13 @@ agx_so_target(struct pipe_stream_output_target *target)
  * compiler. The layout is up to us and handled by our code lowering system
  * values to uniforms.
  */
+enum agx_sysval_table { AGX_SYSVAL_TABLE_ROOT, AGX_NUM_SYSVAL_TABLES };
+
+/* Root system value table */
 struct PACKED agx_draw_uniforms {
+   /* Pointers to the system value tables themselves (for indirection) */
+   uint64_t tables[AGX_NUM_SYSVAL_TABLES];
+
    /* Pointer to binding table for texture descriptor, or 0 if none */
    uint64_t texture_base;

@@ -102,11 +108,14 @@ struct agx_push_range {
    /* Base 16-bit uniform to push to */
    uint16_t uniform;

-   /* Offset into agx_draw_uniforms to push in bytes */
+   /* Offset into the table to push in bytes */
    uint16_t offset;

+   /* Which table to push from */
+   uint8_t table;
+
    /* Number of consecutive 16-bit uniforms to push */
-   size_t length;
+   uint8_t length;
 };

 struct agx_compiled_shader {
diff --git a/src/gallium/drivers/asahi/agx_uniforms.c b/src/gallium/drivers/asahi/agx_uniforms.c
index f2a57ce..aaa3c7a 100644
--- a/src/gallium/drivers/asahi/agx_uniforms.c
+++ b/src/gallium/drivers/asahi/agx_uniforms.c
@@ -77,7 +77,16 @@ agx_upload_uniforms(struct agx_batch *batch, uint64_t textures,
    struct agx_context *ctx = batch->ctx;
    struct agx_stage *st = &ctx->stage[stage];

-   struct agx_draw_uniforms uniforms = {.texture_base = textures};
+   struct agx_ptr root_ptr = agx_pool_alloc_aligned(
+      &batch->pool, sizeof(struct agx_draw_uniforms), 16);
+
+   struct agx_draw_uniforms uniforms = {
+      .tables =
+         {
+            [AGX_SYSVAL_TABLE_ROOT] = root_ptr.gpu,
+         },
+      .texture_base = textures,
+   };

    u_foreach_bit(s, st->valid_samplers) {
       uniforms.lod_bias[s] = st->samplers[s]->lod_bias_as_fp16;
@@ -101,5 +110,6 @@ agx_upload_uniforms(struct agx_batch *batch, uint64_t textures,
              sizeof(ctx->blend_color));
    }

-   return agx_pool_upload(&batch->pool, &uniforms, sizeof(uniforms));
+   memcpy(root_ptr.cpu, &uniforms, sizeof(uniforms));
+   return root_ptr.gpu;
 }
-- 
2.7.4