static nir_def *
build_background_op(nir_builder *b, enum agx_meta_op op, unsigned rt,
- unsigned nr, bool msaa)
+ unsigned nr, bool msaa, bool layered)
{
if (op == AGX_META_OP_LOAD) {
+ nir_def *coord = nir_u2u32(b, nir_load_pixel_coord(b));
+
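+ /* Layered render targets are arrayed textures, so the fetch coordinate
+ * gains the layer index as a third component.
+ */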
+ if (layered) {
+ coord = nir_vec3(b, nir_channel(b, coord, 0), nir_channel(b, coord, 1),
+ agx_internal_layer_id(b));
+ }
+
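+ /* Background load: texel fetch of the render target's existing contents,
+ * using txf_ms for multisampled targets and txf otherwise.
+ */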
nir_tex_instr *tex = nir_tex_instr_create(b->shader, 2);
/* The type doesn't matter as long as it matches the store */
tex->dest_type = nir_type_uint32;
tex->sampler_dim = msaa ? GLSL_SAMPLER_DIM_MS : GLSL_SAMPLER_DIM_2D;
+ tex->is_array = layered;
tex->op = msaa ? nir_texop_txf_ms : nir_texop_txf;
- tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord,
- nir_u2u32(b, nir_load_pixel_coord(b)));
+ tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord, coord);
+
+ /* Layer is necessarily already in-bounds, so we do not want the compiler
+ * to clamp it, which would require reading the descriptor.
+ */
+ tex->backend_flags = AGX_TEXTURE_FLAG_NO_CLAMP;
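+ /* txf_ms takes the sample index as its second source, while plain txf
+ * takes an explicit level-of-detail.
+ */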
if (msaa) {
      tex->src[1] =
         nir_tex_src_for_ssa(nir_tex_src_ms_index, nir_load_sample_id(b));
   } else {
      tex->src[1] =
         nir_tex_src_for_ssa(nir_tex_src_lod, nir_imm_int(b, 0));
}
- tex->coord_components = 2;
+ tex->coord_components = layered ? 3 : 2;
tex->texture_index = rt;
nir_def_init(&tex->instr, &tex->def, 4, 32);
   nir_builder_instr_insert(b, &tex->instr);
@@ ... @@
   unsigned nr = util_format_get_nr_components(key->tib.logical_format[rt]);
bool msaa = key->tib.nr_samples > 1;
+ bool layered = key->tib.layered;
assert(nr > 0);
- nir_store_output(&b, build_background_op(&b, key->op[rt], rt, nr, msaa),
- nir_imm_int(&b, 0), .write_mask = BITFIELD_MASK(nr),
- .src_type = nir_type_uint32,
- .io_semantics.location = FRAG_RESULT_DATA0 + rt,
- .io_semantics.num_slots = 1);
+ nir_store_output(
+ &b, build_background_op(&b, key->op[rt], rt, nr, msaa, layered),
+ nir_imm_int(&b, 0), .write_mask = BITFIELD_MASK(nr),
+ .src_type = nir_type_uint32,
+ .io_semantics.location = FRAG_RESULT_DATA0 + rt,
+ .io_semantics.num_slots = 1);
b.shader->info.outputs_written |= BITFIELD64_BIT(FRAG_RESULT_DATA0 + rt);
}