case nir_intrinsic_load_back_face_agx:
   return agx_get_sr_to(b, dst, AGX_SR_BACKFACING);
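+ /* The texture descriptor table base is pushed as a 64-bit uniform (see
+  * AGX_PUSH_TEXTURE_BASE below), so the intrinsic lowers to a move from the
+  * corresponding indexed sysval. A NIR texture lowering could then compute a
+  * descriptor address from it; a hypothetical sketch (the builder name and
+  * descriptor stride are assumptions, not part of this change):
+  *
+  *    nir_ssa_def *base = nir_load_texture_base_agx(b);
+  *    nir_ssa_def *addr = nir_iadd(b, base,
+  *       nir_u2u64(b, nir_imul_imm(b, index, AGX_TEXTURE_LENGTH)));
+  */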
+ case nir_intrinsic_load_texture_base_agx:
+    return agx_mov_to(b, dst, agx_indexed_sysval(b->shader,
+             AGX_PUSH_TEXTURE_BASE, AGX_SIZE_64, 0, 4));
+
case nir_intrinsic_load_vertex_id:
   return agx_mov_to(b, dst, agx_abs(agx_register(10, AGX_SIZE_32)));
uint8_t *record = ptr.cpu;
- for (unsigned i = 0; i < cs->info.push_ranges; ++i) {
-    struct agx_push push = cs->info.push[i];
-
-    agx_pack(record, BIND_UNIFORM, cfg) {
-       cfg.start_halfs = push.base;
-       cfg.size_halfs = push.length;
-       cfg.buffer = agx_push_location(ctx, push, stage);
-    }
-
-    record += AGX_BIND_UNIFORM_LENGTH;
- }
-
unsigned nr_textures = ctx->stage[stage].texture_count;
unsigned nr_samplers = ctx->stage[stage].sampler_count;
      cfg.buffer = T_tex.gpu;
   }
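+    /* Remember where the texture descriptors were uploaded; the
+     * AGX_PUSH_TEXTURE_BASE sysval pushed with the uniforms below reads
+     * this address.
+     */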
+    ctx->batch->textures = T_tex.gpu;
   record += AGX_BIND_TEXTURE_LENGTH;
}
   record += AGX_BIND_SAMPLER_LENGTH;
}
+ /* Must only upload uniforms after uploading textures so we can implement
+  * the AGX_PUSH_TEXTURE_BASE sysval correctly.
+  */
+ for (unsigned i = 0; i < cs->info.push_ranges; ++i) {
+    struct agx_push push = cs->info.push[i];
+
+    agx_pack(record, BIND_UNIFORM, cfg) {
+       cfg.start_halfs = push.base;
+       cfg.size_halfs = push.length;
+       cfg.buffer = agx_push_location(ctx, push, stage);
+    }
+
+    record += AGX_BIND_UNIFORM_LENGTH;
+ }
+
/* TODO: Can we prepack this? */
if (stage == PIPE_SHADER_FRAGMENT) {
   bool writes_sample_mask = ctx->fs->info.writes_sample_mask;
   return ptr.gpu;
}
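+ /* Push the GPU address of the texture descriptor table recorded when the
+  * textures were uploaded above. The address is written into a fresh 8-byte
+  * pool allocation, since BIND_UNIFORM's buffer field points at memory to
+  * source the uniform contents from rather than taking an immediate value.
+  */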
+ case AGX_PUSH_TEXTURE_BASE: {
+    struct agx_ptr ptr = agx_pool_alloc_aligned(&batch->pool, sizeof(uint64_t), 8);
+    uint64_t *address = ptr.cpu;
+    *address = batch->textures;
+    return ptr.gpu;
+ }
+
default:
   unreachable("todo: push more");
}