bool supports_shader_constants;
/**
- * Whether or not the driver wants variable group size to be lowered by the
- * back-end compiler.
- */
- bool lower_variable_group_size;
-
- /**
* Whether indirect UBO loads should use the sampler or go through the
* data/constant cache. For the sampler, UBO surface states have to be set
* up with VK_FORMAT_R32G32B32A32_FLOAT whereas if it's going through the
/* Add uniforms for builtins after regular NIR uniforms. */
assert(uniforms == prog_data->nr_params);
- uint32_t *param;
- if (nir->info.workgroup_size_variable &&
- compiler->lower_variable_group_size) {
- param = brw_stage_prog_data_add_params(prog_data, 3);
- for (unsigned i = 0; i < 3; i++) {
- param[i] = (BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X + i);
- group_size[i] = fs_reg(UNIFORM, uniforms++, BRW_REGISTER_TYPE_UD);
- }
- }
-
/* Subgroup ID must be the last uniform on the list. This will make
* it easier later to split between cross thread and per thread
* uniforms.
*/
- param = brw_stage_prog_data_add_params(prog_data, 1);
+ uint32_t *param = brw_stage_prog_data_add_params(prog_data, 1);
*param = BRW_PARAM_BUILTIN_SUBGROUP_ID;
subgroup_id = fs_reg(UNIFORM, uniforms++, BRW_REGISTER_TYPE_UD);
}
}
case nir_intrinsic_load_workgroup_size: {
- /* For non-variable case, this should've been lowered already. */
- assert(nir->info.workgroup_size_variable);
-
- assert(compiler->lower_variable_group_size);
- assert(gl_shader_stage_is_compute(stage));
-
- for (unsigned i = 0; i < 3; i++) {
- bld.MOV(retype(offset(dest, bld, i), BRW_REGISTER_TYPE_UD),
- group_size[i]);
- }
+ /* Should have been lowered by brw_nir_lower_cs_intrinsics() or
+ * crocus/iris_setup_uniforms() for the variable group size case.
+ */
+ unreachable("Should have been lowered");
break;
}