shader_info->num_input_sgprs + 3);
if (nir->stage == MESA_SHADER_COMPUTE) {
for (int i = 0; i < 3; ++i)
- shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
+ shader_info->cs.block_size[i] = nir->info->cs.local_size[i];
}
if (nir->stage == MESA_SHADER_FRAGMENT)
- shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
+ shader_info->fs.early_fragment_test = nir->info->fs.early_fragment_tests;
}
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
- b.shader->info.name = ralloc_strdup(b.shader, "meta_blit_vs");
+ b.shader->info->name = ralloc_strdup(b.shader, "meta_blit_vs");
nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
vec4, "a_pos");
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
sprintf(shader_name, "meta_blit_fs.%d", tex_dim);
- b.shader->info.name = ralloc_strdup(b.shader, shader_name);
+ b.shader->info->name = ralloc_strdup(b.shader, shader_name);
nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
vec4, "v_tex_pos");
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
sprintf(shader_name, "meta_blit_depth_fs.%d", tex_dim);
- b.shader->info.name = ralloc_strdup(b.shader, shader_name);
+ b.shader->info->name = ralloc_strdup(b.shader, shader_name);
nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
vec4, "v_tex_pos");
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
sprintf(shader_name, "meta_blit_stencil_fs.%d", tex_dim);
- b.shader->info.name = ralloc_strdup(b.shader, shader_name);
+ b.shader->info->name = ralloc_strdup(b.shader, shader_name);
nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
vec4, "v_tex_pos");
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
- b.shader->info.name = ralloc_strdup(b.shader, "meta_blit_vs");
+ b.shader->info->name = ralloc_strdup(b.shader, "meta_blit_vs");
nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
vec4, "a_pos");
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
- b.shader->info.name = ralloc_strdup(b.shader, name);
+ b.shader->info->name = ralloc_strdup(b.shader, name);
nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
vec2, "v_tex_pos");
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
- b.shader->info.name = ralloc_strdup(b.shader, name);
+ b.shader->info->name = ralloc_strdup(b.shader, name);
nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
vec2, "v_tex_pos");
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
- b.shader->info.name = ralloc_strdup(b.shader, name);
+ b.shader->info->name = ralloc_strdup(b.shader, name);
nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
vec2, "v_tex_pos");
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
- b.shader->info.name = ralloc_strdup(b.shader, "meta_buffer_fill");
- b.shader->info.cs.local_size[0] = 64;
- b.shader->info.cs.local_size[1] = 1;
- b.shader->info.cs.local_size[2] = 1;
+ b.shader->info->name = ralloc_strdup(b.shader, "meta_buffer_fill");
+ b.shader->info->cs.local_size[0] = 64;
+ b.shader->info->cs.local_size[1] = 1;
+ b.shader->info->cs.local_size[2] = 1;
nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
nir_ssa_def *block_size = nir_imm_ivec4(&b,
- b.shader->info.cs.local_size[0],
- b.shader->info.cs.local_size[1],
- b.shader->info.cs.local_size[2], 0);
+ b.shader->info->cs.local_size[0],
+ b.shader->info->cs.local_size[1],
+ b.shader->info->cs.local_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
- b.shader->info.name = ralloc_strdup(b.shader, "meta_buffer_copy");
- b.shader->info.cs.local_size[0] = 64;
- b.shader->info.cs.local_size[1] = 1;
- b.shader->info.cs.local_size[2] = 1;
+ b.shader->info->name = ralloc_strdup(b.shader, "meta_buffer_copy");
+ b.shader->info->cs.local_size[0] = 64;
+ b.shader->info->cs.local_size[1] = 1;
+ b.shader->info->cs.local_size[2] = 1;
nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
nir_ssa_def *block_size = nir_imm_ivec4(&b,
- b.shader->info.cs.local_size[0],
- b.shader->info.cs.local_size[1],
- b.shader->info.cs.local_size[2], 0);
+ b.shader->info->cs.local_size[0],
+ b.shader->info->cs.local_size[1],
+ b.shader->info->cs.local_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
false,
GLSL_TYPE_FLOAT);
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
- b.shader->info.name = ralloc_strdup(b.shader, "meta_itob_cs");
- b.shader->info.cs.local_size[0] = 16;
- b.shader->info.cs.local_size[1] = 16;
- b.shader->info.cs.local_size[2] = 1;
+ b.shader->info->name = ralloc_strdup(b.shader, "meta_itob_cs");
+ b.shader->info->cs.local_size[0] = 16;
+ b.shader->info->cs.local_size[1] = 16;
+ b.shader->info->cs.local_size[2] = 1;
nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform,
sampler_type, "s_tex");
input_img->data.descriptor_set = 0;
nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
nir_ssa_def *block_size = nir_imm_ivec4(&b,
- b.shader->info.cs.local_size[0],
- b.shader->info.cs.local_size[1],
- b.shader->info.cs.local_size[2], 0);
+ b.shader->info->cs.local_size[0],
+ b.shader->info->cs.local_size[1],
+ b.shader->info->cs.local_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
nir_builder_init_simple_shader(&vs_b, NULL, MESA_SHADER_VERTEX, NULL);
nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
- vs_b.shader->info.name = ralloc_strdup(vs_b.shader, "meta_clear_color_vs");
- fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "meta_clear_color_fs");
+ vs_b.shader->info->name = ralloc_strdup(vs_b.shader, "meta_clear_color_vs");
+ fs_b.shader->info->name = ralloc_strdup(fs_b.shader, "meta_clear_color_fs");
const struct glsl_type *position_type = glsl_vec4_type();
const struct glsl_type *color_type = glsl_vec4_type();
nir_builder_init_simple_shader(&vs_b, NULL, MESA_SHADER_VERTEX, NULL);
nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
- vs_b.shader->info.name = ralloc_strdup(vs_b.shader, "meta_clear_depthstencil_vs");
- fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "meta_clear_depthstencil_fs");
+ vs_b.shader->info->name = ralloc_strdup(vs_b.shader, "meta_clear_depthstencil_vs");
+ fs_b.shader->info->name = ralloc_strdup(fs_b.shader, "meta_clear_depthstencil_fs");
const struct glsl_type *position_type = glsl_vec4_type();
nir_variable *vs_in_pos =
nir_variable *v_position;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
- b.shader->info.name = ralloc_strdup(b.shader, "meta_depth_decomp_vs");
+ b.shader->info->name = ralloc_strdup(b.shader, "meta_depth_decomp_vs");
a_position = nir_variable_create(b.shader, nir_var_shader_in, vec4,
"a_position");
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
- b.shader->info.name = ralloc_asprintf(b.shader,
- "meta_depth_decomp_noop_fs");
+ b.shader->info->name = ralloc_asprintf(b.shader,
+ "meta_depth_decomp_noop_fs");
return b.shader;
}
nir_variable *v_position;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
- b.shader->info.name = ralloc_strdup(b.shader, "meta_fast_clear_vs");
+ b.shader->info->name = ralloc_strdup(b.shader, "meta_fast_clear_vs");
a_position = nir_variable_create(b.shader, nir_var_shader_in, vec4,
"a_position");
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
- b.shader->info.name = ralloc_asprintf(b.shader,
+ b.shader->info->name = ralloc_asprintf(b.shader,
"meta_fast_clear_noop_fs");
return b.shader;
nir_variable *v_tex_position;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
- b.shader->info.name = ralloc_strdup(b.shader, "meta_resolve_vs");
+ b.shader->info->name = ralloc_strdup(b.shader, "meta_resolve_vs");
a_position = nir_variable_create(b.shader, nir_var_shader_in, vec4,
"a_position");
nir_variable *f_color; /* vec4, fragment output color */
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
- b.shader->info.name = ralloc_asprintf(b.shader,
- "meta_resolve_fs");
+ b.shader->info->name = ralloc_asprintf(b.shader,
+ "meta_resolve_fs");
v_tex_position = nir_variable_create(b.shader, nir_var_shader_in, vec4,
"v_tex_position");
GLSL_TYPE_FLOAT);
snprintf(name, 64, "meta_resolve_cs-%d-%s", samples, is_integer ? "int" : "float");
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
- b.shader->info.name = ralloc_strdup(b.shader, name);
- b.shader->info.cs.local_size[0] = 16;
- b.shader->info.cs.local_size[1] = 16;
- b.shader->info.cs.local_size[2] = 1;
+ b.shader->info->name = ralloc_strdup(b.shader, name);
+ b.shader->info->cs.local_size[0] = 16;
+ b.shader->info->cs.local_size[1] = 16;
+ b.shader->info->cs.local_size[2] = 1;
nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform,
sampler_type, "s_tex");
nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
nir_ssa_def *block_size = nir_imm_ivec4(&b,
- b.shader->info.cs.local_size[0],
- b.shader->info.cs.local_size[1],
- b.shader->info.cs.local_size[2], 0);
+ b.shader->info->cs.local_size[0],
+ b.shader->info->cs.local_size[1],
+ b.shader->info->cs.local_size[2], 0);
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
}
/* Vulkan uses the separate-shader linking model */
- nir->info.separate_shader = true;
+ nir->info->separate_shader = true;
// nir = brw_preprocess_nir(compiler, nir);
unsigned code_size = 0;
if (module->nir)
- _mesa_sha1_compute(module->nir->info.name,
- strlen(module->nir->info.name),
+ _mesa_sha1_compute(module->nir->info->name,
+ strlen(module->nir->info->name),
module->sha1);
radv_hash_shader(sha1, module, entrypoint, spec_info, layout, key);
if (!modules[MESA_SHADER_FRAGMENT]) {
nir_builder fs_b;
nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
- fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "noop_fs");
+ fs_b.shader->info->name = ralloc_strdup(fs_b.shader, "noop_fs");
fs_m.nir = fs_b.shader;
modules[MESA_SHADER_FRAGMENT] = &fs_m;
}
{
struct gl_linked_shader *sh = shader_prog->_LinkedShaders[stage];
- nir_shader *shader = nir_shader_create(NULL, stage, options);
+ nir_shader *shader = nir_shader_create(NULL, stage, options, NULL);
nir_visitor v1(shader);
nir_function_visitor v2(&v1);
v2.run(sh->ir);
visit_exec_list(sh->ir, &v1);
- shader->info.name = ralloc_asprintf(shader, "GLSL%d", shader_prog->Name);
+ shader->info->name = ralloc_asprintf(shader, "GLSL%d", shader_prog->Name);
if (shader_prog->Label)
- shader->info.label = ralloc_strdup(shader, shader_prog->Label);
- shader->info.num_textures = util_last_bit(sh->Program->SamplersUsed);
- shader->info.num_ubos = sh->NumUniformBlocks;
- shader->info.num_abos = shader_prog->NumAtomicBuffers;
- shader->info.num_ssbos = sh->NumShaderStorageBlocks;
- shader->info.num_images = sh->NumImages;
- shader->info.inputs_read = sh->Program->InputsRead;
- shader->info.double_inputs_read = sh->Program->DoubleInputsRead;
- shader->info.outputs_written = sh->Program->OutputsWritten;
- shader->info.outputs_read = sh->Program->OutputsRead;
- shader->info.patch_inputs_read = sh->Program->PatchInputsRead;
- shader->info.patch_outputs_written = sh->Program->PatchOutputsWritten;
- shader->info.system_values_read = sh->Program->SystemValuesRead;
- shader->info.uses_texture_gather = sh->Program->UsesGather;
- shader->info.uses_clip_distance_out =
+ shader->info->label = ralloc_strdup(shader, shader_prog->Label);
+ shader->info->num_textures = util_last_bit(sh->Program->SamplersUsed);
+ shader->info->num_ubos = sh->NumUniformBlocks;
+ shader->info->num_abos = shader_prog->NumAtomicBuffers;
+ shader->info->num_ssbos = sh->NumShaderStorageBlocks;
+ shader->info->num_images = sh->NumImages;
+ shader->info->inputs_read = sh->Program->InputsRead;
+ shader->info->double_inputs_read = sh->Program->DoubleInputsRead;
+ shader->info->outputs_written = sh->Program->OutputsWritten;
+ shader->info->outputs_read = sh->Program->OutputsRead;
+ shader->info->patch_inputs_read = sh->Program->PatchInputsRead;
+ shader->info->patch_outputs_written = sh->Program->PatchOutputsWritten;
+ shader->info->system_values_read = sh->Program->SystemValuesRead;
+ shader->info->uses_texture_gather = sh->Program->UsesGather;
+ shader->info->uses_clip_distance_out =
sh->Program->ClipDistanceArraySize != 0;
- shader->info.separate_shader = shader_prog->SeparateShader;
- shader->info.has_transform_feedback_varyings =
+ shader->info->separate_shader = shader_prog->SeparateShader;
+ shader->info->has_transform_feedback_varyings =
shader_prog->TransformFeedback.NumVarying > 0;
switch (stage) {
case MESA_SHADER_TESS_CTRL:
- shader->info.tcs.vertices_out = sh->info.TessCtrl.VerticesOut;
+ shader->info->tcs.vertices_out = sh->info.TessCtrl.VerticesOut;
break;
case MESA_SHADER_GEOMETRY:
- shader->info.gs.vertices_in = shader_prog->Geom.VerticesIn;
- shader->info.gs.output_primitive = sh->info.Geom.OutputType;
- shader->info.gs.vertices_out = sh->info.Geom.VerticesOut;
- shader->info.gs.invocations = sh->info.Geom.Invocations;
- shader->info.gs.uses_end_primitive = shader_prog->Geom.UsesEndPrimitive;
- shader->info.gs.uses_streams = shader_prog->Geom.UsesStreams;
+ shader->info->gs.vertices_in = shader_prog->Geom.VerticesIn;
+ shader->info->gs.output_primitive = sh->info.Geom.OutputType;
+ shader->info->gs.vertices_out = sh->info.Geom.VerticesOut;
+ shader->info->gs.invocations = sh->info.Geom.Invocations;
+ shader->info->gs.uses_end_primitive = shader_prog->Geom.UsesEndPrimitive;
+ shader->info->gs.uses_streams = shader_prog->Geom.UsesStreams;
break;
case MESA_SHADER_FRAGMENT: {
struct gl_fragment_program *fp =
(struct gl_fragment_program *)sh->Program;
- shader->info.fs.uses_discard = fp->UsesKill;
- shader->info.fs.uses_sample_qualifier = fp->IsSample != 0;
- shader->info.fs.early_fragment_tests = sh->info.EarlyFragmentTests;
- shader->info.fs.depth_layout = fp->FragDepthLayout;
+ shader->info->fs.uses_discard = fp->UsesKill;
+ shader->info->fs.uses_sample_qualifier = fp->IsSample != 0;
+ shader->info->fs.early_fragment_tests = sh->info.EarlyFragmentTests;
+ shader->info->fs.depth_layout = fp->FragDepthLayout;
break;
}
case MESA_SHADER_COMPUTE: {
struct gl_compute_program *cp = (struct gl_compute_program *)sh->Program;
- shader->info.cs.local_size[0] = cp->LocalSize[0];
- shader->info.cs.local_size[1] = cp->LocalSize[1];
- shader->info.cs.local_size[2] = cp->LocalSize[2];
+ shader->info->cs.local_size[0] = cp->LocalSize[0];
+ shader->info->cs.local_size[1] = cp->LocalSize[1];
+ shader->info->cs.local_size[2] = cp->LocalSize[2];
break;
}
nir_shader *
nir_shader_create(void *mem_ctx,
gl_shader_stage stage,
- const nir_shader_compiler_options *options)
+ const nir_shader_compiler_options *options,
+ shader_info *si)
{
nir_shader *shader = ralloc(mem_ctx, nir_shader);
exec_list_make_empty(&shader->shared);
shader->options = options;
- memset(&shader->info, 0, sizeof(shader->info));
+
+ shader->info = si ? si : rzalloc(shader, shader_info);
exec_list_make_empty(&shader->functions);
exec_list_make_empty(&shader->registers);
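
With the extra parameter, a caller can either let NIR allocate the info or hand in a shader_info it already owns. A minimal sketch of the two call styles (hypothetical caller code, not part of this patch; mem_ctx, options, and prog stand in for whatever the caller already has):

    /* Let nir_shader_create() rzalloc a fresh, zeroed shader_info
     * owned by the shader itself: */
    nir_shader *s = nir_shader_create(mem_ctx, MESA_SHADER_VERTEX, options, NULL);

    /* Or pass in externally owned info, e.g. so a gl_program and its
     * NIR shader can share a single shader_info: */
    shader_info *si = rzalloc(prog, shader_info);
    nir_shader *s2 = nir_shader_create(mem_ctx, MESA_SHADER_VERTEX, options, si);
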
const struct nir_shader_compiler_options *options;
/** Various bits of compile-time information about a given shader */
- struct shader_info info;
+ struct shader_info *info;
/** list of global variables in the shader (nir_variable) */
struct exec_list globals;
nir_shader *nir_shader_create(void *mem_ctx,
gl_shader_stage stage,
- const nir_shader_compiler_options *options);
+ const nir_shader_compiler_options *options,
+ shader_info *si);
/** creates a register, including assigning it an index and adding it to the list */
nir_register *nir_global_reg_create(nir_shader *shader);
gl_shader_stage stage,
const nir_shader_compiler_options *options)
{
- build->shader = nir_shader_create(mem_ctx, stage, options);
+ build->shader = nir_shader_create(mem_ctx, stage, options, NULL);
nir_function *func = nir_function_create(build->shader, "main");
build->exact = false;
build->impl = nir_function_impl_create(func);
clone_state state;
init_clone_state(&state, true);
- nir_shader *ns = nir_shader_create(mem_ctx, s->stage, s->options);
+ nir_shader *ns = nir_shader_create(mem_ctx, s->stage, s->options, NULL);
state.ns = ns;
clone_var_list(&state, &ns->uniforms, &s->uniforms);
ns->reg_alloc = s->reg_alloc;
- ns->info = s->info;
- ns->info.name = ralloc_strdup(ns, ns->info.name);
- if (ns->info.label)
- ns->info.label = ralloc_strdup(ns, ns->info.label);
+ *ns->info = *s->info;
+ ns->info->name = ralloc_strdup(ns, ns->info->name);
+ if (ns->info->label)
+ ns->info->label = ralloc_strdup(ns, ns->info->label);
ns->num_inputs = s->num_inputs;
ns->num_uniforms = s->num_uniforms;
case nir_intrinsic_discard:
case nir_intrinsic_discard_if:
assert(shader->stage == MESA_SHADER_FRAGMENT);
- shader->info.fs.uses_discard = true;
+ shader->info->fs.uses_discard = true;
break;
case nir_intrinsic_load_front_face:
case nir_intrinsic_load_local_invocation_index:
case nir_intrinsic_load_work_group_id:
case nir_intrinsic_load_num_work_groups:
- shader->info.system_values_read |=
+ shader->info->system_values_read |=
(1 << nir_system_value_from_intrinsic(instr->intrinsic));
break;
case nir_intrinsic_end_primitive:
case nir_intrinsic_end_primitive_with_counter:
assert(shader->stage == MESA_SHADER_GEOMETRY);
- shader->info.gs.uses_end_primitive = 1;
+ shader->info->gs.uses_end_primitive = 1;
break;
default:
gather_tex_info(nir_tex_instr *instr, nir_shader *shader)
{
if (instr->op == nir_texop_tg4)
- shader->info.uses_texture_gather = true;
+ shader->info->uses_texture_gather = true;
}
static void
shader->stage == MESA_SHADER_COMPUTE);
bool uses_sample_qualifier = false;
- shader->info.inputs_read = 0;
+ shader->info->inputs_read = 0;
foreach_list_typed(nir_variable, var, node, &shader->inputs) {
- shader->info.inputs_read |= get_io_mask(var, shader->stage);
+ shader->info->inputs_read |= get_io_mask(var, shader->stage);
uses_sample_qualifier |= var->data.sample;
}
if (shader->stage == MESA_SHADER_FRAGMENT)
- shader->info.fs.uses_sample_qualifier = uses_sample_qualifier;
+ shader->info->fs.uses_sample_qualifier = uses_sample_qualifier;
/* TODO: Some day we may need to add stream support to NIR */
- shader->info.outputs_written = 0;
+ shader->info->outputs_written = 0;
foreach_list_typed(nir_variable, var, node, &shader->outputs)
- shader->info.outputs_written |= get_io_mask(var, shader->stage);
+ shader->info->outputs_written |= get_io_mask(var, shader->stage);
- shader->info.system_values_read = 0;
+ shader->info->system_values_read = 0;
foreach_list_typed(nir_variable, var, node, &shader->system_values)
- shader->info.system_values_read |= get_io_mask(var, shader->stage);
+ shader->info->system_values_read |= get_io_mask(var, shader->stage);
- shader->info.num_textures = 0;
- shader->info.num_images = 0;
+ shader->info->num_textures = 0;
+ shader->info->num_images = 0;
nir_foreach_variable(var, &shader->uniforms) {
const struct glsl_type *type = var->type;
unsigned count = 1;
}
if (glsl_type_is_image(type)) {
- shader->info.num_images += count;
+ shader->info->num_images += count;
} else if (glsl_type_is_sampler(type)) {
- shader->info.num_textures += count;
+ shader->info->num_textures += count;
}
}
discard->src[0] = nir_src_for_ssa(cond);
nir_builder_instr_insert(b, &discard->instr);
- shader->info.fs.uses_discard = true;
+ shader->info->fs.uses_discard = true;
}
static void
discard->src[0] = nir_src_for_ssa(cond);
nir_builder_instr_insert(&b, &discard->instr);
- b.shader->info.fs.uses_discard = true;
+ b.shader->info->fs.uses_discard = true;
}
}
}
b->cursor = nir_before_instr(&intrin->instr);
nir_ssa_def *count = nir_load_var(b, state->vertex_count_var);
- nir_ssa_def *max_vertices = nir_imm_int(b, b->shader->info.gs.vertices_out);
+ nir_ssa_def *max_vertices =
+ nir_imm_int(b, b->shader->info->gs.vertices_out);
/* Create: if (vertex_count < max_vertices) and insert it.
*
*/
nir_const_value local_size;
- local_size.u32[0] = b->shader->info.cs.local_size[0];
- local_size.u32[1] = b->shader->info.cs.local_size[1];
- local_size.u32[2] = b->shader->info.cs.local_size[2];
+ local_size.u32[0] = b->shader->info->cs.local_size[0];
+ local_size.u32[1] = b->shader->info->cs.local_size[1];
+ local_size.u32[2] = b->shader->info->cs.local_size[2];
nir_ssa_def *group_id = nir_load_work_group_id(b);
nir_ssa_def *local_id = nir_load_local_invocation_id(b);
*/
nir_ssa_def *local_id = nir_load_local_invocation_id(b);
- nir_ssa_def *size_x = nir_imm_int(b, b->shader->info.cs.local_size[0]);
- nir_ssa_def *size_y = nir_imm_int(b, b->shader->info.cs.local_size[1]);
+ nir_ssa_def *size_x =
+ nir_imm_int(b, b->shader->info->cs.local_size[0]);
+ nir_ssa_def *size_y =
+ nir_imm_int(b, b->shader->info->cs.local_size[1]);
sysval = nir_imul(b, nir_channel(b, local_id, 2),
nir_imul(b, size_x, size_y));
fprintf(fp, "shader: %s\n", gl_shader_stage_name(shader->stage));
- if (shader->info.name)
- fprintf(fp, "name: %s\n", shader->info.name);
+ if (shader->info->name)
+ fprintf(fp, "name: %s\n", shader->info->name);
- if (shader->info.label)
- fprintf(fp, "label: %s\n", shader->info.label);
+ if (shader->info->label)
+ fprintf(fp, "label: %s\n", shader->info->label);
fprintf(fp, "inputs: %u\n", shader->num_inputs);
fprintf(fp, "outputs: %u\n", shader->num_outputs);
/* First, move ownership of all the memory to a temporary context; assume dead. */
ralloc_adopt(rubbish, nir);
- ralloc_steal(nir, (char *)nir->info.name);
- if (nir->info.label)
- ralloc_steal(nir, (char *)nir->info.label);
+ ralloc_steal(nir, nir->info);
+ ralloc_steal(nir, (char *)nir->info->name);
+ if (nir->info->label)
+ ralloc_steal(nir, (char *)nir->info->label);
/* Variables and registers are not dead. Steal them back. */
steal_list(nir, nir_variable, &nir->uniforms);
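
For context, the sweep relies on ralloc's adopt/steal idiom; a rough sketch of the sequence, assuming Mesa's ralloc API:

    void *rubbish = ralloc_context(NULL);
    ralloc_adopt(rubbish, nir);           /* everything hanging off nir now belongs to rubbish */
    ralloc_steal(nir, nir->info);         /* the heap-allocated info is live: take it back */
    ralloc_steal(nir, (char *)nir->info->name);
    /* ...steal back live variables, registers, and instructions... */
    ralloc_free(rubbish);                 /* anything not stolen back is freed here */

Because shader->info is now rzalloc'd off the shader rather than embedded in it, the info struct itself must be stolen back before the rubbish context is freed.
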
assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));
- b->shader->info.cs.local_size[0] = val->constant->value.u[0];
- b->shader->info.cs.local_size[1] = val->constant->value.u[1];
- b->shader->info.cs.local_size[2] = val->constant->value.u[2];
+ b->shader->info->cs.local_size[0] = val->constant->value.u[0];
+ b->shader->info->cs.local_size[1] = val->constant->value.u[1];
+ b->shader->info->cs.local_size[2] = val->constant->value.u[2];
}
static void
case SpvExecutionModeEarlyFragmentTests:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info.fs.early_fragment_tests = true;
+ b->shader->info->fs.early_fragment_tests = true;
break;
case SpvExecutionModeInvocations:
assert(b->shader->stage == MESA_SHADER_GEOMETRY);
- b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
+ b->shader->info->gs.invocations = MAX2(1, mode->literals[0]);
break;
case SpvExecutionModeDepthReplacing:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
+ b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
break;
case SpvExecutionModeDepthGreater:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
+ b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
break;
case SpvExecutionModeDepthLess:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
+ b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
break;
case SpvExecutionModeDepthUnchanged:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
+ b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
break;
case SpvExecutionModeLocalSize:
assert(b->shader->stage == MESA_SHADER_COMPUTE);
- b->shader->info.cs.local_size[0] = mode->literals[0];
- b->shader->info.cs.local_size[1] = mode->literals[1];
- b->shader->info.cs.local_size[2] = mode->literals[2];
+ b->shader->info->cs.local_size[0] = mode->literals[0];
+ b->shader->info->cs.local_size[1] = mode->literals[1];
+ b->shader->info->cs.local_size[2] = mode->literals[2];
break;
case SpvExecutionModeLocalSizeHint:
break; /* Nothing to do with this */
case SpvExecutionModeOutputVertices:
assert(b->shader->stage == MESA_SHADER_GEOMETRY);
- b->shader->info.gs.vertices_out = mode->literals[0];
+ b->shader->info->gs.vertices_out = mode->literals[0];
break;
case SpvExecutionModeInputPoints:
case SpvExecutionModeQuads:
case SpvExecutionModeIsolines:
if (b->shader->stage == MESA_SHADER_GEOMETRY) {
- b->shader->info.gs.vertices_in =
+ b->shader->info->gs.vertices_in =
vertices_in_from_spv_execution_mode(mode->exec_mode);
} else {
assert(!"Tesselation shaders not yet supported");
case SpvExecutionModeOutputLineStrip:
case SpvExecutionModeOutputTriangleStrip:
assert(b->shader->stage == MESA_SHADER_GEOMETRY);
- b->shader->info.gs.output_primitive =
+ b->shader->info->gs.output_primitive =
gl_primitive_from_spv_execution_mode(mode->exec_mode);
break;
return NULL;
}
- b->shader = nir_shader_create(NULL, stage, options);
+ b->shader = nir_shader_create(NULL, stage, options, NULL);
/* Set shader info defaults */
- b->shader->info.gs.invocations = 1;
+ b->shader->info->gs.invocations = 1;
/* Parse execution modes */
vtn_foreach_execution_mode(b, b->entry_point,
nir_var->data.read_only = true;
nir_constant *c = rzalloc(nir_var, nir_constant);
- c->value.u[0] = b->shader->info.cs.local_size[0];
- c->value.u[1] = b->shader->info.cs.local_size[1];
- c->value.u[2] = b->shader->info.cs.local_size[2];
+ c->value.u[0] = b->shader->info->cs.local_size[0];
+ c->value.u[1] = b->shader->info->cs.local_size[1];
+ c->value.u[2] = b->shader->info->cs.local_size[2];
nir_var->constant_initializer = c;
break;
}
case SpvStorageClassUniformConstant:
if (without_array->block) {
var->mode = vtn_variable_mode_ubo;
- b->shader->info.num_ubos++;
+ b->shader->info->num_ubos++;
} else if (without_array->buffer_block) {
var->mode = vtn_variable_mode_ssbo;
- b->shader->info.num_ssbos++;
+ b->shader->info->num_ssbos++;
} else if (glsl_type_is_image(without_array->type)) {
var->mode = vtn_variable_mode_image;
nir_mode = nir_var_uniform;
- b->shader->info.num_images++;
+ b->shader->info->num_images++;
} else if (glsl_type_is_sampler(without_array->type)) {
var->mode = vtn_variable_mode_sampler;
nir_mode = nir_var_uniform;
- b->shader->info.num_textures++;
+ b->shader->info->num_textures++;
} else {
assert(!"Invalid uniform variable type");
}
exec_list_push_tail(&b->shader->inputs, &var->node);
for (int i = 0; i < array_size; i++)
- b->shader->info.inputs_read |= 1 << (var->data.location + i);
+ b->shader->info->inputs_read |= 1 << (var->data.location + i);
break;
case TGSI_FILE_OUTPUT: {
exec_list_push_tail(&b->shader->outputs, &var->node);
for (int i = 0; i < array_size; i++)
- b->shader->info.outputs_written |= 1 << (var->data.location + i);
+ b->shader->info->outputs_written |= 1 << (var->data.location + i);
}
break;
case TGSI_FILE_CONSTANT:
src = nir_src_for_ssa(&load->dest.ssa);
- b->shader->info.system_values_read |=
+ b->shader->info->system_values_read |=
(1 << nir_system_value_from_intrinsic(op));
break;
nir_intrinsic_instr *discard =
nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard);
nir_builder_instr_insert(b, &discard->instr);
- b->shader->info.fs.uses_discard = true;
+ b->shader->info->fs.uses_discard = true;
}
static void
nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard_if);
discard->src[0] = nir_src_for_ssa(cmp);
nir_builder_instr_insert(b, &discard->instr);
- b->shader->info.fs.uses_discard = true;
+ b->shader->info->fs.uses_discard = true;
}
static void
uint64_t
ir3_shader_outputs(const struct ir3_shader *so)
{
- return so->nir->info.outputs_written;
+ return so->nir->info->outputs_written;
}
/* This has to reach into the fd_context a bit more than the rest of
discard->num_components = 1;
discard->src[0] = nir_src_for_ssa(nir_inot(b, condition));
nir_builder_instr_insert(b, &discard->instr);
- c->s->info.fs.uses_discard = true;
+ c->s->info->fs.uses_discard = true;
}
static nir_ssa_def *
}
uint32_t discard_cond = QPU_COND_ALWAYS;
- if (c->s->info.fs.uses_discard) {
+ if (c->s->info->fs.uses_discard) {
qir_SF(c, c->discard);
discard_cond = QPU_COND_ZS;
}
static void
nir_to_qir(struct vc4_compile *c)
{
- if (c->stage == QSTAGE_FRAG && c->s->info.fs.uses_discard)
+ if (c->stage == QSTAGE_FRAG && c->s->info->fs.uses_discard)
c->discard = qir_MOV(c, qir_uniform_ui(c, 0));
ntq_setup_inputs(c);
/* Note: the temporary clone in c->s has been freed. */
nir_shader *orig_shader = key->shader_state->base.ir.nir;
- if (orig_shader->info.outputs_written & (1 << FRAG_RESULT_DEPTH))
+ if (orig_shader->info->outputs_written & (1 << FRAG_RESULT_DEPTH))
shader->disable_early_z = true;
} else {
shader->num_inputs = c->num_inputs;
prog_data->persample_msaa_dispatch = wm_prog_data.persample_dispatch;
prog_data->flat_inputs = wm_prog_data.flat_inputs;
prog_data->num_varying_inputs = wm_prog_data.num_varying_inputs;
- prog_data->inputs_read = nir->info.inputs_read;
+ prog_data->inputs_read = nir->info->inputs_read;
assert(wm_prog_data.base.nr_params == 0);
nir_builder b;
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
- b.shader->info.name = ralloc_strdup(b.shader, "BLORP-clear");
+ b.shader->info->name = ralloc_strdup(b.shader, "BLORP-clear");
nir_variable *v_color = nir_variable_create(b.shader, nir_var_shader_in,
glsl_vec4_type(), "v_color");
nir_validate_shader(nir);
/* Vulkan uses the separate-shader linking model */
- nir->info.separate_shader = true;
+ nir->info->separate_shader = true;
nir = brw_preprocess_nir(compiler, nir);
if (pipeline->layout && pipeline->layout->stage[stage].has_dynamic_offsets)
prog_data->nr_params += MAX_DYNAMIC_BUFFERS * 2;
- if (nir->info.num_images > 0) {
- prog_data->nr_params += nir->info.num_images * BRW_IMAGE_PARAM_SIZE;
+ if (nir->info->num_images > 0) {
+ prog_data->nr_params += nir->info->num_images * BRW_IMAGE_PARAM_SIZE;
pipeline->needs_data_cache = true;
}
((struct brw_cs_prog_data *)prog_data)->thread_local_id_index =
prog_data->nr_params++; /* The CS Thread ID uniform */
- if (nir->info.num_ssbos > 0)
+ if (nir->info->num_ssbos > 0)
pipeline->needs_data_cache = true;
if (prog_data->nr_params > 0) {
ralloc_steal(mem_ctx, nir);
- prog_data.inputs_read = nir->info.inputs_read;
+ prog_data.inputs_read = nir->info->inputs_read;
brw_compute_vue_map(&pipeline->device->info,
&prog_data.base.vue_map,
- nir->info.outputs_written,
- nir->info.separate_shader);
+ nir->info->outputs_written,
+ nir->info->separate_shader);
unsigned code_size;
const unsigned *shader_code =
brw_compute_vue_map(&pipeline->device->info,
&prog_data.base.vue_map,
- nir->info.outputs_written,
- nir->info.separate_shader);
+ nir->info->outputs_written,
+ nir->info->separate_shader);
unsigned code_size;
const unsigned *shader_code =
/* Resolve color buffers for non-coherent framebuffer fetch. */
if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
ctx->FragmentProgram._Current &&
- ctx->FragmentProgram._Current->Base.nir->info.outputs_read) {
+ ctx->FragmentProgram._Current->Base.nir->info->outputs_read) {
const struct gl_framebuffer *fb = ctx->DrawBuffer;
for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
* BRW_NEW_FRAGMENT_PROGRAM
*/
if (brw->gen == 4 && !brw->is_g4x &&
- (brw->fragment_program->Base.nir->info.inputs_read &
+ (brw->fragment_program->Base.nir->info->inputs_read &
(1 << VARYING_SLOT_POS))) {
BEGIN_BATCH(2);
OUT_BATCH(_3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP << 16 | (2 - 2));
}
if (brw->gen < 8 && !brw->is_haswell) {
- uint64_t mask = ctx->VertexProgram._Current->Base.nir->info.inputs_read;
+ uint64_t mask = ctx->VertexProgram._Current->Base.nir->info->inputs_read;
/* Prior to Haswell, the hardware can't natively support GL_FIXED or
* 2_10_10_10_REV vertex formats. Set appropriate workaround flags.
*/
int urb_next = 0;
/* Figure out where each of the incoming setup attributes lands. */
if (devinfo->gen >= 6) {
- if (_mesa_bitcount_64(nir->info.inputs_read &
+ if (_mesa_bitcount_64(nir->info->inputs_read &
BRW_FS_VARYING_INPUT_MASK) <= 16) {
/* The SF/SBE pipeline stage can do arbitrary rearrangement of the
* first 16 varying inputs, so we can put them wherever we want.
* a different vertex (or geometry) shader.
*/
for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
- if (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
+ if (nir->info->inputs_read & BRW_FS_VARYING_INPUT_MASK &
BITFIELD64_BIT(i)) {
prog_data->urb_setup[i] = urb_next++;
}
}
} else {
bool include_vue_header =
- nir->info.inputs_read & (VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT);
+ nir->info->inputs_read & (VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT);
/* We have enough input varyings that the SF/SBE pipeline stage can't
* arbitrarily rearrange them to suit our whim; we have to put them
struct brw_vue_map prev_stage_vue_map;
brw_compute_vue_map(devinfo, &prev_stage_vue_map,
key->input_slots_valid,
- nir->info.separate_shader);
+ nir->info->separate_shader);
int first_slot =
include_vue_header ? 0 : 2 * BRW_SF_URB_ENTRY_READ_OFFSET;
slot++) {
int varying = prev_stage_vue_map.slot_to_varying[slot];
if (varying != BRW_VARYING_SLOT_PAD &&
- (nir->info.inputs_read & BRW_FS_VARYING_INPUT_MASK &
+ (nir->info->inputs_read & BRW_FS_VARYING_INPUT_MASK &
BITFIELD64_BIT(varying))) {
prog_data->urb_setup[varying] = slot - first_slot;
}
*
* See compile_sf_prog() for more info.
*/
- if (nir->info.inputs_read & BITFIELD64_BIT(VARYING_SLOT_PNTC))
+ if (nir->info->inputs_read & BITFIELD64_BIT(VARYING_SLOT_PNTC))
prog_data->urb_setup[VARYING_SLOT_PNTC] = urb_next++;
}
struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
first_non_payload_grf +=
- 8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in;
+ 8 * vue_prog_data->urb_read_length * nir->info->gs.vertices_in;
foreach_block_and_inst(block, fs_inst, inst, cfg) {
/* Rewrite all ATTR file references to GRFs. */
/* R27: interpolated depth if uses source depth */
prog_data->uses_src_depth =
- (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
+ (nir->info->inputs_read & (1 << VARYING_SLOT_POS)) != 0;
if (prog_data->uses_src_depth) {
payload.source_depth_reg = payload.num_regs;
payload.num_regs++;
/* R29: interpolated W set if GEN6_WM_USES_SOURCE_W. */
prog_data->uses_src_w =
- (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
+ (nir->info->inputs_read & (1 << VARYING_SLOT_POS)) != 0;
if (prog_data->uses_src_w) {
payload.source_w_reg = payload.num_regs;
payload.num_regs++;
/* R31: MSAA position offsets. */
if (prog_data->persample_dispatch &&
- (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS)) {
+ (nir->info->system_values_read & SYSTEM_BIT_SAMPLE_POS)) {
/* From the Ivy Bridge PRM documentation for 3DSTATE_PS:
*
* "MSDISPMODE_PERSAMPLE is required in order to select
/* R32: MSAA input coverage mask */
prog_data->uses_sample_mask =
- (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
+ (nir->info->system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
if (prog_data->uses_sample_mask) {
assert(devinfo->gen >= 7);
payload.sample_mask_in_reg = payload.num_regs;
/* R34-: bary for 32-pixel. */
/* R58-59: interp W for 32-pixel. */
- if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
+ if (nir->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
source_depth_to_render_target = true;
}
}
* Note that the GS reads <URB Read Length> HWords for every vertex - so we
* have to multiply by VerticesIn to obtain the total storage requirement.
*/
- if (8 * vue_prog_data->urb_read_length * nir->info.gs.vertices_in >
+ if (8 * vue_prog_data->urb_read_length * nir->info->gs.vertices_in >
max_push_components || gs_prog_data->invocations > 1) {
gs_prog_data->base.include_vue_handles = true;
/* R3..RN: ICP Handles for each incoming vertex (when using pull model) */
- payload.num_regs += nir->info.gs.vertices_in;
+ payload.num_regs += nir->info->gs.vertices_in;
vue_prog_data->urb_read_length =
- ROUND_DOWN_TO(max_push_components / nir->info.gs.vertices_in, 8) / 8;
+ ROUND_DOWN_TO(max_push_components / nir->info->gs.vertices_in, 8) / 8;
}
}
if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
char filename[64]; \
snprintf(filename, 64, "%s%d-%s-%02d-%02d-" #pass, \
- stage_abbrev, dispatch_width, nir->info.name, iteration, pass_num); \
+ stage_abbrev, dispatch_width, nir->info->name, iteration, pass_num); \
\
backend_shader::dump_instructions(filename); \
} \
if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
char filename[64];
snprintf(filename, 64, "%s%d-%s-00-00-start",
- stage_abbrev, dispatch_width, nir->info.name);
+ stage_abbrev, dispatch_width, nir->info->name);
backend_shader::dump_instructions(filename);
}
}
/* Fix the dispatch mask */
- if (nir->info.tcs.vertices_out % 8) {
+ if (nir->info->tcs.vertices_out % 8) {
bld.CMP(bld.null_reg_ud(), invocation_id,
- brw_imm_ud(nir->info.tcs.vertices_out), BRW_CONDITIONAL_L);
+ brw_imm_ud(nir->info->tcs.vertices_out), BRW_CONDITIONAL_L);
bld.IF(BRW_PREDICATE_NORMAL);
}
emit_nir_code();
- if (nir->info.tcs.vertices_out % 8) {
+ if (nir->info->tcs.vertices_out % 8) {
bld.emit(BRW_OPCODE_ENDIF);
}
emit_shader_time_begin();
calculate_urb_setup();
- if (nir->info.inputs_read > 0 ||
- (nir->info.outputs_read > 0 && !wm_key->coherent_fb_fetch)) {
+ if (nir->info->inputs_read > 0 ||
+ (nir->info->outputs_read > 0 && !wm_key->coherent_fb_fetch)) {
if (devinfo->gen < 6)
emit_interpolation_setup_gen4();
else
static uint8_t
computed_depth_mode(const nir_shader *shader)
{
- if (shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
- switch (shader->info.fs.depth_layout) {
+ if (shader->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
+ switch (shader->info->fs.depth_layout) {
case FRAG_DEPTH_LAYOUT_NONE:
case FRAG_DEPTH_LAYOUT_ANY:
return BRW_PSCDEPTH_ON;
/* key->alpha_test_func means simulating alpha testing via discards,
* so the shader definitely kills pixels.
*/
- prog_data->uses_kill = shader->info.fs.uses_discard || key->alpha_test_func;
+ prog_data->uses_kill = shader->info->fs.uses_discard ||
+ key->alpha_test_func;
prog_data->uses_omask = key->multisample_fbo &&
- shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
+ shader->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
prog_data->computed_depth_mode = computed_depth_mode(shader);
prog_data->computed_stencil =
- shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
+ shader->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
prog_data->persample_dispatch =
key->multisample_fbo &&
(key->persample_interp ||
- (shader->info.system_values_read & (SYSTEM_BIT_SAMPLE_ID |
- SYSTEM_BIT_SAMPLE_POS)) ||
- shader->info.fs.uses_sample_qualifier ||
- shader->info.outputs_read);
+ (shader->info->system_values_read & (SYSTEM_BIT_SAMPLE_ID |
+ SYSTEM_BIT_SAMPLE_POS)) ||
+ shader->info->fs.uses_sample_qualifier ||
+ shader->info->outputs_read);
- prog_data->early_fragment_tests = shader->info.fs.early_fragment_tests;
+ prog_data->early_fragment_tests = shader->info->fs.early_fragment_tests;
prog_data->barycentric_interp_modes =
brw_compute_barycentric_interp_modes(compiler->devinfo, shader);
if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
g.enable_debug(ralloc_asprintf(mem_ctx, "%s fragment shader %s",
- shader->info.label ? shader->info.label :
- "unnamed",
- shader->info.name));
+ shader->info->label ?
+ shader->info->label : "unnamed",
+ shader->info->name));
}
if (simd8_cfg) {
brw_nir_lower_intrinsics(shader, &prog_data->base);
shader = brw_postprocess_nir(shader, compiler->devinfo, true);
- prog_data->local_size[0] = shader->info.cs.local_size[0];
- prog_data->local_size[1] = shader->info.cs.local_size[1];
- prog_data->local_size[2] = shader->info.cs.local_size[2];
+ prog_data->local_size[0] = shader->info->cs.local_size[0];
+ prog_data->local_size[1] = shader->info->cs.local_size[1];
+ prog_data->local_size[2] = shader->info->cs.local_size[2];
unsigned local_workgroup_size =
- shader->info.cs.local_size[0] * shader->info.cs.local_size[1] *
- shader->info.cs.local_size[2];
+ shader->info->cs.local_size[0] * shader->info->cs.local_size[1] *
+ shader->info->cs.local_size[2];
unsigned max_cs_threads = compiler->devinfo->max_cs_threads;
unsigned simd_required = DIV_ROUND_UP(local_workgroup_size, max_cs_threads);
MESA_SHADER_COMPUTE);
if (INTEL_DEBUG & DEBUG_CS) {
char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
- shader->info.label ? shader->info.label :
+ shader->info->label ? shader->info->label :
"unnamed",
- shader->info.name);
+ shader->info->name);
g.enable_debug(name);
}
* be recorded by transform feedback, we can simply discard all geometry
* bound to these streams when transform feedback is disabled.
*/
- if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
+ if (stream_id > 0 && !nir->info->has_transform_feedback_varyings)
return;
/* If we're outputting 32 control data bits or less, then we can wait
/* Use first_icp_handle as the base offset. There is one register
* of URB handles per vertex, so inform the register allocator that
- * we might read up to nir->info.gs.vertices_in registers.
+ * we might read up to nir->info->gs.vertices_in registers.
*/
bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
fs_reg(brw_vec8_grf(first_icp_handle, 0)),
fs_reg(icp_offset_bytes),
- brw_imm_ud(nir->info.gs.vertices_in * REG_SIZE));
+ brw_imm_ud(nir->info->gs.vertices_in * REG_SIZE));
}
} else {
assert(gs_prog_data->invocations > 1);
/* Use first_icp_handle as the base offset. There is one DWord
* of URB handles per vertex, so inform the register allocator that
- * we might read up to ceil(nir->info.gs.vertices_in / 8) registers.
+ * we might read up to ceil(nir->info->gs.vertices_in / 8) registers.
*/
bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
fs_reg(brw_vec8_grf(first_icp_handle, 0)),
fs_reg(icp_offset_bytes),
- brw_imm_ud(DIV_ROUND_UP(nir->info.gs.vertices_in, 8) *
+ brw_imm_ud(DIV_ROUND_UP(nir->info->gs.vertices_in, 8) *
REG_SIZE));
}
}
*/
brw_mark_surface_used(prog_data,
stage_prog_data->binding_table.ubo_start +
- nir->info.num_ubos - 1);
+ nir->info->num_ubos - 1);
}
nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
*/
brw_mark_surface_used(prog_data,
stage_prog_data->binding_table.ssbo_start +
- nir->info.num_ssbos - 1);
+ nir->info->num_ssbos - 1);
}
fs_reg offset_reg;
brw_mark_surface_used(prog_data,
stage_prog_data->binding_table.ssbo_start +
- nir->info.num_ssbos - 1);
+ nir->info->num_ssbos - 1);
}
/* Value */
*/
brw_mark_surface_used(prog_data,
stage_prog_data->binding_table.ssbo_start +
- nir->info.num_ssbos - 1);
+ nir->info->num_ssbos - 1);
}
fs_reg offset = get_nir_src(instr->src[1]);
fs_visitor::emit_vs_system_value(int location)
{
fs_reg *reg = new(this->mem_ctx)
- fs_reg(ATTR, 4 * (_mesa_bitcount_64(nir->info.inputs_read) +
- _mesa_bitcount_64(nir->info.double_inputs_read)),
+ fs_reg(ATTR, 4 * (_mesa_bitcount_64(nir->info->inputs_read) +
+ _mesa_bitcount_64(nir->info->double_inputs_read)),
BRW_REGISTER_TYPE_D);
struct brw_vs_prog_data *vs_prog_data = brw_vs_prog_data(prog_data);
vs_prog_data->uses_instanceid = true;
break;
case SYSTEM_VALUE_DRAW_ID:
- if (nir->info.system_values_read &
+ if (nir->info->system_values_read &
(BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
fs_reg src_depth, src_stencil;
if (source_depth_to_render_target) {
- if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
+ if (nir->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
src_depth = frag_depth;
else
src_depth = fs_reg(brw_vec8_grf(payload.source_depth_reg, 0));
}
- if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL))
+ if (nir->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL))
src_stencil = frag_stencil;
const fs_reg sources[] = {
limit_dispatch_width(8, "Depth writes unsupported in SIMD16+ mode.\n");
}
- if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
+ if (nir->info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
/* From the 'Render Target Write message' section of the docs:
* "Output Stencil is not supported with SIMD16 Render Target Write
* Messages."
&prog_data.base.base,
compiler->scalar_stage[MESA_SHADER_GEOMETRY]);
- uint64_t outputs_written = gp->program.Base.nir->info.outputs_written;
+ uint64_t outputs_written = gp->program.Base.nir->info->outputs_written;
prog_data.base.cull_distance_mask =
((1 << gp->program.Base.CullDistanceArraySize) - 1) <<
if (varying == VARYING_SLOT_BFC0 || varying == VARYING_SLOT_BFC1)
frag_attrib = varying - VARYING_SLOT_BFC0 + VARYING_SLOT_COL0;
- if (!(fprog->Base.nir->info.inputs_read & BITFIELD64_BIT(frag_attrib)))
+ if (!(fprog->Base.nir->info->inputs_read & BITFIELD64_BIT(frag_attrib)))
continue;
enum glsl_interp_mode mode = fprog->InterpQualifier[frag_attrib];
nir_foreach_function(function, nir) {
if (function->impl) {
nir_foreach_block(block, function->impl) {
- remap_vs_attrs(block, &nir->info);
+ remap_vs_attrs(block, nir->info);
}
}
}
{
nir_builder *b = &state->builder;
nir_shader *nir = state->nir;
- const unsigned *sizes = nir->info.cs.local_size;
+ const unsigned *sizes = nir->info->cs.local_size;
const unsigned group_size = sizes[0] * sizes[1] * sizes[2];
/* Some programs have local_size dimensions so small that the thread local
* (gl_WorkGroupSize.x * gl_WorkGroupSize.y)) %
* gl_WorkGroupSize.z;
*/
- unsigned *size = nir->info.cs.local_size;
+ unsigned *size = nir->info->cs.local_size;
nir_ssa_def *local_index = nir_load_local_invocation_index(b);
if (key.do_point_sprite) {
key.point_sprite_coord_replace = ctx->Point.CoordReplace & 0xff;
}
- if (brw->fragment_program->Base.nir->info.inputs_read &
+ if (brw->fragment_program->Base.nir->info->inputs_read &
BITFIELD64_BIT(VARYING_SLOT_PNTC)) {
key.do_point_coord = 1;
}
stage_name = _mesa_shader_stage_to_string(stage);
stage_abbrev = _mesa_shader_stage_to_abbrev(stage);
is_passthrough_shader =
- nir->info.name && strcmp(nir->info.name, "passthrough") == 0;
+ nir->info->name && strcmp(nir->info->name, "passthrough") == 0;
}
bool
stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
}
- if (prog->nir->info.uses_texture_gather) {
+ if (prog->nir->info->uses_texture_gather) {
if (devinfo->gen >= 8) {
stage_prog_data->binding_table.gather_texture_start =
stage_prog_data->binding_table.texture_start;
const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_EVAL];
nir_shader *nir = nir_shader_clone(mem_ctx, src_shader);
- nir->info.inputs_read = key->inputs_read;
- nir->info.patch_inputs_read = key->patch_inputs_read;
+ nir->info->inputs_read = key->inputs_read;
+ nir->info->patch_inputs_read = key->patch_inputs_read;
struct brw_vue_map input_vue_map;
brw_compute_tess_vue_map(&input_vue_map,
- nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
- nir->info.patch_inputs_read);
+ nir->info->inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
+ nir->info->patch_inputs_read);
nir = brw_nir_apply_sampler_key(nir, devinfo, &key->tex, is_scalar);
brw_nir_lower_tes_inputs(nir, &input_vue_map);
nir = brw_postprocess_nir(nir, compiler->devinfo, is_scalar);
brw_compute_vue_map(devinfo, &prog_data->base.vue_map,
- nir->info.outputs_written,
- nir->info.separate_shader);
+ nir->info->outputs_written,
+ nir->info->separate_shader);
unsigned output_size_bytes = prog_data->base.vue_map.num_slots * 4 * 4;
/* URB entry sizes are stored as a multiple of 64 bytes. */
prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
- bool need_patch_header = nir->info.system_values_read &
+ bool need_patch_header = nir->info->system_values_read &
(BITFIELD64_BIT(SYSTEM_VALUE_TESS_LEVEL_OUTER) |
BITFIELD64_BIT(SYSTEM_VALUE_TESS_LEVEL_INNER));
if (unlikely(INTEL_DEBUG & DEBUG_TES)) {
g.enable_debug(ralloc_asprintf(mem_ctx,
"%s tessellation evaluation shader %s",
- nir->info.label ? nir->info.label
+ nir->info->label ? nir->info->label
: "unnamed",
- nir->info.name));
+ nir->info->name));
}
g.generate_code(v.cfg, 8);
nir_ssa_def *invoc_id =
nir_load_system_value(&b, nir_intrinsic_load_invocation_id, 0);
- nir->info.inputs_read = key->outputs_written;
- nir->info.outputs_written = key->outputs_written;
- nir->info.tcs.vertices_out = key->input_vertices;
- nir->info.name = ralloc_strdup(nir, "passthrough");
+ nir->info->inputs_read = key->outputs_written;
+ nir->info->outputs_written = key->outputs_written;
+ nir->info->tcs.vertices_out = key->input_vertices;
+ nir->info->name = ralloc_strdup(nir, "passthrough");
nir->num_uniforms = 8 * sizeof(uint32_t);
var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_0");
struct brw_tcs_prog_key *key)
{
uint64_t per_vertex_slots =
- brw->tess_eval_program->Base.nir->info.inputs_read;
+ brw->tess_eval_program->Base.nir->info->inputs_read;
uint32_t per_patch_slots =
- brw->tess_eval_program->Base.nir->info.patch_inputs_read;
+ brw->tess_eval_program->Base.nir->info->patch_inputs_read;
struct brw_tess_ctrl_program *tcp =
(struct brw_tess_ctrl_program *) brw->tess_ctrl_program;
if (brw->tess_ctrl_program) {
per_vertex_slots |=
- brw->tess_ctrl_program->Base.nir->info.outputs_written;
+ brw->tess_ctrl_program->Base.nir->info->outputs_written;
per_patch_slots |=
- brw->tess_ctrl_program->Base.nir->info.patch_outputs_written;
+ brw->tess_ctrl_program->Base.nir->info->patch_outputs_written;
}
if (brw->gen < 8 || !tcp)
/* _NEW_TEXTURE */
brw_populate_sampler_prog_key_data(&brw->ctx, prog, &key->tex);
} else {
- key->outputs_written = tep->program.Base.nir->info.inputs_read;
+ key->outputs_written = tep->program.Base.nir->info->inputs_read;
}
}
key.tes_primitive_mode = GL_TRIANGLES;
}
- key.outputs_written = prog->nir->info.outputs_written;
- key.patch_outputs_written = prog->nir->info.patch_outputs_written;
+ key.outputs_written = prog->nir->info->outputs_written;
+ key.patch_outputs_written = prog->nir->info->patch_outputs_written;
success = brw_codegen_tcs_prog(brw, shader_prog, btcp, &key);
{
uint64_t per_vertex_slots =
- brw->tess_eval_program->Base.nir->info.inputs_read;
+ brw->tess_eval_program->Base.nir->info->inputs_read;
uint32_t per_patch_slots =
- brw->tess_eval_program->Base.nir->info.patch_inputs_read;
+ brw->tess_eval_program->Base.nir->info->patch_inputs_read;
struct brw_tess_eval_program *tep =
(struct brw_tess_eval_program *) brw->tess_eval_program;
*/
if (brw->tess_ctrl_program) {
per_vertex_slots |=
- brw->tess_ctrl_program->Base.nir->info.outputs_written;
+ brw->tess_ctrl_program->Base.nir->info->outputs_written;
per_patch_slots |=
- brw->tess_ctrl_program->Base.nir->info.patch_outputs_written;
+ brw->tess_ctrl_program->Base.nir->info->patch_outputs_written;
}
/* Ignore gl_TessLevelInner/Outer - we treat them as system values,
memset(&key, 0, sizeof(key));
key.program_string_id = btep->id;
- key.inputs_read = prog->nir->info.inputs_read;
- key.patch_inputs_read = prog->nir->info.patch_inputs_read;
+ key.inputs_read = prog->nir->info->inputs_read;
+ key.patch_inputs_read = prog->nir->info->patch_inputs_read;
if (shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL]) {
struct gl_program *tcp =
shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL]->Program;
- key.inputs_read |= tcp->nir->info.outputs_written;
- key.patch_inputs_read |= tcp->nir->info.patch_outputs_written;
+ key.inputs_read |= tcp->nir->info->outputs_written;
+ key.patch_inputs_read |= tcp->nir->info->patch_outputs_written;
}
/* Ignore gl_TessLevelInner/Outer - they're system values. */
if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
char filename[64]; \
snprintf(filename, 64, "%s-%s-%02d-%02d-" #pass, \
- stage_abbrev, nir->info.name, iteration, pass_num); \
+ stage_abbrev, nir->info->name, iteration, pass_num); \
\
backend_shader::dump_instructions(filename); \
} \
if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
char filename[64];
snprintf(filename, 64, "%s-%s-00-00-start",
- stage_abbrev, nir->info.name);
+ stage_abbrev, nir->info->name);
backend_shader::dump_instructions(filename);
}
/* gl_VertexID and gl_InstanceID are system values, but arrive via an
* incoming vertex attribute. So, add an extra slot.
*/
- if (shader->info.system_values_read &
+ if (shader->info->system_values_read &
(BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
}
/* gl_DrawID has its very own vec4 */
- if (shader->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID)) {
+ if (shader->info->system_values_read &
+ BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID)) {
nr_attributes++;
}
unsigned nr_attribute_slots =
nr_attributes +
- _mesa_bitcount_64(shader->info.double_inputs_read);
+ _mesa_bitcount_64(shader->info->double_inputs_read);
/* The 3DSTATE_VS documentation lists the lower bound on "Vertex URB Entry
* Read Length" as 1 in vec4 mode, and 0 in SIMD8 mode. Empirically, in
if (INTEL_DEBUG & DEBUG_VS) {
const char *debug_name =
ralloc_asprintf(mem_ctx, "%s vertex shader %s",
- shader->info.label ? shader->info.label : "unnamed",
- shader->info.name);
+ shader->info->label ? shader->info->label :
+ "unnamed",
+ shader->info->name);
g.enable_debug(debug_name);
}
if (unlikely(debug_flag)) {
fprintf(stderr, "Native code for %s %s shader %s:\n",
- nir->info.label ? nir->info.label : "unnamed",
- _mesa_shader_stage_to_string(nir->stage), nir->info.name);
+ nir->info->label ? nir->info->label : "unnamed",
+ _mesa_shader_stage_to_string(nir->stage), nir->info->name);
fprintf(stderr, "%s vec4 shader: %d instructions. %d loops. %u cycles. %d:%d "
"spills:fills. Compacted %d to %d bytes (%.0f%%)\n",
* so the total number of input slots that will be delivered to the GS (and
* thus the stride of the input arrays) is urb_read_length * 2.
*/
- const unsigned num_input_vertices = nir->info.gs.vertices_in;
+ const unsigned num_input_vertices = nir->info->gs.vertices_in;
assert(num_input_vertices <= MAX_GS_INPUT_VERTICES);
unsigned input_array_stride = prog_data->urb_read_length * 2;
* be recorded by transform feedback, we can simply discard all geometry
* bound to these streams when transform feedback is disabled.
*/
- if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
+ if (stream_id > 0 && !nir->info->has_transform_feedback_varyings)
return;
/* If we're outputting 32 control data bits or less, then we can wait
* written by previous stages and shows up via payload magic.
*/
GLbitfield64 inputs_read =
- shader->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID;
+ shader->info->inputs_read & ~VARYING_BIT_PRIMITIVE_ID;
brw_compute_vue_map(compiler->devinfo,
&c.input_vue_map, inputs_read,
- shader->info.separate_shader);
+ shader->info->separate_shader);
shader = brw_nir_apply_sampler_key(shader, compiler->devinfo, &key->tex,
is_scalar);
shader = brw_postprocess_nir(shader, compiler->devinfo, is_scalar);
prog_data->include_primitive_id =
- (shader->info.inputs_read & VARYING_BIT_PRIMITIVE_ID) != 0;
+ (shader->info->inputs_read & VARYING_BIT_PRIMITIVE_ID) != 0;
- prog_data->invocations = shader->info.gs.invocations;
+ prog_data->invocations = shader->info->gs.invocations;
if (compiler->devinfo->gen >= 8)
prog_data->static_vertex_count = nir_gs_count_vertices(shader);
if (compiler->devinfo->gen >= 7) {
- if (shader->info.gs.output_primitive == GL_POINTS) {
+ if (shader->info->gs.output_primitive == GL_POINTS) {
/* When the output type is points, the geometry shader may output data
* to multiple streams, and EndPrimitive() has no effect. So we
* configure the hardware to interpret the control data as stream ID.
* EndPrimitive().
*/
c.control_data_bits_per_vertex =
- shader->info.gs.uses_end_primitive ? 1 : 0;
+ shader->info->gs.uses_end_primitive ? 1 : 0;
}
} else {
/* There are no control data bits in gen6. */
c.control_data_bits_per_vertex = 0;
/* If it is using transform feedback, enable it */
- if (shader->info.has_transform_feedback_varyings)
+ if (shader->info->has_transform_feedback_varyings)
prog_data->gen6_xfb_enabled = true;
else
prog_data->gen6_xfb_enabled = false;
}
c.control_data_header_size_bits =
- shader->info.gs.vertices_out * c.control_data_bits_per_vertex;
+ shader->info->gs.vertices_out * c.control_data_bits_per_vertex;
/* 1 HWORD = 32 bytes = 256 bits */
prog_data->control_data_header_size_hwords =
unsigned output_size_bytes;
if (compiler->devinfo->gen >= 7) {
output_size_bytes =
- prog_data->output_vertex_size_hwords * 32 * shader->info.gs.vertices_out;
+ prog_data->output_vertex_size_hwords * 32 * shader->info->gs.vertices_out;
output_size_bytes += 32 * prog_data->control_data_header_size_hwords;
} else {
output_size_bytes = prog_data->output_vertex_size_hwords * 32;
prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 128) / 128;
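/* Worked example (hypothetical sizes): with output_vertex_size_hwords = 2,
 * vertices_out = 4 and one control data header HWORD, output_size_bytes =
 * 2 * 32 * 4 + 32 = 288, and ALIGN(288, 128) / 128 = 3, i.e. a URB entry
 * of 3 * 128 = 384 bytes.
 */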
prog_data->output_topology =
- get_hw_prim_for_gl_prim(shader->info.gs.output_primitive);
+ get_hw_prim_for_gl_prim(shader->info->gs.output_primitive);
- prog_data->vertices_in = shader->info.gs.vertices_in;
+ prog_data->vertices_in = shader->info->gs.vertices_in;
/* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
* need to program a URB read length of ceiling(num_slots / 2).
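/* As a sketch, the ceiling division described above could be written with
 * the DIV_ROUND_UP macro (assumed available from mesa's util/macros.h):
 *
 *    prog_data->urb_read_length = DIV_ROUND_UP(num_slots, 2);
 *
 * e.g. 5 input slots still require 3 full 256-bit reads.
 */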
false, MESA_SHADER_GEOMETRY);
if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
const char *label =
- shader->info.label ? shader->info.label : "unnamed";
+ shader->info->label ? shader->info->label : "unnamed";
char *name = ralloc_asprintf(mem_ctx, "%s geometry shader %s",
- label, shader->info.name);
+ label, shader->info->name);
g.enable_debug(name);
}
g.generate_code(v.cfg, 8);
brw_mark_surface_used(&prog_data->base,
prog_data->base.binding_table.ssbo_start +
- nir->info.num_ssbos - 1);
+ nir->info->num_ssbos - 1);
}
/* Offset */
*/
brw_mark_surface_used(&prog_data->base,
prog_data->base.binding_table.ssbo_start +
- nir->info.num_ssbos - 1);
+ nir->info->num_ssbos - 1);
}
src_reg offset_reg;
*/
brw_mark_surface_used(&prog_data->base,
prog_data->base.binding_table.ubo_start +
- nir->info.num_ubos - 1);
+ nir->info->num_ubos - 1);
}
src_reg offset;
*/
brw_mark_surface_used(&prog_data->base,
prog_data->base.binding_table.ssbo_start +
- nir->info.num_ssbos - 1);
+ nir->info->num_ssbos - 1);
}
src_reg offset = get_nir_src(instr->src[1], 1);
* HS instance dispatched will only have its bottom half doing real
* work, and so we need to disable the upper half:
*/
- if (nir->info.tcs.vertices_out % 2) {
+ if (nir->info->tcs.vertices_out % 2) {
emit(CMP(dst_null_d(), invocation_id,
- brw_imm_ud(nir->info.tcs.vertices_out), BRW_CONDITIONAL_L));
+ brw_imm_ud(nir->info->tcs.vertices_out), BRW_CONDITIONAL_L));
/* Matching ENDIF is in emit_thread_end() */
emit(IF(BRW_PREDICATE_NORMAL));
vec4_instruction *inst;
current_annotation = "thread end";
- if (nir->info.tcs.vertices_out % 2) {
+ if (nir->info->tcs.vertices_out % 2) {
emit(BRW_OPCODE_ENDIF);
}
const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_CTRL];
nir_shader *nir = nir_shader_clone(mem_ctx, src_shader);
- nir->info.outputs_written = key->outputs_written;
- nir->info.patch_outputs_written = key->patch_outputs_written;
+ nir->info->outputs_written = key->outputs_written;
+ nir->info->patch_outputs_written = key->patch_outputs_written;
struct brw_vue_map input_vue_map;
brw_compute_vue_map(devinfo, &input_vue_map,
- nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
+ nir->info->inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
true);
brw_compute_tess_vue_map(&vue_prog_data->vue_map,
- nir->info.outputs_written,
- nir->info.patch_outputs_written);
+ nir->info->outputs_written,
+ nir->info->patch_outputs_written);
nir = brw_nir_apply_sampler_key(nir, devinfo, &key->tex, is_scalar);
brw_nir_lower_vue_inputs(nir, is_scalar, &input_vue_map);
nir = brw_postprocess_nir(nir, compiler->devinfo, is_scalar);
if (is_scalar)
- prog_data->instances = DIV_ROUND_UP(nir->info.tcs.vertices_out, 8);
+ prog_data->instances = DIV_ROUND_UP(nir->info->tcs.vertices_out, 8);
else
- prog_data->instances = DIV_ROUND_UP(nir->info.tcs.vertices_out, 2);
+ prog_data->instances = DIV_ROUND_UP(nir->info->tcs.vertices_out, 2);
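/* Worked example (illustrative): with vertices_out = 5, the scalar path
 * launches DIV_ROUND_UP(5, 8) = 1 SIMD8 instance, while the vec4 path
 * launches DIV_ROUND_UP(5, 2) = 3 paired instances; the last pair only
 * has real work in its bottom half, which is exactly what the
 * vertices_out % 2 CMP/IF guard earlier in this patch disables.
 */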
/* Compute URB entry size. The maximum allowed URB entry size is 32k.
* That divides up as follows:
unsigned output_size_bytes = 0;
/* Note that the patch header is counted in num_per_patch_slots. */
output_size_bytes += num_per_patch_slots * 16;
- output_size_bytes += nir->info.tcs.vertices_out * num_per_vertex_slots * 16;
+ output_size_bytes += nir->info->tcs.vertices_out * num_per_vertex_slots * 16;
assert(output_size_bytes >= 1);
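/* Worked example (hypothetical slot counts): with num_per_patch_slots = 2
 * (patch header included) and num_per_vertex_slots = 4 for
 * vertices_out = 3, output_size_bytes = 2 * 16 + 3 * 4 * 16 = 224 bytes,
 * well under GEN7_MAX_HS_URB_ENTRY_SIZE_BYTES.
 */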
if (output_size_bytes > GEN7_MAX_HS_URB_ENTRY_SIZE_BYTES)
if (unlikely(INTEL_DEBUG & DEBUG_TCS)) {
g.enable_debug(ralloc_asprintf(mem_ctx,
"%s tessellation control shader %s",
- nir->info.label ? nir->info.label
+ nir->info->label ? nir->info->label
: "unnamed",
- nir->info.name));
+ nir->info->name));
}
g.generate_code(v.cfg, 8);
uint64_t outputs_written =
brw_vs_outputs_written(brw, key,
- vp->program.Base.nir->info.outputs_written);
- prog_data.inputs_read = vp->program.Base.nir->info.inputs_read;
+ vp->program.Base.nir->info->outputs_written);
+ prog_data.inputs_read = vp->program.Base.nir->info->inputs_read;
if (key->copy_edgeflag) {
prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
}
}
- if (prog->nir->info.outputs_written &
+ if (prog->nir->info->outputs_written &
(VARYING_BIT_COL0 | VARYING_BIT_COL1 | VARYING_BIT_BFC0 |
VARYING_BIT_BFC1)) {
/* _NEW_LIGHT | _NEW_BUFFERS */
brw_setup_tex_for_precompile(brw, &key.tex, prog);
key.program_string_id = bvp->id;
key.clamp_vertex_color =
- (prog->nir->info.outputs_written &
+ (prog->nir->info->outputs_written &
(VARYING_BIT_COL0 | VARYING_BIT_COL1 | VARYING_BIT_BFC0 |
VARYING_BIT_BFC1));
shader_prog, prog, &prog_data->base,
next_binding_table_offset);
- if (prog->nir->info.outputs_read && !key->coherent_fb_fetch) {
+ if (prog->nir->info->outputs_read && !key->coherent_fb_fetch) {
prog_data->binding_table.render_target_read_start =
next_binding_table_offset;
next_binding_table_offset += key->nr_color_regions;
* a shader w/a on IVB; fixable with just SCS on HSW.
*/
if (brw->gen == 7 && !brw->is_haswell &&
- prog->nir->info.uses_texture_gather) {
+ prog->nir->info->uses_texture_gather) {
if (img->InternalFormat == GL_RG32F)
key->gather_channel_quirk_mask |= 1 << s;
}
/* Gen6's gather4 is broken for UINT/SINT; we treat them as
* UNORM/FLOAT instead and fix it in the shader.
*/
- if (brw->gen == 6 && prog->nir->info.uses_texture_gather) {
+ if (brw->gen == 6 && prog->nir->info->uses_texture_gather) {
key->gen6_gather_wa[s] = gen6_gather_workaround(img->InternalFormat);
}
*/
if (brw->gen < 6) {
/* _NEW_COLOR */
- if (fp->program.Base.nir->info.fs.uses_discard ||
+ if (fp->program.Base.nir->info->fs.uses_discard ||
ctx->Color.AlphaEnabled) {
lookup |= IZ_PS_KILL_ALPHATEST_BIT;
}
- if (fp->program.Base.nir->info.outputs_written &
+ if (fp->program.Base.nir->info->outputs_written &
BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
lookup |= IZ_PS_COMPUTES_DEPTH_BIT;
}
/* BRW_NEW_VUE_MAP_GEOM_OUT */
if (brw->gen < 6 ||
- _mesa_bitcount_64(fp->program.Base.nir->info.inputs_read &
+ _mesa_bitcount_64(fp->program.Base.nir->info->inputs_read &
BRW_FS_VARYING_INPUT_MASK) > 16) {
key->input_slots_valid = brw->vue_map_geom_out.slots_valid;
}
memset(&key, 0, sizeof(key));
- uint64_t outputs_written = fp->Base.nir->info.outputs_written;
+ uint64_t outputs_written = fp->Base.nir->info->outputs_written;
if (brw->gen < 6) {
- if (fp->Base.nir->info.fs.uses_discard)
+ if (fp->Base.nir->info->fs.uses_discard)
key.iz_lookup |= IZ_PS_KILL_ALPHATEST_BIT;
if (outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
key.iz_lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;
}
- if (brw->gen < 6 || _mesa_bitcount_64(fp->Base.nir->info.inputs_read &
+ if (brw->gen < 6 || _mesa_bitcount_64(fp->Base.nir->info->inputs_read &
BRW_FS_VARYING_INPUT_MASK) > 16) {
key.input_slots_valid =
- fp->Base.nir->info.inputs_read | VARYING_BIT_POS;
+ fp->Base.nir->info->inputs_read | VARYING_BIT_POS;
}
brw_setup_tex_for_precompile(brw, &key.tex, &fp->Base);
}
prog_data->uses_src_depth =
- (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
+ (nir->info->inputs_read & (1 << VARYING_SLOT_POS)) != 0;
if (wm_iz_table[lookup].sd_present || prog_data->uses_src_depth ||
kill_stats_promoted_workaround) {
payload.source_depth_reg = reg;
/* _NEW_BUFFERS */
for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
- uint64_t outputs_written = fp->Base.nir->info.outputs_written;
+ uint64_t outputs_written = fp->Base.nir->info->outputs_written;
/* _NEW_COLOR */
if (rb && (outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR) ||
/* BRW_NEW_FRAGMENT_PROGRAM */
wm->wm5.program_uses_depth = prog_data->uses_src_depth;
- wm->wm5.program_computes_depth = (fp->Base.nir->info.outputs_written &
+ wm->wm5.program_computes_depth = (fp->Base.nir->info->outputs_written &
BITFIELD64_BIT(FRAG_RESULT_DEPTH)) != 0;
/* _NEW_BUFFERS
* Override for NULL depthbuffer case, required by the Pixel Shader Computed
/* BRW_NEW_FRAGMENT_PROGRAM */
if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
brw->fragment_program &&
- brw->fragment_program->Base.nir->info.outputs_read) {
+ brw->fragment_program->Base.nir->info->outputs_read) {
/* _NEW_BUFFERS */
const struct gl_framebuffer *fb = ctx->DrawBuffer;
* allows the surface format to be overridden for only the
* gather4 messages. */
if (brw->gen < 8) {
- if (vs && vs->nir->info.uses_texture_gather)
+ if (vs && vs->nir->info->uses_texture_gather)
update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
- if (tcs && tcs->nir->info.uses_texture_gather)
+ if (tcs && tcs->nir->info->uses_texture_gather)
update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
- if (tes && tes->nir->info.uses_texture_gather)
+ if (tes && tes->nir->info->uses_texture_gather)
update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
- if (gs && gs->nir->info.uses_texture_gather)
+ if (gs && gs->nir->info->uses_texture_gather)
update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
- if (fs && fs->nir->info.uses_texture_gather)
+ if (fs && fs->nir->info->uses_texture_gather)
update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
}
* gather4 messages.
*/
if (brw->gen < 8) {
- if (cs && cs->nir->info.uses_texture_gather)
+ if (cs && cs->nir->info->uses_texture_gather)
update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
}
this->vertex_output = src_reg(this,
glsl_type::uint_type,
(prog_data->vue_map.num_slots + 1) *
- nir->info.gs.vertices_out);
+ nir->info->gs.vertices_out);
this->vertex_output_offset = src_reg(this, glsl_type::uint_type);
emit(MOV(dst_reg(this->vertex_output_offset), brw_imm_ud(0u)));
dst_reg dst(this->vertex_output);
dst.reladdr = ralloc(mem_ctx, src_reg);
memcpy(dst.reladdr, &this->vertex_output_offset, sizeof(src_reg));
- if (nir->info.gs.output_primitive == GL_POINTS) {
+ if (nir->info->gs.output_primitive == GL_POINTS) {
/* If we are outputting points, then every vertex has PrimStart and
* PrimEnd set.
*/
/* Calling EndPrimitive() is optional for point output. In this case we set
* the PrimEnd flag when we process EmitVertex().
*/
- if (nir->info.gs.output_primitive == GL_POINTS)
+ if (nir->info->gs.output_primitive == GL_POINTS)
return;
/* Otherwise we know that the last vertex we have processed was the last
* comparison below (hence the num_output_vertices + 1 in the comparison
* below).
*/
- unsigned num_output_vertices = nir->info.gs.vertices_out;
+ unsigned num_output_vertices = nir->info->gs.vertices_out;
emit(CMP(dst_null_ud(), this->vertex_count,
brw_imm_ud(num_output_vertices + 1), BRW_CONDITIONAL_L));
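/* Worked example: with vertices_out = 4, this CMP tests
 * vertex_count < 4 + 1, i.e. it passes as long as no more than the
 * declared maximum of 4 vertices has been emitted.
 */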
vec4_instruction *inst = emit(CMP(dst_null_ud(),
* first_vertex is not zero. This is only relevant for outputs other than
* points because in the point case we set PrimEnd on all vertices.
*/
- if (nir->info.gs.output_primitive != GL_POINTS) {
+ if (nir->info->gs.output_primitive != GL_POINTS) {
emit(CMP(dst_null_ud(), this->first_vertex, brw_imm_ud(0u), BRW_CONDITIONAL_Z));
emit(IF(BRW_PREDICATE_NORMAL));
gs_end_primitive();
emit(BRW_OPCODE_ENDIF);
/* Write transform feedback data for all processed vertices. */
- for (int i = 0; i < (int)nir->info.gs.vertices_out; i++) {
+ for (int i = 0; i < (int)nir->info->gs.vertices_out; i++) {
emit(MOV(dst_reg(sol_temp), brw_imm_d(i)));
emit(CMP(dst_null_d(), sol_temp, this->vertex_count,
BRW_CONDITIONAL_L));
*/
bool fs_needs_vue_header =
- brw->fragment_program->Base.nir->info.inputs_read &
+ brw->fragment_program->Base.nir->info->inputs_read &
(VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT);
*urb_entry_read_offset = fs_needs_vue_header ? 0 : 1;
/* prepare the active component dwords */
int input_index = 0;
for (int attr = 0; attr < VARYING_SLOT_MAX; attr++) {
- if (!(brw->fragment_program->Base.nir->info.inputs_read &
+ if (!(brw->fragment_program->Base.nir->info->inputs_read &
BITFIELD64_BIT(attr))) {
continue;
}
compiler->devinfo = devinfo;
prog_data = ralloc(NULL, struct brw_wm_prog_data);
- nir_shader *shader = nir_shader_create(NULL, MESA_SHADER_FRAGMENT, NULL);
+ nir_shader *shader =
+ nir_shader_create(NULL, MESA_SHADER_FRAGMENT, NULL, NULL);
v = new cmod_propagation_fs_visitor(compiler, prog_data, shader);
compiler->devinfo = devinfo;
prog_data = ralloc(NULL, struct brw_wm_prog_data);
- nir_shader *shader = nir_shader_create(NULL, MESA_SHADER_FRAGMENT, NULL);
+ nir_shader *shader =
+ nir_shader_create(NULL, MESA_SHADER_FRAGMENT, NULL, NULL);
v = new saturate_propagation_fs_visitor(compiler, prog_data, shader);
prog_data = (struct brw_vue_prog_data *)calloc(1, sizeof(*prog_data));
compiler->devinfo = devinfo;
- nir_shader *shader = nir_shader_create(NULL, MESA_SHADER_VERTEX, NULL);
+ nir_shader *shader =
+ nir_shader_create(NULL, MESA_SHADER_VERTEX, NULL, NULL);
v = new cmod_propagation_vec4_visitor(compiler, shader, prog_data);
prog_data = (struct brw_vue_prog_data *)calloc(1, sizeof(*prog_data));
compiler->devinfo = devinfo;
- nir_shader *shader = nir_shader_create(NULL, MESA_SHADER_VERTEX, NULL);
+ nir_shader *shader =
+ nir_shader_create(NULL, MESA_SHADER_VERTEX, NULL, NULL);
v = new copy_propagation_vec4_visitor(compiler, shader, prog_data);
prog_data = (struct brw_vue_prog_data *)calloc(1, sizeof(*prog_data));
compiler->devinfo = devinfo;
- nir_shader *shader = nir_shader_create(NULL, MESA_SHADER_VERTEX, NULL);
+ nir_shader *shader =
+ nir_shader_create(NULL, MESA_SHADER_VERTEX, NULL, NULL);
v = new register_coalesce_vec4_visitor(compiler, shader, prog_data);
ptn_add_output_stores(c);
- s->info.name = ralloc_asprintf(s, "ARB%d", prog->Id);
- s->info.num_textures = util_last_bit(prog->SamplersUsed);
- s->info.num_ubos = 0;
- s->info.num_abos = 0;
- s->info.num_ssbos = 0;
- s->info.num_images = 0;
- s->info.inputs_read = prog->InputsRead;
- s->info.outputs_written = prog->OutputsWritten;
- s->info.system_values_read = prog->SystemValuesRead;
- s->info.uses_texture_gather = false;
- s->info.uses_clip_distance_out = false;
- s->info.separate_shader = false;
+ s->info->name = ralloc_asprintf(s, "ARB%d", prog->Id);
+ s->info->num_textures = util_last_bit(prog->SamplersUsed);
+ s->info->num_ubos = 0;
+ s->info->num_abos = 0;
+ s->info->num_ssbos = 0;
+ s->info->num_images = 0;
+ s->info->inputs_read = prog->InputsRead;
+ s->info->outputs_written = prog->OutputsWritten;
+ s->info->system_values_read = prog->SystemValuesRead;
+ s->info->uses_texture_gather = false;
+ s->info->uses_clip_distance_out = false;
+ s->info->separate_shader = false;
if (stage == MESA_SHADER_FRAGMENT) {
struct gl_fragment_program *fp = (struct gl_fragment_program *)prog;
- s->info.fs.uses_discard = fp->UsesKill;
+ s->info->fs.uses_discard = fp->UsesKill;
}
fail: