"SIMD8 at a 10-20%% performance cost: %s", v2.fail_msg);
} else {
// Use simd16 unless it looks more than some ratio worse than simd8
- if (!_mesa_use_glass(&brw->ctx) ||
- ((v2.estimated_clocks * 7 / 8) < v.estimated_clocks))
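+ // Take simd16 if its estimated clocks are under 8/7 (~14% worse) of simd8's; otherwise keep simd8.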
+ if ((v2.estimated_clocks * 7 / 8) < v.estimated_clocks)
simd16_instructions = &v2.instructions;
}
} else {
// populate some other fields that would normally be set at draw time
brw->wm.base.sampler_count = _mesa_fls(fp->Base.SamplersUsed);
+ // Others? ...
brw_wm_clear_compile(brw, c);
{
fs_inst *inst = NULL;
- int sampler = 0;
// LunarG : TODO - hook these values up from descriptor set
+ int sampler = 0; // = _mesa_get_sampler_uniform_value(ir->sampler, shader_prog, prog);
-// _mesa_get_sampler_uniform_value(ir->sampler, shader_prog, prog);
/* FINISHME: We're failing to recompile our programs when the sampler is
* updated. This only matters for the texture rectangle scale parameters
* (pre-gen6, or gen6+ with GL_CLAMP).
*/
- // LunarG : TODO - hook these values up from descriptor set
- int texunit = 0;//prog->SamplerUnits[sampler];
+ // LunarG : TODO - hook these values up from descriptor set
+ int texunit = 0;// = prog->SamplerUnits[sampler];
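+ // Until the descriptor set plumbing exists, both sampler and texunit stay hard-coded to 0.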
if (ir->op == ir_tg4) {
/* When tg4 is used with the degenerate ZERO/ONE swizzles, don't bother
}
+// LunarG : Remove - Most of this is shader-time related; we may turn it back on later
+
//static GLboolean
//brwIsProgramNative(struct gl_context *ctx,
// GLenum target,
return (struct brw_shader_program *) prog;
}
+// LunarG : ADD - These expose the results of the shader compile. There may
+// be another way to get this data; if so, revisit this.
+
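+// Hypothetical caller: struct brw_wm_prog_data *wm = get_wm_prog_data(shProg);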
struct brw_wm_prog_data *get_wm_prog_data(struct gl_shader_program *prog)
{
struct brw_shader_program *brw_prog = (struct brw_shader_program *) prog;
lower_packing_builtins(ir, ops);
}
+// LunarG : ADD - We re-indented this whole function to make it readable
GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{
- struct brw_context *brw = brw_context(ctx);
- unsigned int stage;
-
- for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
- const struct gl_shader_compiler_options *options =
- &ctx->ShaderCompilerOptions[stage];
- struct brw_shader *shader =
- (struct brw_shader *)shProg->_LinkedShaders[stage];
-
- if (!shader)
- continue;
-
- struct gl_program *prog =
-// ctx->Driver.NewProgram(ctx, _mesa_shader_stage_to_program(stage),
-// shader->base.Name);
- brwNewProgram(ctx, _mesa_shader_stage_to_program(stage),
- shader->base.Name);
- if (!prog)
- return false;
- prog->Parameters = _mesa_new_parameter_list();
-
- // LunarG: TODO - Need this??
- //_mesa_copy_linked_program_data((gl_shader_stage) stage, shProg, prog);
-
- bool progress;
-
- /* lower_packing_builtins() inserts arithmetic instructions, so it
- * must precede lower_instructions().
- */
- brw_lower_packing_builtins(brw, (gl_shader_stage) stage, shader->base.ir);
- do_mat_op_to_vec(shader->base.ir);
- const int bitfield_insert = brw->gen >= 7
- ? BITFIELD_INSERT_TO_BFM_BFI
- : 0;
- lower_instructions(shader->base.ir,
- MOD_TO_FRACT |
- DIV_TO_MUL_RCP |
- SUB_TO_ADD_NEG |
- EXP_TO_EXP2 |
- LOG_TO_LOG2 |
- bitfield_insert |
- LDEXP_TO_ARITH);
-
- /* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this,
- * if-statements need to be flattened.
- */
- if (brw->gen < 6)
- lower_if_to_cond_assign(shader->base.ir, 16);
-
- do_lower_texture_projection(shader->base.ir);
- brw_lower_texture_gradients(brw, shader->base.ir);
- do_vec_index_to_cond_assign(shader->base.ir);
- lower_vector_insert(shader->base.ir, true);
- brw_do_cubemap_normalize(shader->base.ir);
- lower_offset_arrays(shader->base.ir);
- brw_do_lower_unnormalized_offset(shader->base.ir);
- lower_noise(shader->base.ir);
- lower_quadop_vector(shader->base.ir, false);
-
- bool lowered_variable_indexing =
- lower_variable_index_to_cond_assign(shader->base.ir,
- options->EmitNoIndirectInput,
- options->EmitNoIndirectOutput,
- options->EmitNoIndirectTemp,
- options->EmitNoIndirectUniform);
-
- if (unlikely(brw->perf_debug && lowered_variable_indexing)) {
- perf_debug("Unsupported form of variable indexing in FS; falling "
- "back to very inefficient code generation\n");
- }
-
- lower_ubo_reference(&shader->base, shader->base.ir);
-
- do {
- progress = false;
-
- if (stage == MESA_SHADER_FRAGMENT) {
- brw_do_channel_expressions(shader->base.ir);
- brw_do_vector_splitting(shader->base.ir);
- }
-
- progress = do_lower_jumps(shader->base.ir, true, true,
- true, /* main return */
- false, /* continue */
- false /* loops */
- ) || progress;
-
- progress = do_common_optimization(shader->base.ir, true, true,
- options, ctx->Const.NativeIntegers)
- || progress;
- } while (progress);
-
- /* Make a pass over the IR to add state references for any built-in
- * uniforms that are used. This has to be done now (during linking).
- * Code generation doesn't happen until the first time this shader is
- * used for rendering. Waiting until then to generate the parameters is
- * too late. At that point, the values for the built-in uniforms won't
- * get sent to the shader.
- */
- foreach_list(node, shader->base.ir) {
- ir_variable *var = ((ir_instruction *) node)->as_variable();
-
- if ((var == NULL) || (var->data.mode != ir_var_uniform)
- || (strncmp(var->name, "gl_", 3) != 0))
- continue;
-
- const ir_state_slot *const slots = var->state_slots;
- assert(var->state_slots != NULL);
-
- for (unsigned int i = 0; i < var->num_state_slots; i++) {
- _mesa_add_state_reference(prog->Parameters,
- (gl_state_index *) slots[i].tokens);
- }
- }
-
- validate_ir_tree(shader->base.ir);
-
- do_set_program_inouts(shader->base.ir, prog, shader->base.Stage);
-
- prog->SamplersUsed = shader->base.active_samplers;
+ struct brw_context *brw = brw_context(ctx);
+ unsigned int stage;
- // LunarG : TODO - update resource map instead
-// _mesa_update_shader_textures_used(shProg, prog);
-
- _mesa_reference_program(ctx, &shader->base.Program, prog);
-
- // LunarG : TODO - rectangle support
-// brw_add_texrect_params(prog);
-
- /* This has to be done last. Any operation that can cause
- * prog->ParameterValues to get reallocated (e.g., anything that adds a
- * program constant) has to happen before creating this linkage.
- */
- // LunarG : TODO - uniform support
-// _mesa_associate_uniform_storage(ctx, shProg, prog->Parameters);
-
- _mesa_reference_program(ctx, &prog, NULL);
-
- if (ctx->GlslFlags & GLSL_DUMP) {
- fprintf(stderr, "\n");
- fprintf(stderr, "GLSL IR for linked %s program %d:\n",
- _mesa_shader_stage_to_string(shader->base.Stage),
- shProg->Name);
- _mesa_print_ir(stderr, shader->base.ir, NULL);
- fprintf(stderr, "\n");
- }
- }
+ for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
+ const struct gl_shader_compiler_options *options =
+ &ctx->ShaderCompilerOptions[stage];
+ struct brw_shader *shader =
+ (struct brw_shader *)shProg->_LinkedShaders[stage];
- if ((ctx->GlslFlags & GLSL_DUMP) && shProg->Name != 0) {
- for (unsigned i = 0; i < shProg->NumShaders; i++) {
- const struct gl_shader *sh = shProg->Shaders[i];
- if (!sh)
+ if (!shader)
continue;
- fprintf(stderr, "GLSL %s shader %d source for linked program %d:\n",
- _mesa_shader_stage_to_string(sh->Stage),
- i, shProg->Name);
- fprintf(stderr, "%s", sh->Source);
- fprintf(stderr, "\n");
- }
- }
-
- if (!brw_shader_precompile(ctx, shProg))
- return false;
-
- return true;
+ struct gl_program *prog =
+ // LunarG : Call brwNewProgram directly instead of the ctx->Driver.NewProgram hook
+ // ctx->Driver.NewProgram(ctx, _mesa_shader_stage_to_program(stage),
+ // shader->base.Name);
+ brwNewProgram(ctx, _mesa_shader_stage_to_program(stage),
+ shader->base.Name);
+ if (!prog)
+ return false;
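+ /* Start with an empty parameter list; the built-in-uniform walk below adds its state references here. */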
+ prog->Parameters = _mesa_new_parameter_list();
+
+ // LunarG : TODO - Is this needed?
+ //_mesa_copy_linked_program_data((gl_shader_stage) stage, shProg, prog);
+
+ bool progress;
+
+ /* lower_packing_builtins() inserts arithmetic instructions, so it
+ * must precede lower_instructions().
+ */
+ brw_lower_packing_builtins(brw, (gl_shader_stage) stage, shader->base.ir);
+ do_mat_op_to_vec(shader->base.ir);
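+ /* Gen7+ has the BFM/BFI instructions, so bitfieldInsert() is lowered to them only there. */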
+ const int bitfield_insert = brw->gen >= 7
+ ? BITFIELD_INSERT_TO_BFM_BFI
+ : 0;
+ lower_instructions(shader->base.ir,
+ MOD_TO_FRACT |
+ DIV_TO_MUL_RCP |
+ SUB_TO_ADD_NEG |
+ EXP_TO_EXP2 |
+ LOG_TO_LOG2 |
+ bitfield_insert |
+ LDEXP_TO_ARITH);
+
+ /* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this,
+ * if-statements need to be flattened.
+ */
+ if (brw->gen < 6)
+ lower_if_to_cond_assign(shader->base.ir, 16);
+
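+ /* Lower texture projection, gradients, vector indexing, cubemap normalization, etc. into forms the backend can consume. */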
+ do_lower_texture_projection(shader->base.ir);
+ brw_lower_texture_gradients(brw, shader->base.ir);
+ do_vec_index_to_cond_assign(shader->base.ir);
+ lower_vector_insert(shader->base.ir, true);
+ brw_do_cubemap_normalize(shader->base.ir);
+ lower_offset_arrays(shader->base.ir);
+ brw_do_lower_unnormalized_offset(shader->base.ir);
+ lower_noise(shader->base.ir);
+ lower_quadop_vector(shader->base.ir, false);
+
+ bool lowered_variable_indexing =
+ lower_variable_index_to_cond_assign(shader->base.ir,
+ options->EmitNoIndirectInput,
+ options->EmitNoIndirectOutput,
+ options->EmitNoIndirectTemp,
+ options->EmitNoIndirectUniform);
+
+ if (unlikely(brw->perf_debug && lowered_variable_indexing)) {
+ perf_debug("Unsupported form of variable indexing in FS; falling "
+ "back to very inefficient code generation\n");
+ }
+
+ lower_ubo_reference(&shader->base, shader->base.ir);
+
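+ /* Iterate the jump lowering and common optimizations until they reach a fixed point. */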
+ do {
+ progress = false;
+
+ if (stage == MESA_SHADER_FRAGMENT) {
+ brw_do_channel_expressions(shader->base.ir);
+ brw_do_vector_splitting(shader->base.ir);
+ }
+
+ progress = do_lower_jumps(shader->base.ir, true, true,
+ true, /* main return */
+ false, /* continue */
+ false /* loops */
+ ) || progress;
+
+ progress = do_common_optimization(shader->base.ir, true, true,
+ options, ctx->Const.NativeIntegers)
+ || progress;
+ } while (progress);
+
+ /* Make a pass over the IR to add state references for any built-in
+ * uniforms that are used. This has to be done now (during linking).
+ * Code generation doesn't happen until the first time this shader is
+ * used for rendering. Waiting until then to generate the parameters is
+ * too late. At that point, the values for the built-in uniforms won't
+ * get sent to the shader.
+ */
+ foreach_list(node, shader->base.ir) {
+ ir_variable *var = ((ir_instruction *) node)->as_variable();
+
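+ /* Only built-in ("gl_") uniforms carry the state slots referenced below. */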
+ if ((var == NULL) || (var->data.mode != ir_var_uniform)
+ || (strncmp(var->name, "gl_", 3) != 0))
+ continue;
+
+ const ir_state_slot *const slots = var->state_slots;
+ assert(var->state_slots != NULL);
+
+ for (unsigned int i = 0; i < var->num_state_slots; i++) {
+ _mesa_add_state_reference(prog->Parameters,
+ (gl_state_index *) slots[i].tokens);
+ }
+ }
+
+ validate_ir_tree(shader->base.ir);
+
+ do_set_program_inouts(shader->base.ir, prog, shader->base.Stage);
+
+ prog->SamplersUsed = shader->base.active_samplers;
+
+ // LunarG : TODO - update resource map instead
+ // _mesa_update_shader_textures_used(shProg, prog);
+
+ _mesa_reference_program(ctx, &shader->base.Program, prog);
+
+ // LunarG : TODO - rectangle support
+ // brw_add_texrect_params(prog);
+
+ /* This has to be done last. Any operation that can cause
+ * prog->ParameterValues to get reallocated (e.g., anything that adds a
+ * program constant) has to happen before creating this linkage.
+ */
+ // LunarG : TODO - uniform support
+ // _mesa_associate_uniform_storage(ctx, shProg, prog->Parameters);
+
+ _mesa_reference_program(ctx, &prog, NULL);
+
+ if (ctx->GlslFlags & GLSL_DUMP) {
+ fprintf(stderr, "\n");
+ fprintf(stderr, "GLSL IR for linked %s program %d:\n",
+ _mesa_shader_stage_to_string(shader->base.Stage),
+ shProg->Name);
+ _mesa_print_ir(stderr, shader->base.ir, NULL);
+ fprintf(stderr, "\n");
+ }
+ }
+
+ if ((ctx->GlslFlags & GLSL_DUMP) && shProg->Name != 0) {
+ for (unsigned i = 0; i < shProg->NumShaders; i++) {
+ const struct gl_shader *sh = shProg->Shaders[i];
+ if (!sh)
+ continue;
+
+ fprintf(stderr, "GLSL %s shader %d source for linked program %d:\n",
+ _mesa_shader_stage_to_string(sh->Stage),
+ i, shProg->Name);
+ fprintf(stderr, "%s", sh->Source);
+ fprintf(stderr, "\n");
+ }
+ }
+
+ if (!brw_shader_precompile(ctx, shProg))
+ return false;
+
+ return true;
}
return true;
}
-//static void
-//brw_vs_upload_compile(struct brw_context *brw, const struct brw_vs_compile *c)
-//{
-// /* Scratch space is used for register spilling */
-// if (c->prog_data.base.total_scratch) {
-// perf_debug("Vertex shader triggered register spilling. "
-// "Try reducing the number of live vec4 values to "
-// "improve performance.\n");
-
-// brw_get_scratch_bo(brw, &brw->vs.base.scratch_bo,
-// c->prog_data.base.total_scratch * brw->max_vs_threads);
-// }
-
-// brw_upload_cache(&brw->cache, BRW_VS_PROG,
-// &c->key, sizeof(c->key),
-// c->base.program, c->base.program_size,
-// &c->prog_data, sizeof(c->prog_data),
-// &brw->vs.base.prog_offset, &brw->vs.prog_data);
-//}
-
static void
brw_vs_clear_compile(struct brw_context *brw,
struct brw_vs_compile *c)
ralloc_free(c->base.mem_ctx);
}
-//static bool
-//do_vs_prog(struct brw_context *brw,
-// struct gl_shader_program *prog,
-// struct brw_vertex_program *vp,
-// struct brw_vs_prog_key *key)
-//{
-// struct brw_vs_compile c;
-
-// brw_vs_init_compile(brw, prog, vp, key, &c);
-
-// if (!prog || !brw_shader_program_restore_vs_compile(prog, &c)) {
-// if (!brw_vs_do_compile(brw, &c)) {
-// brw_vs_clear_compile(brw, &c);
-// return false;
-// }
-// }
-
-// brw_vs_upload_compile(brw, &c);
-// brw_vs_clear_compile(brw, &c);
-
-// return true;
-//}
-
-//static bool
-//key_debug(struct brw_context *brw, const char *name, int a, int b)
-//{
-// if (a != b) {
-// perf_debug(" %s %d->%d\n", name, a, b);
-// return true;
-// }
-// return false;
-//}
-
-//void
-//brw_vs_debug_recompile(struct brw_context *brw,
-// struct gl_shader_program *prog,
-// const struct brw_vs_prog_key *key)
-//{
-// struct brw_cache_item *c = NULL;
-// const struct brw_vs_prog_key *old_key = NULL;
-// bool found = false;
-
-// perf_debug("Recompiling vertex shader for program %d\n", prog->Name);
-
-// for (unsigned int i = 0; i < brw->cache.size; i++) {
-// for (c = brw->cache.items[i]; c; c = c->next) {
-// if (c->cache_id == BRW_VS_PROG) {
-// old_key = c->key;
-
-// if (old_key->base.program_string_id == key->base.program_string_id)
-// break;
-// }
-// }
-// if (c)
-// break;
-// }
-
-// if (!c) {
-// perf_debug(" Didn't find previous compile in the shader cache for "
-// "debug\n");
-// return;
-// }
-
-// for (unsigned int i = 0; i < VERT_ATTRIB_MAX; i++) {
-// found |= key_debug(brw, "Vertex attrib w/a flags",
-// old_key->gl_attrib_wa_flags[i],
-// key->gl_attrib_wa_flags[i]);
-// }
-
-// found |= key_debug(brw, "user clip flags",
-// old_key->base.userclip_active, key->base.userclip_active);
-
-// found |= key_debug(brw, "user clipping planes as push constants",
-// old_key->base.nr_userclip_plane_consts,
-// key->base.nr_userclip_plane_consts);
-
-// found |= key_debug(brw, "copy edgeflag",
-// old_key->copy_edgeflag, key->copy_edgeflag);
-// found |= key_debug(brw, "PointCoord replace",
-// old_key->point_coord_replace, key->point_coord_replace);
-// found |= key_debug(brw, "vertex color clamping",
-// old_key->base.clamp_vertex_color, key->base.clamp_vertex_color);
-
-// found |= brw_debug_recompile_sampler_key(brw, &old_key->base.tex,
-// &key->base.tex);
-
-// if (!found) {
-// perf_debug(" Something else\n");
-// }
-//}
-
// LunarG : TODO - user clip planes?
//void
//brw_setup_vec4_key_clip_info(struct brw_context *brw,
// }
//}
-
-//static void brw_upload_vs_prog(struct brw_context *brw)
-//{
-// struct gl_context *ctx = &brw->ctx;
-// struct brw_vs_prog_key key;
-// /* BRW_NEW_VERTEX_PROGRAM */
-// struct brw_vertex_program *vp =
-// (struct brw_vertex_program *)brw->vertex_program;
-// struct gl_program *prog = (struct gl_program *) brw->vertex_program;
-// int i;
-
-// memset(&key, 0, sizeof(key));
-
-// /* Just upload the program verbatim for now. Always send it all
-// * the inputs it asks for, whether they are varying or not.
-// */
-// key.base.program_string_id = vp->id;
-// brw_setup_vec4_key_clip_info(brw, &key.base,
-// vp->program.Base.UsesClipDistanceOut);
-
-// /* _NEW_POLYGON */
-// if (brw->gen < 6) {
-// key.copy_edgeflag = (ctx->Polygon.FrontMode != GL_FILL ||
-// ctx->Polygon.BackMode != GL_FILL);
-// }
-
-// /* _NEW_LIGHT | _NEW_BUFFERS */
-// key.base.clamp_vertex_color = ctx->Light._ClampVertexColor;
-
-// /* _NEW_POINT */
-// if (brw->gen < 6 && ctx->Point.PointSprite) {
-// for (i = 0; i < 8; i++) {
-// if (ctx->Point.CoordReplace[i])
-// key.point_coord_replace |= (1 << i);
-// }
-// }
-
-// /* _NEW_TEXTURE */
-// brw_populate_sampler_prog_key_data(ctx, prog, brw->vs.base.sampler_count,
-// &key.base.tex);
-
-// /* BRW_NEW_VERTICES */
-// if (brw->gen < 8 && !brw->is_haswell) {
-// /* Prior to Haswell, the hardware can't natively support GL_FIXED or
-// * 2_10_10_10_REV vertex formats. Set appropriate workaround flags.
-// */
-// for (i = 0; i < VERT_ATTRIB_MAX; i++) {
-// if (!(vp->program.Base.InputsRead & BITFIELD64_BIT(i)))
-// continue;
-
-// uint8_t wa_flags = 0;
-
-// switch (brw->vb.inputs[i].glarray->Type) {
-
-// case GL_FIXED:
-// wa_flags = brw->vb.inputs[i].glarray->Size;
-// break;
-
-// case GL_INT_2_10_10_10_REV:
-// wa_flags |= BRW_ATTRIB_WA_SIGN;
-// /* fallthough */
-
-// case GL_UNSIGNED_INT_2_10_10_10_REV:
-// if (brw->vb.inputs[i].glarray->Format == GL_BGRA)
-// wa_flags |= BRW_ATTRIB_WA_BGRA;
-
-// if (brw->vb.inputs[i].glarray->Normalized)
-// wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
-// else if (!brw->vb.inputs[i].glarray->Integer)
-// wa_flags |= BRW_ATTRIB_WA_SCALE;
-
-// break;
-// }
-
-// key.gl_attrib_wa_flags[i] = wa_flags;
-// }
-// }
-
-// if (!brw_search_cache(&brw->cache, BRW_VS_PROG,
-// &key, sizeof(key),
-// &brw->vs.base.prog_offset, &brw->vs.prog_data)) {
-// bool success =
-// do_vs_prog(brw, ctx->_Shader->CurrentProgram[MESA_SHADER_VERTEX], vp,
-// &key);
-// (void) success;
-// assert(success);
-// }
-// brw->vs.base.prog_data = &brw->vs.prog_data->base.base;
-
-// if (memcmp(&brw->vs.prog_data->base.vue_map, &brw->vue_map_geom_out,
-// sizeof(brw->vue_map_geom_out)) != 0) {
-// brw->vue_map_vs = brw->vs.prog_data->base.vue_map;
-// brw->state.dirty.brw |= BRW_NEW_VUE_MAP_VS;
-// if (brw->gen < 7) {
-// /* No geometry shader support, so the VS VUE map is the VUE map for
-// * the output of the "geometry" portion of the pipeline.
-// */
-// brw->vue_map_geom_out = brw->vue_map_vs;
-// brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
-// }
-// }
-//}
-
-///* See brw_vs.c:
-// */
-//const struct brw_tracked_state brw_vs_prog = {
-// .dirty = {
-// .mesa = (_NEW_TRANSFORM | _NEW_POLYGON | _NEW_POINT | _NEW_LIGHT |
-// _NEW_TEXTURE |
-// _NEW_BUFFERS),
-// .brw = (BRW_NEW_VERTEX_PROGRAM |
-// BRW_NEW_VERTICES),
-// .cache = 0
-// },
-// .emit = brw_upload_vs_prog
-//};
-
bool
brw_vs_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
{
return true;
}
-//void
-//brw_wm_upload_compile(struct brw_context *brw,
-// const struct brw_wm_compile *c)
-//{
-// if (c->prog_data.total_scratch) {
-// brw_get_scratch_bo(brw, &brw->wm.base.scratch_bo,
-// c->prog_data.total_scratch * brw->max_wm_threads);
-// }
-
-// brw_upload_cache(&brw->cache, BRW_WM_PROG,
-// &c->key, sizeof(c->key),
-// c->program, c->program_size,
-// &c->prog_data, sizeof(c->prog_data),
-// &brw->wm.base.prog_offset, &brw->wm.prog_data);
-//}
-
void
brw_wm_clear_compile(struct brw_context *brw,
struct brw_wm_compile *c)
ralloc_free(c);
}
-//static bool do_wm_prog(struct brw_context *brw,
-// struct gl_shader_program *prog,
-// struct brw_fragment_program *fp,
-// struct brw_wm_prog_key *key)
-//{
-// struct brw_wm_compile *c;
-
-// c = brw_wm_init_compile(brw, prog, fp, key);
-// if (!c)
-// return false;
-
-// if (!prog || !brw_shader_program_restore_wm_compile(prog, c)) {
-// if (!brw_wm_do_compile(brw, c)) {
-// brw_wm_clear_compile(brw, c);
-// return false;
-// }
-// }
-
-// brw_wm_upload_compile(brw, c);
-// brw_wm_clear_compile(brw, c);
-
-// return true;
-//}
-
-static bool
-key_debug(struct brw_context *brw, const char *name, int a, int b)
-{
- if (a != b) {
- perf_debug(" %s %d->%d\n", name, a, b);
- return true;
- } else {
- return false;
- }
-}
-
-bool
-brw_debug_recompile_sampler_key(struct brw_context *brw,
- const struct brw_sampler_prog_key_data *old_key,
- const struct brw_sampler_prog_key_data *key)
-{
- bool found = false;
-
- for (unsigned int i = 0; i < MAX_SAMPLERS; i++) {
- found |= key_debug(brw, "EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
- old_key->swizzles[i], key->swizzles[i]);
- }
- found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 1st coordinate",
- old_key->gl_clamp_mask[0], key->gl_clamp_mask[0]);
- found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 2nd coordinate",
- old_key->gl_clamp_mask[1], key->gl_clamp_mask[1]);
- found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 3rd coordinate",
- old_key->gl_clamp_mask[2], key->gl_clamp_mask[2]);
- found |= key_debug(brw, "gather channel quirk on any texture unit",
- old_key->gather_channel_quirk_mask, key->gather_channel_quirk_mask);
-
- return found;
-}
-
-//void
-//brw_wm_debug_recompile(struct brw_context *brw,
-// struct gl_shader_program *prog,
-// const struct brw_wm_prog_key *key)
-//{
-// struct brw_cache_item *c = NULL;
-// const struct brw_wm_prog_key *old_key = NULL;
-// bool found = false;
-
-// perf_debug("Recompiling fragment shader for program %d\n", prog->Name);
-
-// for (unsigned int i = 0; i < brw->cache.size; i++) {
-// for (c = brw->cache.items[i]; c; c = c->next) {
-// if (c->cache_id == BRW_WM_PROG) {
-// old_key = c->key;
-
-// if (old_key->program_string_id == key->program_string_id)
-// break;
-// }
-// }
-// if (c)
-// break;
-// }
-
-// if (!c) {
-// perf_debug(" Didn't find previous compile in the shader cache for debug\n");
-// return;
-// }
-
-// found |= key_debug(brw, "alphatest, computed depth, depth test, or "
-// "depth write",
-// old_key->iz_lookup, key->iz_lookup);
-// found |= key_debug(brw, "depth statistics",
-// old_key->stats_wm, key->stats_wm);
-// found |= key_debug(brw, "flat shading",
-// old_key->flat_shade, key->flat_shade);
-// found |= key_debug(brw, "per-sample shading",
-// old_key->persample_shading, key->persample_shading);
-// found |= key_debug(brw, "per-sample shading and 2x MSAA",
-// old_key->persample_2x, key->persample_2x);
-// found |= key_debug(brw, "number of color buffers",
-// old_key->nr_color_regions, key->nr_color_regions);
-// found |= key_debug(brw, "MRT alpha test or alpha-to-coverage",
-// old_key->replicate_alpha, key->replicate_alpha);
-// found |= key_debug(brw, "rendering to FBO",
-// old_key->render_to_fbo, key->render_to_fbo);
-// found |= key_debug(brw, "fragment color clamping",
-// old_key->clamp_fragment_color, key->clamp_fragment_color);
-// found |= key_debug(brw, "line smoothing",
-// old_key->line_aa, key->line_aa);
-// found |= key_debug(brw, "renderbuffer height",
-// old_key->drawable_height, key->drawable_height);
-// found |= key_debug(brw, "input slots valid",
-// old_key->input_slots_valid, key->input_slots_valid);
-// found |= key_debug(brw, "mrt alpha test function",
-// old_key->alpha_test_func, key->alpha_test_func);
-// found |= key_debug(brw, "mrt alpha test reference value",
-// old_key->alpha_test_ref, key->alpha_test_ref);
-
-// found |= brw_debug_recompile_sampler_key(brw, &old_key->tex, &key->tex);
-
-// if (!found) {
-// perf_debug(" Something else\n");
-// }
-//}
-
//static uint8_t
//gen6_gather_workaround(GLenum internalformat)
//{
// /* The unique fragment program ID */
// key->program_string_id = fp->id;
//}
-
-
-//static void
-//brw_upload_wm_prog(struct brw_context *brw)
-//{
-// struct gl_context *ctx = &brw->ctx;
-// struct brw_wm_prog_key key;
-// struct brw_fragment_program *fp = (struct brw_fragment_program *)
-// brw->fragment_program;
-
-// brw_wm_populate_key(brw, &key);
-
-// if (!brw_search_cache(&brw->cache, BRW_WM_PROG,
-// &key, sizeof(key),
-// &brw->wm.base.prog_offset, &brw->wm.prog_data)) {
-// bool success = do_wm_prog(brw, ctx->_Shader->_CurrentFragmentProgram, fp,
-// &key);
-// (void) success;
-// assert(success);
-// }
-// brw->wm.base.prog_data = &brw->wm.prog_data->base;
-//}
-
-
-//const struct brw_tracked_state brw_wm_prog = {
-// .dirty = {
-// .mesa = (_NEW_COLOR |
-// _NEW_DEPTH |
-// _NEW_STENCIL |
-// _NEW_POLYGON |
-// _NEW_LINE |
-// _NEW_HINT |
-// _NEW_LIGHT |
-// _NEW_FRAG_CLAMP |
-// _NEW_BUFFERS |
-// _NEW_TEXTURE |
-// _NEW_MULTISAMPLE),
-// .brw = (BRW_NEW_FRAGMENT_PROGRAM |
-// BRW_NEW_REDUCED_PRIMITIVE |
-// BRW_NEW_VUE_MAP_GEOM_OUT |
-// BRW_NEW_STATS_WM)
-// },
-// .emit = brw_upload_wm_prog
-//};
-
*/
uint8_t *ra_reg_to_grf;
} vec4_reg_set;
-
- struct {
- struct ra_regs *regs;
-
- /**
- * Array of the ra classes for the unaligned contiguous register
- * block sizes used, indexed by register size.
- */
- int classes[16];
-
- /**
- * Mapping for register-allocated objects in *regs to the first
- * GRF for that object.
- */
- uint8_t *ra_reg_to_grf;
-
- /**
- * ra class for the aligned pairs we use for PLN, which doesn't
- * appear in *classes.
- */
- int aligned_pairs_class;
- } wm_reg_sets[2];
-
};
#endif