* 64 bit map. Per-vertex and per-patch both have separate location domains
* with a max of MAX_VARYING.
*/
-static uint64_t
+uint64_t
reserved_varying_slot(struct gl_shader *stage, ir_variable_mode io_mode)
{
assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
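(Editor's note: a minimal stand-alone sketch of the idea described in the comment above, using hypothetical toy types rather than Mesa's ir_variable list; the real helper walks stage->ir with foreach_in_list() and only looks at the requested io_mode.)

#include <stdint.h>
#include <stdbool.h>

struct toy_varying {
   bool explicit_location;   /* layout(location = N) was given */
   unsigned location;        /* slot relative to the first generic varying */
   unsigned slots;           /* consecutive slots the type occupies */
};

static uint64_t
toy_reserved_varying_slots(const struct toy_varying *vars, unsigned count)
{
   uint64_t reserved = 0;

   for (unsigned i = 0; i < count; i++) {
      if (!vars[i].explicit_location)
         continue;

      /* Mark every slot the variable occupies; a 64-bit map suffices
       * because locations are bounded by MAX_VARYING. */
      for (unsigned s = 0; s < vars[i].slots; s++)
         reserved |= UINT64_C(1) << (vars[i].location + s);
   }

   return reserved;
}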
struct gl_shader_program *prog,
gl_shader *producer, gl_shader *consumer,
unsigned num_tfeedback_decls,
- tfeedback_decl *tfeedback_decls)
+ tfeedback_decl *tfeedback_decls,
+ const uint64_t reserved_slots)
{
/* Tessellation shaders treat inputs and outputs as shared memory and can
* access inputs and outputs of other invocations.
}
}
- const uint64_t reserved_slots =
- reserved_varying_slot(producer, ir_var_shader_out) |
- reserved_varying_slot(consumer, ir_var_shader_in);
-
const unsigned slots_used = matches.assign_locations(prog, reserved_slots);
matches.store_locations();
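(Editor's note: a toy picture of how the reserved mask constrains automatic assignment, with a hypothetical helper, not varying_matches::assign_locations(): the allocator simply skips any slot whose bit is already set by an explicit location.)

#include <stdint.h>

/* Return the next slot at or above 'start' not claimed by an explicit
 * location; returns 64 if every representable slot is taken. */
static unsigned
toy_next_free_slot(uint64_t reserved, unsigned start)
{
   for (unsigned slot = start; slot < 64; slot++) {
      if (!(reserved & (UINT64_C(1) << slot)))
         return slot;
   }
   return 64;
}

/* e.g. with reserved = 0b0110, a varying that would otherwise land at
 * slot 1 is placed at slot 3, leaving slots 1 and 2 to the explicit
 * layouts. */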
bool
check_against_output_limit(struct gl_context *ctx,
struct gl_shader_program *prog,
- gl_shader *producer)
+ gl_shader *producer,
+ unsigned num_explicit_locations)
{
- unsigned output_vectors = 0;
+ unsigned output_vectors = num_explicit_locations;
foreach_in_list(ir_instruction, node, producer->ir) {
ir_variable *const var = node->as_variable();
- if (var && var->data.mode == ir_var_shader_out &&
+ if (var && !var->data.explicit_location &&
+ var->data.mode == ir_var_shader_out &&
var_counts_against_varying_limit(producer->Stage, var)) {
/* outputs for fragment shader can't be doubles */
output_vectors += var->type->count_attribute_slots(false);
bool
check_against_input_limit(struct gl_context *ctx,
struct gl_shader_program *prog,
- gl_shader *consumer)
+ gl_shader *consumer,
+ unsigned num_explicit_locations)
{
- unsigned input_vectors = 0;
+ unsigned input_vectors = num_explicit_locations;
foreach_in_list(ir_instruction, node, consumer->ir) {
ir_variable *const var = node->as_variable();
- if (var && var->data.mode == ir_var_shader_in &&
+ if (var && !var->data.explicit_location &&
+ var->data.mode == ir_var_shader_in &&
var_counts_against_varying_limit(consumer->Stage, var)) {
/* vertex inputs aren't varying counted */
input_vectors += var->type->count_attribute_slots(false);
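(Editor's note: the intent of the two limit-check changes above, as a self-contained sketch with hypothetical names: explicitly located varyings are counted once up front via num_explicit_locations, and the loop only adds the implicitly placed ones, so nothing is counted twice.)

#include <stdbool.h>

/* 'explicit_count' plays the role of num_explicit_locations; each entry of
 * 'implicit_slots' is the slot count of one varying without an explicit
 * location; 'max_vectors' stands in for the GL varying component limit / 4. */
static bool
toy_check_varying_limit(unsigned explicit_count,
                        const unsigned *implicit_slots, unsigned n,
                        unsigned max_vectors)
{
   unsigned vectors = explicit_count;   /* explicit slots counted up front */

   for (unsigned i = 0; i < n; i++)
      vectors += implicit_slots[i];     /* only implicitly placed varyings */

   return vectors <= max_vectors;
}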
struct gl_shader_program *prog,
gl_shader *producer, gl_shader *consumer,
unsigned num_tfeedback_decls,
- tfeedback_decl *tfeedback_decls);
+ tfeedback_decl *tfeedback_decls,
+ const uint64_t reserved_slots);
+
+uint64_t
+reserved_varying_slot(struct gl_shader *stage, ir_variable_mode io_mode);
bool
check_against_output_limit(struct gl_context *ctx,
struct gl_shader_program *prog,
- gl_shader *producer);
+ gl_shader *producer,
+ unsigned num_explicit_locations);
bool
check_against_input_limit(struct gl_context *ctx,
struct gl_shader_program *prog,
- gl_shader *consumer);
+ gl_shader *consumer,
+ unsigned num_explicit_locations);
#endif /* GLSL_LINK_VARYINGS_H */
*/
if (last < MESA_SHADER_FRAGMENT &&
(num_tfeedback_decls != 0 || prog->SeparateShader)) {
+ const uint64_t reserved_out_slots =
+ reserved_varying_slot(prog->_LinkedShaders[last], ir_var_shader_out);
if (!assign_varying_locations(ctx, mem_ctx, prog,
prog->_LinkedShaders[last], NULL,
- num_tfeedback_decls, tfeedback_decls))
+ num_tfeedback_decls, tfeedback_decls,
+ reserved_out_slots))
goto done;
}
gl_shader *const sh = prog->_LinkedShaders[last];
if (prog->SeparateShader) {
+ const uint64_t reserved_slots =
+ reserved_varying_slot(sh, ir_var_shader_in);
+
/* Assign input locations for SSO, output locations are already
* assigned.
*/
NULL /* producer */,
sh /* consumer */,
0 /* num_tfeedback_decls */,
- NULL /* tfeedback_decls */))
+ NULL /* tfeedback_decls */,
+ reserved_slots))
goto done;
}
gl_shader *const sh_i = prog->_LinkedShaders[i];
gl_shader *const sh_next = prog->_LinkedShaders[next];
+ const uint64_t reserved_out_slots =
+ reserved_varying_slot(sh_i, ir_var_shader_out);
+ const uint64_t reserved_in_slots =
+ reserved_varying_slot(sh_next, ir_var_shader_in);
+
if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
- tfeedback_decls))
+ tfeedback_decls,
+ reserved_out_slots | reserved_in_slots))
goto done;
do_dead_builtin_varyings(ctx, sh_i, sh_next,
/* This must be done after all dead varyings are eliminated. */
if (sh_i != NULL) {
- if (!check_against_output_limit(ctx, prog, sh_i)) {
+ unsigned slots_used = _mesa_bitcount_64(reserved_out_slots);
+ if (!check_against_output_limit(ctx, prog, sh_i, slots_used)) {
goto done;
}
}
- if (!check_against_input_limit(ctx, prog, sh_next))
+
+ unsigned slots_used = _mesa_bitcount_64(reserved_in_slots);
+ if (!check_against_input_limit(ctx, prog, sh_next, slots_used))
goto done;
next = i;
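(Editor's note: the popcount of the reserved mask is what reaches num_explicit_locations above. A portable sketch of what _mesa_bitcount_64() computes here, assuming a plain Kernighan popcount rather than Mesa's actual implementation:)

#include <stdint.h>

/* Count set bits in a 64-bit reserved-slot mask; each bit is one
 * explicitly located varying slot already claimed by the shader. */
static unsigned
toy_bitcount_64(uint64_t mask)
{
   unsigned count = 0;
   while (mask) {
      mask &= mask - 1;   /* clear the lowest set bit */
      count++;
   }
   return count;
}

/* e.g. a producer reserving slots 0, 1 and 4 contributes 3 vectors to the
 * output-limit check before its implicit varyings are added. */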