unsigned semantic_name;
unsigned semantic_index;
unsigned interp;
- } input[UREG_MAX_INPUT];
- unsigned nr_inputs;
+ } fs_input[UREG_MAX_INPUT];
+ unsigned nr_fs_inputs;
+
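+   /* Bitmask of vertex shader input slots declared via ureg_DECL_vs_input();
+    * VS inputs carry no semantic name or interpolation info, only an index.
+    */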
+ unsigned vs_inputs[UREG_MAX_INPUT/32];
struct {
unsigned semantic_name;
-static struct ureg_src
-ureg_DECL_input( struct ureg_program *ureg,
- unsigned name,
- unsigned index,
- unsigned interp_mode )
+struct ureg_src
+ureg_DECL_fs_input( struct ureg_program *ureg,
+ unsigned name,
+ unsigned index,
+ unsigned interp_mode )
{
unsigned i;
- for (i = 0; i < ureg->nr_inputs; i++) {
- if (ureg->input[i].semantic_name == name &&
- ureg->input[i].semantic_index == index)
+ for (i = 0; i < ureg->nr_fs_inputs; i++) {
+ if (ureg->fs_input[i].semantic_name == name &&
+ ureg->fs_input[i].semantic_index == index)
goto out;
}
- if (ureg->nr_inputs < UREG_MAX_INPUT) {
- ureg->input[i].semantic_name = name;
- ureg->input[i].semantic_index = index;
- ureg->input[i].interp = interp_mode;
- ureg->nr_inputs++;
+ if (ureg->nr_fs_inputs < UREG_MAX_INPUT) {
+ ureg->fs_input[i].semantic_name = name;
+ ureg->fs_input[i].semantic_index = index;
+ ureg->fs_input[i].interp = interp_mode;
+ ureg->nr_fs_inputs++;
}
else {
set_bad( ureg );
}
-
-struct ureg_src
-ureg_DECL_fs_input( struct ureg_program *ureg,
- unsigned name,
- unsigned index,
- unsigned interp )
-{
- assert(ureg->processor == TGSI_PROCESSOR_FRAGMENT);
- return ureg_DECL_input( ureg, name, index, interp );
-}
-
-
struct ureg_src
ureg_DECL_vs_input( struct ureg_program *ureg,
- unsigned name,
unsigned index )
{
assert(ureg->processor == TGSI_PROCESSOR_VERTEX);
- return ureg_DECL_input( ureg, name, index, TGSI_INTERPOLATE_CONSTANT );
+
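+   /* Just record that this input index is in use; unlike fragment shader
+    * inputs, VS inputs have no semantic name or interpolation mode.
+    */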
+ ureg->vs_inputs[index/32] |= 1 << (index % 32);
+ return ureg_src_register( TGSI_FILE_INPUT, index );
}
{
unsigned i;
- for (i = 0; i < ureg->nr_inputs; i++) {
- emit_decl( ureg,
- TGSI_FILE_INPUT,
- i,
- ureg->input[i].semantic_name,
- ureg->input[i].semantic_index,
- ureg->input[i].interp );
+ if (ureg->processor == TGSI_PROCESSOR_VERTEX) {
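+      /* Vertex shader inputs are emitted as plain indexed declarations. */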
+ for (i = 0; i < UREG_MAX_INPUT; i++) {
+ if (ureg->vs_inputs[i/32] & (1 << (i%32))) {
+ emit_decl_range( ureg, TGSI_FILE_INPUT, i, 1 );
+ }
+ }
+ }
+ else {
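+      /* Fragment shader inputs keep their semantic name, index and
+       * interpolation mode.
+       */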
+ for (i = 0; i < ureg->nr_fs_inputs; i++) {
+ emit_decl( ureg,
+ TGSI_FILE_INPUT,
+ i,
+ ureg->fs_input[i].semantic_name,
+ ureg->fs_input[i].semantic_index,
+ ureg->fs_input[i].interp );
+ }
}
for (i = 0; i < ureg->nr_outputs; i++) {
boolean is_fill = vs_traits & VS_FILL;
boolean is_composite = vs_traits & VS_COMPOSITE;
boolean has_mask = vs_traits & VS_MASK;
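+   /* VS inputs are now declared by sequential slot index rather than
+    * by semantic name.
+    */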
+ unsigned input_slot = 0;
ureg = ureg_create(TGSI_PROCESSOR_VERTEX);
if (ureg == NULL)
/* it has to be either a fill or a composite op */
debug_assert(is_fill ^ is_composite);
- src = ureg_DECL_vs_input(ureg,
- TGSI_SEMANTIC_POSITION, 0);
+ src = ureg_DECL_vs_input(ureg, input_slot++);
dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
src = vs_normalize_coords(ureg, src,
const0, const1);
ureg_MOV(ureg, dst, src);
-
if (is_composite) {
- src = ureg_DECL_vs_input(ureg,
- TGSI_SEMANTIC_GENERIC, 1);
+ src = ureg_DECL_vs_input(ureg, input_slot++);
dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 1);
ureg_MOV(ureg, dst, src);
}
+
if (is_fill) {
- src = ureg_DECL_vs_input(ureg,
- TGSI_SEMANTIC_COLOR, 0);
+ src = ureg_DECL_vs_input(ureg, input_slot++);
dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_COLOR, 0);
ureg_MOV(ureg, dst, src);
}
if (has_mask) {
- src = ureg_DECL_vs_input(ureg,
- TGSI_SEMANTIC_GENERIC, 2);
+ src = ureg_DECL_vs_input(ureg, input_slot++);
dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 2);
ureg_MOV(ureg, dst, src);
}