m_offset(0),
m_next_input_ring_offset(0),
m_key(key),
- m_num_clip_dist(0),
+ m_clip_dist_mask(0),
m_cur_ring_output(0),
- m_gs_tri_strip_adj_fix(false)
+ m_gs_tri_strip_adj_fix(false),
+ m_input_mask(0)
{
sh_info().atomic_base = key.gs.first_atomic_counter;
}
return false;
}
-bool GeometryShaderFromNir::do_emit_store_deref(const nir_variable *out_var, nir_intrinsic_instr* instr)
+bool GeometryShaderFromNir::do_emit_store_deref(UNUSED const nir_variable *out_var, UNUSED nir_intrinsic_instr* instr)
{
+ return false;
+}
+
+bool GeometryShaderFromNir::emit_store(nir_intrinsic_instr* instr)
+{
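+ /* With lowered IO the value to store arrives in src[0] and src[1]
+ * holds the constant offset that is added to the intrinsic base to
+ * get the driver location of the output. */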
+ auto location = nir_intrinsic_io_semantics(instr).location;
+ auto index = nir_src_as_const_value(instr->src[1]);
+ assert(index);
+ auto driver_location = nir_intrinsic_base(instr) + index->u32;
+
uint32_t write_mask = nir_intrinsic_write_mask(instr);
GPRVector::Swizzle swz = swizzle_from_mask(write_mask);
- auto out_value = vec_from_nir_with_fetch_constant(instr->src[1], write_mask, swz, true);
- sh_info().output[out_var->data.driver_location].write_mask = write_mask;
+ auto out_value = vec_from_nir_with_fetch_constant(instr->src[0], write_mask, swz, true);
+
+ sh_info().output[driver_location].write_mask = write_mask;
auto ir = new MemRingOutIntruction(cf_mem_ring, mem_write_ind, out_value,
- 4 * out_var->data.driver_location,
+ 4 * driver_location,
instr->num_components, m_export_base[0]);
-
- streamout_data[out_var->data.location] = ir;
+ streamout_data[location] = ir;
return true;
}
-bool GeometryShaderFromNir::scan_sysvalue_access(UNUSED nir_instr *instr)
+bool GeometryShaderFromNir::scan_sysvalue_access(nir_instr *instr)
{
- return true;
+ if (instr->type != nir_instr_type_intrinsic)
+ return true;
+
+ nir_intrinsic_instr *ii = nir_instr_as_intrinsic(instr);
+
+ switch (ii->intrinsic) {
+ case nir_intrinsic_store_output:
+ return process_store_output(ii);
+ case nir_intrinsic_load_input:
+ case nir_intrinsic_load_per_vertex_input:
+ return process_load_input(ii);
+ default:
+ return true;
+ }
+}
+
+bool GeometryShaderFromNir::process_store_output(nir_intrinsic_instr* instr)
+{
+ auto location = nir_intrinsic_io_semantics(instr).location;
+ auto index = nir_src_as_const_value(instr->src[1]);
+ assert(index);
+
+ auto driver_location = nir_intrinsic_base(instr) + index->u32;
+
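+ /* Only varying slots that end up on the GS ring output are recorded
+ * here; any other location falls through and is rejected. */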
+ if (location == VARYING_SLOT_COL0 ||
+ location == VARYING_SLOT_COL1 ||
+ (location >= VARYING_SLOT_VAR0 &&
+ location <= VARYING_SLOT_VAR31) ||
+ (location >= VARYING_SLOT_TEX0 &&
+ location <= VARYING_SLOT_TEX7) ||
+ location == VARYING_SLOT_BFC0 ||
+ location == VARYING_SLOT_BFC1 ||
+ location == VARYING_SLOT_PNTC ||
+ location == VARYING_SLOT_CLIP_VERTEX ||
+ location == VARYING_SLOT_CLIP_DIST0 ||
+ location == VARYING_SLOT_CLIP_DIST1 ||
+ location == VARYING_SLOT_PRIMITIVE_ID ||
+ location == VARYING_SLOT_POS ||
+ location == VARYING_SLOT_PSIZ ||
+ location == VARYING_SLOT_LAYER ||
+ location == VARYING_SLOT_VIEWPORT ||
+ location == VARYING_SLOT_FOGC) {
+ r600_shader_io& io = sh_info().output[driver_location];
+
+ auto semantic = r600_get_varying_semantic(location);
+ io.name = semantic.first;
+ io.sid = semantic.second;
+
+ evaluate_spi_sid(io);
+
+ if (sh_info().noutput <= driver_location)
+ sh_info().noutput = driver_location + 1;
+
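+ /* Remember which of the two CLIP_DIST slots are written;
+ * do_finalize turns this into the clip distance write masks. */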
+ if (location == VARYING_SLOT_CLIP_DIST0 ||
+ location == VARYING_SLOT_CLIP_DIST1) {
+ m_clip_dist_mask |= 1 << (location - VARYING_SLOT_CLIP_DIST0);
+ }
+
+ if (location == VARYING_SLOT_VIEWPORT) {
+ sh_info().vs_out_viewport = 1;
+ sh_info().vs_out_misc_write = 1;
+ }
+ return true;
+ }
+ return false;
+}
+
+bool GeometryShaderFromNir::process_load_input(nir_intrinsic_instr* instr)
+{
+ auto location = nir_intrinsic_io_semantics(instr).location;
+ auto index = nir_src_as_const_value(instr->src[1]);
+ assert(index);
+
+ auto driver_location = nir_intrinsic_base(instr) + index->u32;
+
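+ /* Register each input slot only once: reserve a 16 byte ring offset
+ * per driver location and record the slot in m_input_mask. */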
+ if (location == VARYING_SLOT_POS ||
+ location == VARYING_SLOT_PSIZ ||
+ location == VARYING_SLOT_FOGC ||
+ location == VARYING_SLOT_CLIP_VERTEX ||
+ location == VARYING_SLOT_CLIP_DIST0 ||
+ location == VARYING_SLOT_CLIP_DIST1 ||
+ location == VARYING_SLOT_COL0 ||
+ location == VARYING_SLOT_COL1 ||
+ location == VARYING_SLOT_BFC0 ||
+ location == VARYING_SLOT_BFC1 ||
+ location == VARYING_SLOT_PNTC ||
+ (location >= VARYING_SLOT_VAR0 &&
+ location <= VARYING_SLOT_VAR31) ||
+ (location >= VARYING_SLOT_TEX0 &&
+ location <= VARYING_SLOT_TEX7)) {
+
+ uint64_t bit = 1ull << location;
+ if (!(bit & m_input_mask)) {
+ r600_shader_io& io = sh_info().input[driver_location];
+ auto semantic = r600_get_varying_semantic(location);
+ io.name = semantic.first;
+ io.sid = semantic.second;
+
+ io.ring_offset = 16 * driver_location;
+ ++sh_info().ninput;
+ m_next_input_ring_offset += 16;
+ m_input_mask |= bit;
+ }
+ return true;
+ }
+ return false;
}
bool GeometryShaderFromNir::do_process_inputs(nir_variable *input)
if (output->data.location == VARYING_SLOT_CLIP_DIST0 ||
output->data.location == VARYING_SLOT_CLIP_DIST1) {
- m_num_clip_dist += 4;
+ m_clip_dist_mask |= 1 << (output->data.location - VARYING_SLOT_CLIP_DIST0);
}
if (output->data.location == VARYING_SLOT_VIEWPORT) {
m_per_vertex_offsets[i] = adjhelp[i];
}
-bool GeometryShaderFromNir::emit_deref_instruction_override(nir_deref_instr* instr)
-{
- if (instr->deref_type == nir_deref_type_array) {
- auto var = get_deref_location(instr->parent);
- ArrayDeref ad = {var, &instr->arr.index};
- assert(instr->dest.is_ssa);
- m_in_array_deref[instr->dest.ssa.index] = ad;
-
- /* Problem: nir_intrinsice_load_deref tries to lookup the
- * variable, and will not find it, need to override that too */
- return true;
- }
- return false;
-}
bool GeometryShaderFromNir::emit_intrinsic_instruction_override(nir_intrinsic_instr* instr)
{
switch (instr->intrinsic) {
- case nir_intrinsic_load_deref: {
- auto& src = instr->src[0];
- assert(src.is_ssa);
- auto array = m_in_array_deref.find(src.ssa->index);
- if (array != m_in_array_deref.end())
- return emit_load_from_array(instr, array->second);
- } break;
case nir_intrinsic_emit_vertex:
return emit_vertex(instr, false);
case nir_intrinsic_end_primitive:
return emit_vertex(instr, true);
case nir_intrinsic_load_primitive_id:
return load_preloaded_value(instr->dest, 0, m_primitive_id);
case nir_intrinsic_load_invocation_id:
return load_preloaded_value(instr->dest, 0, m_invocation_id);
+ case nir_intrinsic_store_output:
+ return emit_store(instr);
+ case nir_intrinsic_load_per_vertex_input:
+ return emit_load_per_vertex_input(instr);
default:
;
}
return true;
}
-bool GeometryShaderFromNir::emit_load_from_array(nir_intrinsic_instr* instr,
- const ArrayDeref& array_deref)
+bool GeometryShaderFromNir::emit_load_per_vertex_input(nir_intrinsic_instr* instr)
{
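+ /* src[0] selects the vertex; only constant vertex indices are handled,
+ * and the preloaded per-vertex offset addresses the GS ring buffer. */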
auto dest = vec_from_nir(instr->dest, instr->num_components);
- auto literal_index = nir_src_as_const_value(*array_deref.index);
+ auto literal_index = nir_src_as_const_value(instr->src[0]);
if (!literal_index) {
sfn_log << SfnLog::err << "GS: Indirect input addressing not (yet) supported\n";
return false;
}
assert(literal_index->u32 < 6);
+ assert(nir_intrinsic_io_semantics(instr).num_slots == 1);
PValue addr = m_per_vertex_offsets[literal_index->u32];
auto fetch = new FetchInstruction(vc_fetch, no_index_offset, dest, addr,
- 16 * array_deref.var->data.driver_location,
+ 16 * nir_intrinsic_base(instr),
R600_GS_RING_CONST_BUFFER, PValue(), bim_none, true);
emit_instruction(fetch);
return true;
void GeometryShaderFromNir::do_finalize()
{
- if (m_num_clip_dist) {
- sh_info().cc_dist_mask = (1 << m_num_clip_dist) - 1;
- sh_info().clip_dist_write = (1 << m_num_clip_dist) - 1;
+ if (m_clip_dist_mask) {
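+ /* Each written clip distance slot carries four components. */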
+ int num_clip_dist = 4 * util_bitcount(m_clip_dist_mask);
+ sh_info().cc_dist_mask = (1 << num_clip_dist) - 1;
+ sh_info().clip_dist_write = (1 << num_clip_dist) - 1;
}
}
{
public:
GeometryShaderFromNir(r600_pipe_shader *sh, r600_pipe_shader_selector& sel, const r600_shader_key& key, enum chip_class chip_class);
- bool do_emit_load_deref(const nir_variable *in_var, nir_intrinsic_instr* instr) override;
- bool do_emit_store_deref(const nir_variable *out_var, nir_intrinsic_instr* instr) override;
+
bool scan_sysvalue_access(nir_instr *instr) override;
PValue primitive_id() override {return m_primitive_id;}
private:
- struct ArrayDeref {
- const nir_variable *var;
- const nir_src *index;
- };
+ bool do_emit_load_deref(const nir_variable *in_var, nir_intrinsic_instr* instr) override;
+ bool do_emit_store_deref(const nir_variable *out_var, nir_intrinsic_instr* instr) override;
bool do_process_inputs(nir_variable *input) override;
bool do_allocate_reserved_registers() override;
bool do_process_outputs(nir_variable *output) override;
- bool emit_deref_instruction_override(nir_deref_instr* instr) override;
bool emit_intrinsic_instruction_override(nir_intrinsic_instr* instr) override;
- bool emit_load_from_array(nir_intrinsic_instr* instr, const ArrayDeref& array_deref);
+
bool emit_vertex(nir_intrinsic_instr* instr, bool cut);
void emit_adj_fix();
+ bool process_store_output(nir_intrinsic_instr* instr);
+ bool process_load_input(nir_intrinsic_instr* instr);
+
+ bool emit_store(nir_intrinsic_instr* instr);
+ bool emit_load_per_vertex_input(nir_intrinsic_instr* instr);
+
void do_finalize() override;
r600_pipe_shader *m_pipe_shader;
PValue m_export_base[4];
bool m_first_vertex_emitted;
- std::map<unsigned, ArrayDeref> m_in_array_deref;
int m_offset;
int m_next_input_ring_offset;
r600_shader_key m_key;
- int m_num_clip_dist;
+ int m_clip_dist_mask;
unsigned m_cur_ring_output;
bool m_gs_tri_strip_adj_fix;
+ uint64_t m_input_mask;
std::map<int, MemRingOutIntruction *> streamout_data;
};