/* Accumulated hazard information about a group of instructions, consulted when
 * deciding whether another instruction may be reordered across that group.
 * NOTE(review): the leading '+' marker below is a diff/extraction artifact,
 * not C++ — confirm against the original source file. The struct's closing
 * brace is outside this excerpt. */
struct hazard_query {
bool contains_spill;   /* set when the group contains p_spill/p_reload */
bool contains_sendmsg; /* set when the group contains s_sendmsg */
+ bool uses_exec;      /* set when any instruction in the group needs the exec mask */
memory_event_set mem_events;
unsigned aliasing_storage; /* storage classes which are accessed (non-SMEM) */
unsigned aliasing_storage_smem; /* storage classes which are accessed (SMEM) */
/* Resets a hazard_query to an empty state.
 * NOTE(review): this excerpt appears garbled — the reset of *query and the
 * per-instruction accumulation below it (which reads 'instr', a name not
 * declared in this signature) look like two separate functions spliced
 * together by extraction (the second presumably an add-to-query helper);
 * the body also continues past this excerpt. Confirm against the original. */
void init_hazard_query(hazard_query *query) {
query->contains_spill = false;
query->contains_sendmsg = false;
+ query->uses_exec = false; /* '+' is a diff marker: new field, cleared with the rest */
memset(&query->mem_events, 0, sizeof(query->mem_events));
query->aliasing_storage = 0;
query->aliasing_storage_smem = 0;
/* --- accumulation over one instruction (likely a separate helper) --- */
if (instr->opcode == aco_opcode::p_spill || instr->opcode == aco_opcode::p_reload)
query->contains_spill = true;
query->contains_sendmsg |= instr->opcode == aco_opcode::s_sendmsg;
+ query->uses_exec |= needs_exec_mask(instr); /* diff marker: track exec usage */
memory_sync_info sync = get_sync_info_with_hack(instr);
/* Decides whether 'instr' may be moved across the instruction group summarized
 * in 'query'; 'upwards' gives the scheduling direction. Returns a HazardResult
 * describing the first hazard found.
 * NOTE(review): the '-'/'+' prefixed lines below are diff markers showing the
 * old vs. new exec-mask hazard handling — old code always failed with
 * hazard_fail_exec on p_exit_early_if or any exec write; new code only blocks
 * discards when moving downwards, and only checks exec writes when the group
 * actually uses exec. The function body continues past this excerpt. */
HazardResult perform_hazard_query(hazard_query *query, Instruction *instr, bool upwards)
{
- if (instr->opcode == aco_opcode::p_exit_early_if)
- return hazard_fail_exec;
- for (const Definition& def : instr->definitions) {
- if (def.isFixed() && def.physReg() == exec)
- return hazard_fail_exec;
+ /* don't schedule discards downwards */
+ if (!upwards && instr->opcode == aco_opcode::p_exit_early_if)
+ return hazard_fail_unreorderable;
+
+ if (query->uses_exec) { /* only scan definitions when the group reads exec */
+ for (const Definition& def : instr->definitions) {
+ if (def.isFixed() && def.physReg() == exec)
+ return hazard_fail_exec;
+ }
}
/* don't move exports so that they stay closer together */