/*
 * Copyright © 2019 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "aco_builder.h"
#include "aco_ir.h"

#include <algorithm>
#include <bitset>
#include <set>
#include <stack>
#include <vector>

namespace aco {
namespace {
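
/* This pass works around hardware hazards by inserting NOPs or other
 * independent instructions between the two instructions forming a hazard.
 * Each GFX generation gets its own context struct and handler below;
 * mitigate_hazards() walks the CFG, joining contexts across linear
 * predecessors. */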
struct State {
   Program* program;
   Block* block;
   std::vector<aco_ptr<Instruction>> old_instructions;
};

struct NOP_ctx_gfx6 {
   void join(const NOP_ctx_gfx6& other)
   {
      set_vskip_mode_then_vector =
         MAX2(set_vskip_mode_then_vector, other.set_vskip_mode_then_vector);
      valu_wr_vcc_then_vccz = MAX2(valu_wr_vcc_then_vccz, other.valu_wr_vcc_then_vccz);
      valu_wr_exec_then_execz = MAX2(valu_wr_exec_then_execz, other.valu_wr_exec_then_execz);
      valu_wr_vcc_then_div_fmas = MAX2(valu_wr_vcc_then_div_fmas, other.valu_wr_vcc_then_div_fmas);
      salu_wr_m0_then_gds_msg_ttrace =
         MAX2(salu_wr_m0_then_gds_msg_ttrace, other.salu_wr_m0_then_gds_msg_ttrace);
      valu_wr_exec_then_dpp = MAX2(valu_wr_exec_then_dpp, other.valu_wr_exec_then_dpp);
      salu_wr_m0_then_lds = MAX2(salu_wr_m0_then_lds, other.salu_wr_m0_then_lds);
      salu_wr_m0_then_moverel = MAX2(salu_wr_m0_then_moverel, other.salu_wr_m0_then_moverel);
      setreg_then_getsetreg = MAX2(setreg_then_getsetreg, other.setreg_then_getsetreg);
      vmem_store_then_wr_data |= other.vmem_store_then_wr_data;
      smem_clause |= other.smem_clause;
      smem_write |= other.smem_write;
      for (unsigned i = 0; i < BITSET_WORDS(128); i++) {
         smem_clause_read_write[i] |= other.smem_clause_read_write[i];
         smem_clause_write[i] |= other.smem_clause_write[i];
      }
   }

   bool operator==(const NOP_ctx_gfx6& other)
   {
      return set_vskip_mode_then_vector == other.set_vskip_mode_then_vector &&
             valu_wr_vcc_then_vccz == other.valu_wr_vcc_then_vccz &&
             valu_wr_exec_then_execz == other.valu_wr_exec_then_execz &&
             valu_wr_vcc_then_div_fmas == other.valu_wr_vcc_then_div_fmas &&
             vmem_store_then_wr_data == other.vmem_store_then_wr_data &&
             salu_wr_m0_then_gds_msg_ttrace == other.salu_wr_m0_then_gds_msg_ttrace &&
             valu_wr_exec_then_dpp == other.valu_wr_exec_then_dpp &&
             salu_wr_m0_then_lds == other.salu_wr_m0_then_lds &&
             salu_wr_m0_then_moverel == other.salu_wr_m0_then_moverel &&
             setreg_then_getsetreg == other.setreg_then_getsetreg &&
             smem_clause == other.smem_clause && smem_write == other.smem_write &&
             BITSET_EQUAL(smem_clause_read_write, other.smem_clause_read_write) &&
             BITSET_EQUAL(smem_clause_write, other.smem_clause_write);
   }

   void add_wait_states(unsigned amount)
   {
      if ((set_vskip_mode_then_vector -= amount) < 0)
         set_vskip_mode_then_vector = 0;

      if ((valu_wr_vcc_then_vccz -= amount) < 0)
         valu_wr_vcc_then_vccz = 0;

      if ((valu_wr_exec_then_execz -= amount) < 0)
         valu_wr_exec_then_execz = 0;

      if ((valu_wr_vcc_then_div_fmas -= amount) < 0)
         valu_wr_vcc_then_div_fmas = 0;

      if ((salu_wr_m0_then_gds_msg_ttrace -= amount) < 0)
         salu_wr_m0_then_gds_msg_ttrace = 0;

      if ((valu_wr_exec_then_dpp -= amount) < 0)
         valu_wr_exec_then_dpp = 0;

      if ((salu_wr_m0_then_lds -= amount) < 0)
         salu_wr_m0_then_lds = 0;

      if ((salu_wr_m0_then_moverel -= amount) < 0)
         salu_wr_m0_then_moverel = 0;

      if ((setreg_then_getsetreg -= amount) < 0)
         setreg_then_getsetreg = 0;

      vmem_store_then_wr_data.reset();
   }

   /* setting MODE.vskip and then any vector op requires 2 wait states */
   int8_t set_vskip_mode_then_vector = 0;

   /* VALU writing VCC/EXEC and then a VALU reading VCCZ/EXECZ requires 5 wait states */
   int8_t valu_wr_vcc_then_vccz = 0;
   int8_t valu_wr_exec_then_execz = 0;

   /* VALU writing VCC followed by v_div_fmas requires 4 wait states */
   int8_t valu_wr_vcc_then_div_fmas = 0;

   /* SALU writing M0 followed by GDS, s_sendmsg or s_ttrace_data requires 1 wait state */
   int8_t salu_wr_m0_then_gds_msg_ttrace = 0;

   /* VALU writing EXEC followed by DPP requires 5 wait states */
   int8_t valu_wr_exec_then_dpp = 0;

   /* SALU writing M0 followed by some LDS instructions requires 1 wait state on GFX10 */
   int8_t salu_wr_m0_then_lds = 0;

   /* SALU writing M0 followed by s_moverel requires 1 wait state on GFX9 */
   int8_t salu_wr_m0_then_moverel = 0;

   /* s_setreg followed by an s_getreg/s_setreg of the same register needs 2 wait states;
    * currently we don't look at the actual register */
   int8_t setreg_then_getsetreg = 0;

   /* some memory instructions writing >64bit followed by an instruction
    * writing the VGPRs holding the write-data require 1 wait state */
   std::bitset<256> vmem_store_then_wr_data;

   /* we break up SMEM clauses that contain stores or overwrite an
    * operand/definition of another instruction in the clause */
   bool smem_clause = false;
   bool smem_write = false;
   BITSET_DECLARE(smem_clause_read_write, 128) = {0};
   BITSET_DECLARE(smem_clause_write, 128) = {0};
};

struct NOP_ctx_gfx10 {
   bool has_VOPC_write_exec = false;
   bool has_nonVALU_exec_read = false;
   bool has_VMEM = false;
   bool has_branch_after_VMEM = false;
   bool has_DS = false;
   bool has_branch_after_DS = false;
   bool has_NSA_MIMG = false;
   bool has_writelane = false;
   std::bitset<128> sgprs_read_by_VMEM;
   std::bitset<128> sgprs_read_by_VMEM_store;
   std::bitset<128> sgprs_read_by_DS;
   std::bitset<128> sgprs_read_by_SMEM;

   void join(const NOP_ctx_gfx10& other)
   {
      has_VOPC_write_exec |= other.has_VOPC_write_exec;
      has_nonVALU_exec_read |= other.has_nonVALU_exec_read;
      has_VMEM |= other.has_VMEM;
      has_branch_after_VMEM |= other.has_branch_after_VMEM;
      has_DS |= other.has_DS;
      has_branch_after_DS |= other.has_branch_after_DS;
      has_NSA_MIMG |= other.has_NSA_MIMG;
      has_writelane |= other.has_writelane;
      sgprs_read_by_VMEM |= other.sgprs_read_by_VMEM;
      sgprs_read_by_DS |= other.sgprs_read_by_DS;
      sgprs_read_by_VMEM_store |= other.sgprs_read_by_VMEM_store;
      sgprs_read_by_SMEM |= other.sgprs_read_by_SMEM;
   }

   bool operator==(const NOP_ctx_gfx10& other)
   {
      return has_VOPC_write_exec == other.has_VOPC_write_exec &&
             has_nonVALU_exec_read == other.has_nonVALU_exec_read && has_VMEM == other.has_VMEM &&
             has_branch_after_VMEM == other.has_branch_after_VMEM && has_DS == other.has_DS &&
             has_branch_after_DS == other.has_branch_after_DS &&
             has_NSA_MIMG == other.has_NSA_MIMG && has_writelane == other.has_writelane &&
             sgprs_read_by_VMEM == other.sgprs_read_by_VMEM &&
             sgprs_read_by_DS == other.sgprs_read_by_DS &&
             sgprs_read_by_VMEM_store == other.sgprs_read_by_VMEM_store &&
             sgprs_read_by_SMEM == other.sgprs_read_by_SMEM;
   }
};
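
/* Counter map from VGPR to how many instructions of a tracked kind have
 * executed since the VGPR was last written. Counters are stored relative to a
 * shared base so that inc() can advance all of them in O(1); get() clamps
 * val + base to Max, and VGPRs without their resident bit set read as Max. */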
template <int Max> struct VGPRCounterMap {
public:
   int base = 0;
   BITSET_DECLARE(resident, 256);
   int val[256];

   /* Initializes all counters to Max. */
   VGPRCounterMap() { BITSET_ZERO(resident); }

   /* Increase all counters, clamping at Max. */
   void inc() { base++; }

   /* Set counter to 0. */
   void set(unsigned idx)
   {
      val[idx] = -base;
      BITSET_SET(resident, idx);
   }

   void set(PhysReg reg, unsigned bytes)
   {
      if (reg.reg() < 256)
         return;

      for (unsigned i = 0; i < DIV_ROUND_UP(bytes, 4); i++)
         set(reg.reg() - 256 + i);
   }

   /* Reset all counters to Max. */
   void reset()
   {
      base = 0;
      BITSET_ZERO(resident);
   }

   void reset(PhysReg reg, unsigned bytes)
   {
      if (reg.reg() < 256)
         return;

      for (unsigned i = 0; i < DIV_ROUND_UP(bytes, 4); i++)
         BITSET_CLEAR(resident, reg.reg() - 256 + i);
   }

   uint8_t get(unsigned idx)
   {
      return BITSET_TEST(resident, idx) ? MIN2(val[idx] + base, Max) : Max;
   }

   uint8_t get(PhysReg reg, unsigned offset = 0)
   {
      assert(reg.reg() >= 256);
      return get(reg.reg() - 256 + offset);
   }

   void join_min(const VGPRCounterMap& other)
   {
      unsigned i;
      BITSET_FOREACH_SET (i, other.resident, 256) {
         if (BITSET_TEST(resident, i))
            val[i] = MIN2(val[i] + base, other.val[i] + other.base) - base;
         else
            val[i] = other.val[i] + other.base - base;
      }
      BITSET_OR(resident, resident, other.resident);
   }

   bool operator==(const VGPRCounterMap& other) const
   {
      if (!BITSET_EQUAL(resident, other.resident))
         return false;

      unsigned i;
      BITSET_FOREACH_SET (i, other.resident, 256) {
         if (!BITSET_TEST(resident, i))
            return false;
         if (val[i] + base != other.val[i] + other.base)
            return false;
      }
      return true;
   }
};

struct NOP_ctx_gfx11 {
   /* VcmpxPermlaneHazard */
   bool has_Vcmpx = false;

   /* LdsDirectVMEMHazard */
   std::bitset<256> vgpr_used_by_vmem_load;
   std::bitset<256> vgpr_used_by_vmem_store;
   std::bitset<256> vgpr_used_by_ds;

   /* VALUTransUseHazard */
   VGPRCounterMap<15> valu_since_wr_by_trans;
   VGPRCounterMap<2> trans_since_wr_by_trans;

   /* VALUMaskWriteHazard */
   std::bitset<128> sgpr_read_by_valu_as_lanemask;
   std::bitset<128> sgpr_read_by_valu_as_lanemask_then_wr_by_salu;

   void join(const NOP_ctx_gfx11& other)
   {
      has_Vcmpx |= other.has_Vcmpx;
      vgpr_used_by_vmem_load |= other.vgpr_used_by_vmem_load;
      vgpr_used_by_vmem_store |= other.vgpr_used_by_vmem_store;
      vgpr_used_by_ds |= other.vgpr_used_by_ds;
      valu_since_wr_by_trans.join_min(other.valu_since_wr_by_trans);
      trans_since_wr_by_trans.join_min(other.trans_since_wr_by_trans);
      sgpr_read_by_valu_as_lanemask |= other.sgpr_read_by_valu_as_lanemask;
      sgpr_read_by_valu_as_lanemask_then_wr_by_salu |=
         other.sgpr_read_by_valu_as_lanemask_then_wr_by_salu;
   }

   bool operator==(const NOP_ctx_gfx11& other)
   {
      return has_Vcmpx == other.has_Vcmpx &&
             vgpr_used_by_vmem_load == other.vgpr_used_by_vmem_load &&
             vgpr_used_by_vmem_store == other.vgpr_used_by_vmem_store &&
             vgpr_used_by_ds == other.vgpr_used_by_ds &&
             valu_since_wr_by_trans == other.valu_since_wr_by_trans &&
             trans_since_wr_by_trans == other.trans_since_wr_by_trans &&
             sgpr_read_by_valu_as_lanemask == other.sgpr_read_by_valu_as_lanemask &&
             sgpr_read_by_valu_as_lanemask_then_wr_by_salu ==
                other.sgpr_read_by_valu_as_lanemask_then_wr_by_salu;
   }
};

int
get_wait_states(aco_ptr<Instruction>& instr)
{
   if (instr->opcode == aco_opcode::s_nop)
      return instr->sopp().imm + 1;
   else if (instr->opcode == aco_opcode::p_constaddr)
      return 3; /* lowered to 3 instructions in the assembler */
   else
      return 1;
}
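
/* Returns true if the register ranges [a_reg, a_reg + a_size) and
 * [b_reg, b_reg + b_size) overlap. */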
bool
regs_intersect(PhysReg a_reg, unsigned a_size, PhysReg b_reg, unsigned b_size)
{
   return a_reg > b_reg ? (a_reg - b_reg < b_size) : (b_reg - a_reg < a_size);
}
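
/* Walks instructions backwards, starting at the current instruction, calling
 * instr_cb for each one until it returns true, then recursing into linear
 * predecessor blocks. block_cb (if non-null) can prune the search at a block
 * boundary by returning false. block_state is copied per branch taken. */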
template <typename GlobalState, typename BlockState,
          bool (*block_cb)(GlobalState&, BlockState&, Block*),
          bool (*instr_cb)(GlobalState&, BlockState&, aco_ptr<Instruction>&)>
void
search_backwards_internal(State& state, GlobalState& global_state, BlockState block_state,
                          Block* block, bool start_at_end)
{
   if (block == state.block && start_at_end) {
      /* If it's the current block, block->instructions is incomplete. */
      for (int pred_idx = state.old_instructions.size() - 1; pred_idx >= 0; pred_idx--) {
         aco_ptr<Instruction>& instr = state.old_instructions[pred_idx];
         if (!instr)
            break; /* Instruction has been moved to block->instructions. */
         if (instr_cb(global_state, block_state, instr))
            return;
      }
   }

   for (int pred_idx = block->instructions.size() - 1; pred_idx >= 0; pred_idx--) {
      if (instr_cb(global_state, block_state, block->instructions[pred_idx]))
         return;
   }

   PRAGMA_DIAGNOSTIC_PUSH
   PRAGMA_DIAGNOSTIC_IGNORED(-Waddress)
   if (block_cb != nullptr && !block_cb(global_state, block_state, block))
      return;
   PRAGMA_DIAGNOSTIC_POP

   for (unsigned lin_pred : block->linear_preds) {
      search_backwards_internal<GlobalState, BlockState, block_cb, instr_cb>(
         state, global_state, block_state, &state.program->blocks[lin_pred], true);
   }
}

template <typename GlobalState, typename BlockState,
          bool (*block_cb)(GlobalState&, BlockState&, Block*),
          bool (*instr_cb)(GlobalState&, BlockState&, aco_ptr<Instruction>&)>
void
search_backwards(State& state, GlobalState& global_state, BlockState& block_state)
{
   search_backwards_internal<GlobalState, BlockState, block_cb, instr_cb>(
      state, global_state, block_state, state.block, false);
}

struct HandleRawHazardGlobalState {
   PhysReg reg;
   int nops_needed;
};

struct HandleRawHazardBlockState {
   uint32_t mask;
   int nops_needed;
};

template <bool Valu, bool Vintrp, bool Salu>
bool
handle_raw_hazard_instr(HandleRawHazardGlobalState& global_state,
                        HandleRawHazardBlockState& block_state, aco_ptr<Instruction>& pred)
{
   unsigned mask_size = util_last_bit(block_state.mask);

   uint32_t writemask = 0;
   for (Definition& def : pred->definitions) {
      if (regs_intersect(global_state.reg, mask_size, def.physReg(), def.size())) {
         unsigned start = def.physReg() > global_state.reg ? def.physReg() - global_state.reg : 0;
         unsigned end = MIN2(mask_size, start + def.size());
         writemask |= u_bit_consecutive(start, end - start);
      }
   }

   bool is_hazard = writemask != 0 && ((pred->isVALU() && Valu) || (pred->isVINTRP() && Vintrp) ||
                                       (pred->isSALU() && Salu));
   if (is_hazard) {
      global_state.nops_needed = MAX2(global_state.nops_needed, block_state.nops_needed);
      return true;
   }

   block_state.mask &= ~writemask;
   block_state.nops_needed = MAX2(block_state.nops_needed - get_wait_states(pred), 0);

   if (block_state.mask == 0)
      block_state.nops_needed = 0;

   return block_state.nops_needed == 0;
}
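
/* Raises *NOPs to the number of wait states still needed (at most min_states)
 * between the most recent write of op's registers by the instruction kinds
 * selected via the template parameters and the current read. */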
template <bool Valu, bool Vintrp, bool Salu>
void
handle_raw_hazard(State& state, int* NOPs, int min_states, Operand op)
{
   if (*NOPs >= min_states)
      return; /* early exit */

   HandleRawHazardGlobalState global = {op.physReg(), 0};
   HandleRawHazardBlockState block = {u_bit_consecutive(0, op.size()), min_states};

   /* Loops require branch instructions, which count towards the wait
    * states. So even with loops this should finish unless nops_needed is some
    * huge value. */
   search_backwards<HandleRawHazardGlobalState, HandleRawHazardBlockState, nullptr,
                    handle_raw_hazard_instr<Valu, Vintrp, Salu>>(state, global, block);

   *NOPs = MAX2(*NOPs, global.nops_needed);
}

static auto handle_valu_then_read_hazard = handle_raw_hazard<true, true, false>;
static auto handle_vintrp_then_read_hazard = handle_raw_hazard<false, true, false>;
static auto handle_valu_salu_then_read_hazard = handle_raw_hazard<true, true, true>;
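
/* Helpers for the SMEM clause bitsets: BITSET_SET_RANGE_INSIDE_WORD and
 * BITSET_TEST_RANGE only handle ranges within a single BITSET_WORD, so
 * ranges crossing a word boundary are split recursively. */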
void
set_bitset_range(BITSET_WORD* words, unsigned start, unsigned size)
{
   unsigned end = start + size - 1;
   unsigned start_mod = start % BITSET_WORDBITS;
   if (start_mod + size <= BITSET_WORDBITS) {
      BITSET_SET_RANGE_INSIDE_WORD(words, start, end);
   } else {
      unsigned first_size = BITSET_WORDBITS - start_mod;
      set_bitset_range(words, start, BITSET_WORDBITS - start_mod);
      set_bitset_range(words, start + first_size, size - first_size);
   }
}

bool
test_bitset_range(BITSET_WORD* words, unsigned start, unsigned size)
{
   unsigned end = start + size - 1;
   unsigned start_mod = start % BITSET_WORDBITS;
   if (start_mod + size <= BITSET_WORDBITS) {
      return BITSET_TEST_RANGE(words, start, end);
   } else {
      unsigned first_size = BITSET_WORDBITS - start_mod;
      return test_bitset_range(words, start, BITSET_WORDBITS - start_mod) ||
             test_bitset_range(words, start + first_size, size - first_size);
   }
}

/* A SMEM clause is any group of consecutive SMEM instructions. The
 * instructions in this group may return out of order and/or may be replayed.
 *
 * To fix this potential hazard correctly, we have to make sure that when a
 * clause has more than one instruction, no instruction in the clause writes
 * to a register that is read by another instruction in the clause (including
 * itself). In this case, we have to break the SMEM clause by inserting
 * non-SMEM instructions.
 *
 * SMEM clauses are only present on GFX8+, and only matter when XNACK is set.
 */
void
handle_smem_clause_hazards(Program* program, NOP_ctx_gfx6& ctx, aco_ptr<Instruction>& instr,
                           int* NOPs)
{
   /* break off from previous SMEM clause if needed */
   if (!*NOPs && (ctx.smem_clause || ctx.smem_write)) {
      /* Don't allow clauses with store instructions since the clause's
       * instructions may use the same address. */
      if (ctx.smem_write || instr->definitions.empty() ||
          instr_info.is_atomic[(unsigned)instr->opcode]) {
         *NOPs = 1;
      } else if (program->dev.xnack_enabled) {
         for (Operand op : instr->operands) {
            if (!op.isConstant() &&
                test_bitset_range(ctx.smem_clause_read_write, op.physReg(), op.size())) {
               *NOPs = 1;
               break;
            }
         }

         Definition def = instr->definitions[0];
         if (!*NOPs && test_bitset_range(ctx.smem_clause_read_write, def.physReg(), def.size()))
            *NOPs = 1;
      }
   }
}

/* TODO: we don't handle accessing VCC using the actual SGPR instead of using the alias */
void
handle_instruction_gfx6(State& state, NOP_ctx_gfx6& ctx, aco_ptr<Instruction>& instr,
                        std::vector<aco_ptr<Instruction>>& new_instructions)
{
   /* check hazards and compute the number of needed wait states */
   int NOPs = 0;

   if (instr->isSMEM()) {
      if (state.program->gfx_level == GFX6) {
         /* A read of an SGPR by an SMRD instruction requires 4 wait states
          * when the SGPR was written by a VALU instruction. According to LLVM,
          * there is also an undocumented hardware behavior when the buffer
          * descriptor is written by a SALU instruction. */
         for (unsigned i = 0; i < instr->operands.size(); i++) {
            Operand op = instr->operands[i];
            if (op.isConstant())
               continue;

            bool is_buffer_desc = i == 0 && op.size() > 2;
            if (is_buffer_desc)
               handle_valu_salu_then_read_hazard(state, &NOPs, 4, op);
            else
               handle_valu_then_read_hazard(state, &NOPs, 4, op);
         }
      }

      handle_smem_clause_hazards(state.program, ctx, instr, &NOPs);
   } else if (instr->isSALU()) {
      if (instr->opcode == aco_opcode::s_setreg_b32 ||
          instr->opcode == aco_opcode::s_setreg_imm32_b32 ||
          instr->opcode == aco_opcode::s_getreg_b32) {
         NOPs = MAX2(NOPs, ctx.setreg_then_getsetreg);
      }

      if (state.program->gfx_level == GFX9) {
         if (instr->opcode == aco_opcode::s_movrels_b32 ||
             instr->opcode == aco_opcode::s_movrels_b64 ||
             instr->opcode == aco_opcode::s_movreld_b32 ||
             instr->opcode == aco_opcode::s_movreld_b64) {
            NOPs = MAX2(NOPs, ctx.salu_wr_m0_then_moverel);
         }
      }

      if (instr->opcode == aco_opcode::s_sendmsg || instr->opcode == aco_opcode::s_ttracedata)
         NOPs = MAX2(NOPs, ctx.salu_wr_m0_then_gds_msg_ttrace);
   } else if (instr->isDS() && instr->ds().gds) {
      NOPs = MAX2(NOPs, ctx.salu_wr_m0_then_gds_msg_ttrace);
   } else if (instr->isVALU() || instr->isVINTRP()) {
      for (Operand op : instr->operands) {
         if (op.physReg() == vccz)
            NOPs = MAX2(NOPs, ctx.valu_wr_vcc_then_vccz);
         if (op.physReg() == execz)
            NOPs = MAX2(NOPs, ctx.valu_wr_exec_then_execz);
      }

      if (instr->isDPP()) {
         NOPs = MAX2(NOPs, ctx.valu_wr_exec_then_dpp);
         handle_valu_then_read_hazard(state, &NOPs, 2, instr->operands[0]);
      }

      for (Definition def : instr->definitions) {
         if (def.regClass().type() != RegType::sgpr) {
            for (unsigned i = 0; i < def.size(); i++)
               NOPs = MAX2(NOPs, ctx.vmem_store_then_wr_data[(def.physReg() & 0xff) + i]);
         }
      }

      if ((instr->opcode == aco_opcode::v_readlane_b32 ||
           instr->opcode == aco_opcode::v_readlane_b32_e64 ||
           instr->opcode == aco_opcode::v_writelane_b32 ||
           instr->opcode == aco_opcode::v_writelane_b32_e64) &&
          !instr->operands[1].isConstant()) {
         handle_valu_then_read_hazard(state, &NOPs, 4, instr->operands[1]);
      }

      /* It's required to insert 1 wait state if the dst VGPR of any v_interp_*
       * is followed by a read with v_readfirstlane or v_readlane to fix GPU
       * hangs on GFX6. Note that v_writelane_* is apparently not affected.
       * This hazard isn't documented anywhere but AMD confirmed it.
       */
      if (state.program->gfx_level == GFX6 &&
          (instr->opcode == aco_opcode::v_readlane_b32 || /* GFX6 doesn't have v_readlane_b32_e64 */
           instr->opcode == aco_opcode::v_readfirstlane_b32)) {
         handle_vintrp_then_read_hazard(state, &NOPs, 1, instr->operands[0]);
      }

      if (instr->opcode == aco_opcode::v_div_fmas_f32 ||
          instr->opcode == aco_opcode::v_div_fmas_f64)
         NOPs = MAX2(NOPs, ctx.valu_wr_vcc_then_div_fmas);
   } else if (instr->isVMEM() || instr->isFlatLike()) {
      /* If the VALU writes the SGPR that is used by a VMEM, the user must add five wait states. */
      for (Operand op : instr->operands) {
         if (!op.isConstant() && !op.isUndefined() && op.regClass().type() == RegType::sgpr)
            handle_valu_then_read_hazard(state, &NOPs, 5, op);
      }
   }

   if (!instr->isSALU() && instr->format != Format::SMEM)
      NOPs = MAX2(NOPs, ctx.set_vskip_mode_then_vector);

   if (state.program->gfx_level == GFX9) {
      bool lds_scratch_global = (instr->isScratch() || instr->isGlobal()) && instr->flatlike().lds;
      if (instr->isVINTRP() || lds_scratch_global ||
          instr->opcode == aco_opcode::ds_read_addtid_b32 ||
          instr->opcode == aco_opcode::ds_write_addtid_b32 ||
          instr->opcode == aco_opcode::buffer_store_lds_dword) {
         NOPs = MAX2(NOPs, ctx.salu_wr_m0_then_lds);
      }
   }

   ctx.add_wait_states(NOPs + get_wait_states(instr));

   // TODO: try to schedule the NOP-causing instruction up to reduce the number of stall cycles
   if (NOPs) {
      /* create NOP */
      aco_ptr<SOPP_instruction> nop{
         create_instruction<SOPP_instruction>(aco_opcode::s_nop, Format::SOPP, 0, 0)};
      nop->imm = NOPs - 1;
      nop->block = -1;
      new_instructions.emplace_back(std::move(nop));
   }

   /* update information to check for later hazards */
   if ((ctx.smem_clause || ctx.smem_write) && (NOPs || instr->format != Format::SMEM)) {
      ctx.smem_clause = false;
      ctx.smem_write = false;

      if (state.program->dev.xnack_enabled) {
         BITSET_ZERO(ctx.smem_clause_read_write);
         BITSET_ZERO(ctx.smem_clause_write);
      }
   }

   if (instr->isSMEM()) {
      if (instr->definitions.empty() || instr_info.is_atomic[(unsigned)instr->opcode]) {
         ctx.smem_write = true;
      } else {
         ctx.smem_clause = true;

         if (state.program->dev.xnack_enabled) {
            for (Operand op : instr->operands) {
               if (!op.isConstant()) {
                  set_bitset_range(ctx.smem_clause_read_write, op.physReg(), op.size());
               }
            }

            Definition def = instr->definitions[0];
            set_bitset_range(ctx.smem_clause_read_write, def.physReg(), def.size());
            set_bitset_range(ctx.smem_clause_write, def.physReg(), def.size());
         }
      }
   } else if (instr->isVALU()) {
      for (Definition def : instr->definitions) {
         if (def.regClass().type() == RegType::sgpr) {
            if (def.physReg() == vcc || def.physReg() == vcc_hi) {
               ctx.valu_wr_vcc_then_vccz = 5;
               ctx.valu_wr_vcc_then_div_fmas = 4;
            }
            if (def.physReg() == exec || def.physReg() == exec_hi) {
               ctx.valu_wr_exec_then_execz = 5;
               ctx.valu_wr_exec_then_dpp = 5;
            }
         }
      }
   } else if (instr->isSALU()) {
      if (!instr->definitions.empty()) {
         /* all other definitions should be SCC */
         Definition def = instr->definitions[0];
         if (def.physReg() == m0) {
            ctx.salu_wr_m0_then_gds_msg_ttrace = 1;
            ctx.salu_wr_m0_then_lds = 1;
            ctx.salu_wr_m0_then_moverel = 1;
         }
      } else if (instr->opcode == aco_opcode::s_setreg_b32 ||
                 instr->opcode == aco_opcode::s_setreg_imm32_b32) {
         SOPK_instruction& sopk = instr->sopk();
         unsigned offset = (sopk.imm >> 6) & 0x1f;
         unsigned size = ((sopk.imm >> 11) & 0x1f) + 1;
         unsigned reg = sopk.imm & 0x3f;
         ctx.setreg_then_getsetreg = 2;

         if (reg == 1 && offset >= 28 && size > (28 - offset))
            ctx.set_vskip_mode_then_vector = 2;
      }
   } else if (instr->isVMEM() || instr->isFlatLike()) {
      /* >64-bit MUBUF/MTBUF store with a constant in SOFFSET */
      bool consider_buf = (instr->isMUBUF() || instr->isMTBUF()) && instr->operands.size() == 4 &&
                          instr->operands[3].size() > 2 && instr->operands[2].physReg() >= 128;
      /* MIMG store with a 128-bit T# with more than two bits set in dmask (making it a >64-bit
       * store) */
      bool consider_mimg = instr->isMIMG() &&
                           instr->operands[1].regClass().type() == RegType::vgpr &&
                           instr->operands[1].size() > 2 && instr->operands[0].size() == 4;
      /* FLAT/GLOBAL/SCRATCH store with >64-bit data */
      bool consider_flat =
         instr->isFlatLike() && instr->operands.size() == 3 && instr->operands[2].size() > 2;
      if (consider_buf || consider_mimg || consider_flat) {
         PhysReg wrdata = instr->operands[consider_flat ? 2 : 3].physReg();
         unsigned size = instr->operands[consider_flat ? 2 : 3].size();
         for (unsigned i = 0; i < size; i++)
            ctx.vmem_store_then_wr_data[(wrdata & 0xff) + i] = 1;
      }
   }
}

template <std::size_t N>
bool
check_written_regs(const aco_ptr<Instruction>& instr, const std::bitset<N>& check_regs)
{
   return std::any_of(instr->definitions.begin(), instr->definitions.end(),
                      [&check_regs](const Definition& def) -> bool
                      {
                         bool writes_any = false;
                         for (unsigned i = 0; i < def.size(); i++) {
                            unsigned def_reg = def.physReg() + i;
                            writes_any |= def_reg < check_regs.size() && check_regs[def_reg];
                         }
                         return writes_any;
                      });
}

template <std::size_t N>
bool
check_read_regs(const aco_ptr<Instruction>& instr, const std::bitset<N>& check_regs)
{
   return std::any_of(instr->operands.begin(), instr->operands.end(),
                      [&check_regs](const Operand& op) -> bool
                      {
                         if (op.isConstant())
                            return false;
                         bool reads_any = false;
                         for (unsigned i = 0; i < op.size(); i++) {
                            unsigned op_reg = op.physReg() + i;
                            reads_any |= op_reg < check_regs.size() && check_regs[op_reg];
                         }
                         return reads_any;
                      });
}

template <std::size_t N>
void
mark_read_regs(const aco_ptr<Instruction>& instr, std::bitset<N>& reg_reads)
{
   for (const Operand& op : instr->operands) {
      for (unsigned i = 0; i < op.size(); i++) {
         unsigned reg = op.physReg() + i;
         if (reg < reg_reads.size())
            reg_reads.set(reg);
      }
   }
}
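
/* Like mark_read_regs(), but also marks EXEC (and EXEC_HI for wave64), which
 * the instruction reads implicitly. */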
template <std::size_t N>
void
mark_read_regs_exec(State& state, const aco_ptr<Instruction>& instr, std::bitset<N>& reg_reads)
{
   mark_read_regs(instr, reg_reads);
   reg_reads.set(exec);
   if (state.program->wave_size == 64)
      reg_reads.set(exec_hi);
}

bool
VALU_writes_sgpr(aco_ptr<Instruction>& instr)
{
   if (instr->isVOPC())
      return true;
   if (instr->isVOP3() && instr->definitions.size() == 2)
      return true;
   if (instr->opcode == aco_opcode::v_readfirstlane_b32 ||
       instr->opcode == aco_opcode::v_readlane_b32 ||
       instr->opcode == aco_opcode::v_readlane_b32_e64)
      return true;
   return false;
}

bool
instr_writes_exec(const aco_ptr<Instruction>& instr)
{
   return std::any_of(instr->definitions.begin(), instr->definitions.end(),
                      [](const Definition& def) -> bool
                      { return def.physReg() == exec_lo || def.physReg() == exec_hi; });
}

bool
instr_writes_sgpr(const aco_ptr<Instruction>& instr)
{
   return std::any_of(instr->definitions.begin(), instr->definitions.end(),
                      [](const Definition& def) -> bool
                      { return def.getTemp().type() == RegType::sgpr; });
}

bool
instr_is_branch(const aco_ptr<Instruction>& instr)
{
   return instr->opcode == aco_opcode::s_branch || instr->opcode == aco_opcode::s_cbranch_scc0 ||
          instr->opcode == aco_opcode::s_cbranch_scc1 ||
          instr->opcode == aco_opcode::s_cbranch_vccz ||
          instr->opcode == aco_opcode::s_cbranch_vccnz ||
          instr->opcode == aco_opcode::s_cbranch_execz ||
          instr->opcode == aco_opcode::s_cbranch_execnz ||
          instr->opcode == aco_opcode::s_cbranch_cdbgsys ||
          instr->opcode == aco_opcode::s_cbranch_cdbguser ||
          instr->opcode == aco_opcode::s_cbranch_cdbgsys_or_user ||
          instr->opcode == aco_opcode::s_cbranch_cdbgsys_and_user ||
          instr->opcode == aco_opcode::s_subvector_loop_begin ||
          instr->opcode == aco_opcode::s_subvector_loop_end ||
          instr->opcode == aco_opcode::s_setpc_b64 || instr->opcode == aco_opcode::s_swappc_b64 ||
          instr->opcode == aco_opcode::s_getpc_b64 || instr->opcode == aco_opcode::s_call_b64;
}

void
handle_instruction_gfx10(State& state, NOP_ctx_gfx10& ctx, aco_ptr<Instruction>& instr,
                         std::vector<aco_ptr<Instruction>>& new_instructions)
{
   // TODO: s_dcache_inv needs to be in its own group on GFX10

   Builder bld(state.program, &new_instructions);

   unsigned vm_vsrc = 7;
   unsigned sa_sdst = 1;
   if (debug_flags & DEBUG_FORCE_WAITDEPS) {
      bld.sopp(aco_opcode::s_waitcnt_depctr, -1, 0x0000);
      vm_vsrc = 0;
      sa_sdst = 0;
   } else if (instr->opcode == aco_opcode::s_waitcnt_depctr) {
      vm_vsrc = (instr->sopp().imm >> 2) & 0x7;
      sa_sdst = instr->sopp().imm & 0x1;
   }

   /* VMEMtoScalarWriteHazard
    * Handle EXEC/M0/SGPR write following a VMEM/DS instruction without a VALU or "waitcnt vmcnt(0)"
    * in-between.
    */
   if (instr->isVMEM() || instr->isFlatLike() || instr->isDS()) {
      /* Remember all SGPRs that are read by the VMEM/DS instruction */
      if (instr->isVMEM() || instr->isFlatLike())
         mark_read_regs_exec(
            state, instr,
            instr->definitions.empty() ? ctx.sgprs_read_by_VMEM_store : ctx.sgprs_read_by_VMEM);
      if (instr->isFlat() || instr->isDS())
         mark_read_regs_exec(state, instr, ctx.sgprs_read_by_DS);
   } else if (instr->isSALU() || instr->isSMEM()) {
      if (instr->opcode == aco_opcode::s_waitcnt) {
         wait_imm imm(state.program->gfx_level, instr->sopp().imm);
         if (imm.vm == 0)
            ctx.sgprs_read_by_VMEM.reset();
         if (imm.lgkm == 0)
            ctx.sgprs_read_by_DS.reset();
      } else if (instr->opcode == aco_opcode::s_waitcnt_vscnt && instr->sopk().imm == 0) {
         ctx.sgprs_read_by_VMEM_store.reset();
      } else if (vm_vsrc == 0) {
         ctx.sgprs_read_by_VMEM.reset();
         ctx.sgprs_read_by_DS.reset();
         ctx.sgprs_read_by_VMEM_store.reset();
      }

      /* Check if the SALU/SMEM instruction writes an SGPR that was previously
       * read by a VMEM/DS instruction */
      if (check_written_regs(instr, ctx.sgprs_read_by_VMEM) ||
          check_written_regs(instr, ctx.sgprs_read_by_DS) ||
          check_written_regs(instr, ctx.sgprs_read_by_VMEM_store)) {
         ctx.sgprs_read_by_VMEM.reset();
         ctx.sgprs_read_by_DS.reset();
         ctx.sgprs_read_by_VMEM_store.reset();

         /* Insert s_waitcnt_depctr instruction with magic imm to mitigate the problem */
         bld.sopp(aco_opcode::s_waitcnt_depctr, -1, 0xffe3);
      }
   } else if (instr->isVALU()) {
      /* Hazard is mitigated by any VALU instruction */
      ctx.sgprs_read_by_VMEM.reset();
      ctx.sgprs_read_by_DS.reset();
      ctx.sgprs_read_by_VMEM_store.reset();
   }

   /* VcmpxPermlaneHazard
    * Handle any permlane following a VOPC instruction writing exec, insert v_mov between them.
    */
   if (instr->isVOPC() && instr->definitions[0].physReg() == exec) {
      /* we only need to check definitions[0] because since GFX10 v_cmpx only writes one dest */
      ctx.has_VOPC_write_exec = true;
   } else if (ctx.has_VOPC_write_exec && (instr->opcode == aco_opcode::v_permlane16_b32 ||
                                          instr->opcode == aco_opcode::v_permlanex16_b32)) {
      ctx.has_VOPC_write_exec = false;

      /* v_nop would be discarded by SQ, so use v_mov with the first operand of the permlane */
      bld.vop1(aco_opcode::v_mov_b32, Definition(instr->operands[0].physReg(), v1),
               Operand(instr->operands[0].physReg(), v1));
   } else if (instr->isVALU() && instr->opcode != aco_opcode::v_nop) {
      ctx.has_VOPC_write_exec = false;
   }

   /* VcmpxExecWARHazard
    * Handle any VALU instruction writing the exec mask after it was read by a non-VALU instruction.
    */
   if (!instr->isVALU() && instr->reads_exec()) {
      ctx.has_nonVALU_exec_read = true;
   } else if (instr->isVALU()) {
      if (instr_writes_exec(instr)) {
         ctx.has_nonVALU_exec_read = false;

         /* Insert s_waitcnt_depctr instruction with magic imm to mitigate the problem */
         bld.sopp(aco_opcode::s_waitcnt_depctr, -1, 0xfffe);
      } else if (instr_writes_sgpr(instr)) {
         /* Any VALU instruction that writes an SGPR mitigates the problem */
         ctx.has_nonVALU_exec_read = false;
      }
   } else if (sa_sdst == 0) {
      ctx.has_nonVALU_exec_read = false;
   }

   /* SMEMtoVectorWriteHazard
    * Handle any VALU instruction writing an SGPR after an SMEM reads it.
    */
   if (instr->isSMEM()) {
      /* Remember all SGPRs that are read by the SMEM instruction */
      mark_read_regs(instr, ctx.sgprs_read_by_SMEM);
   } else if (VALU_writes_sgpr(instr)) {
      /* Check if VALU writes an SGPR that was previously read by SMEM */
      if (check_written_regs(instr, ctx.sgprs_read_by_SMEM)) {
         ctx.sgprs_read_by_SMEM.reset();

         /* Insert s_mov to mitigate the problem */
         bld.sop1(aco_opcode::s_mov_b32, Definition(sgpr_null, s1), Operand::zero());
      }
   } else if (instr->isSALU()) {
      if (instr->format != Format::SOPP) {
         /* SALU can mitigate the hazard */
         ctx.sgprs_read_by_SMEM.reset();
      } else {
         /* Reducing lgkmcnt count to 0 always mitigates the hazard. */
         const SOPP_instruction& sopp = instr->sopp();
         if (sopp.opcode == aco_opcode::s_waitcnt_lgkmcnt) {
            if (sopp.imm == 0 && sopp.definitions[0].physReg() == sgpr_null)
               ctx.sgprs_read_by_SMEM.reset();
         } else if (sopp.opcode == aco_opcode::s_waitcnt) {
            wait_imm imm(state.program->gfx_level, instr->sopp().imm);
            if (imm.lgkm == 0)
               ctx.sgprs_read_by_SMEM.reset();
         }
      }
   }

   /* LdsBranchVmemWARHazard
    * Handle VMEM/GLOBAL/SCRATCH->branch->DS and DS->branch->VMEM/GLOBAL/SCRATCH patterns.
    */
   if (instr->isVMEM() || instr->isGlobal() || instr->isScratch()) {
      if (ctx.has_branch_after_DS)
         bld.sopk(aco_opcode::s_waitcnt_vscnt, Definition(sgpr_null, s1), 0);
      ctx.has_branch_after_VMEM = ctx.has_branch_after_DS = ctx.has_DS = false;
      ctx.has_VMEM = true;
   } else if (instr->isDS()) {
      if (ctx.has_branch_after_VMEM)
         bld.sopk(aco_opcode::s_waitcnt_vscnt, Definition(sgpr_null, s1), 0);
      ctx.has_branch_after_VMEM = ctx.has_branch_after_DS = ctx.has_VMEM = false;
      ctx.has_DS = true;
   } else if (instr_is_branch(instr)) {
      ctx.has_branch_after_VMEM |= ctx.has_VMEM;
      ctx.has_branch_after_DS |= ctx.has_DS;
      ctx.has_VMEM = ctx.has_DS = false;
   } else if (instr->opcode == aco_opcode::s_waitcnt_vscnt) {
      /* Only s_waitcnt_vscnt can mitigate the hazard */
      const SOPK_instruction& sopk = instr->sopk();
      if (sopk.definitions[0].physReg() == sgpr_null && sopk.imm == 0)
         ctx.has_VMEM = ctx.has_branch_after_VMEM = ctx.has_DS = ctx.has_branch_after_DS = false;
   }

   /* NSAToVMEMBug
    * Handles NSA MIMG (4 or more dwords) immediately followed by MUBUF/MTBUF (with offset[2:1] !=
    * 0).
    */
   if (instr->isMIMG() && get_mimg_nsa_dwords(instr.get()) > 1) {
      ctx.has_NSA_MIMG = true;
   } else if (ctx.has_NSA_MIMG) {
      ctx.has_NSA_MIMG = false;

      if (instr->isMUBUF() || instr->isMTBUF()) {
         uint32_t offset = instr->isMUBUF() ? instr->mubuf().offset : instr->mtbuf().offset;
         if (offset & 6)
            bld.sopp(aco_opcode::s_nop, -1, 0);
      }
   }

   /* waNsaCannotFollowWritelane
    * Handles NSA MIMG immediately following a v_writelane_b32.
    */
   if (instr->opcode == aco_opcode::v_writelane_b32_e64) {
      ctx.has_writelane = true;
   } else if (ctx.has_writelane) {
      ctx.has_writelane = false;
      if (instr->isMIMG() && get_mimg_nsa_dwords(instr.get()) > 0)
         bld.sopp(aco_opcode::s_nop, -1, 0);
   }
}

void
fill_vgpr_bitset(std::bitset<256>& set, PhysReg reg, unsigned bytes)
{
   if (reg.reg() < 256)
      return;
   for (unsigned i = 0; i < DIV_ROUND_UP(bytes, 4); i++)
      set.set(reg.reg() - 256 + i);
}
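
/* Returns the VA_VDST value this instruction effectively waits for: 0 for
 * instructions that implicitly wait for all previous VALU writes
 * (VMEM/FLAT/DS/EXP), the explicit wait for LDSDIR and s_waitcnt_depctr,
 * and 15 (no wait) otherwise. */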
unsigned
parse_vdst_wait(aco_ptr<Instruction>& instr)
{
   if (instr->isVMEM() || instr->isFlatLike() || instr->isDS() || instr->isEXP())
      return 0;
   else if (instr->isLDSDIR())
      return instr->ldsdir().wait_vdst;
   else if (instr->opcode == aco_opcode::s_waitcnt_depctr)
      return (instr->sopp().imm >> 12) & 0xf;
   else
      return 15;
}

struct LdsDirectVALUHazardGlobalState {
   unsigned wait_vdst = 15;
   PhysReg vgpr;
   std::set<unsigned> loop_headers_visited;
};

struct LdsDirectVALUHazardBlockState {
   unsigned num_valu = 0;
   bool has_trans = false;
};

bool
handle_lds_direct_valu_hazard_instr(LdsDirectVALUHazardGlobalState& global_state,
                                    LdsDirectVALUHazardBlockState& block_state,
                                    aco_ptr<Instruction>& instr)
{
   if (instr->isVALU() || instr->isVINTERP_INREG()) {
      instr_class cls = instr_info.classes[(int)instr->opcode];
      block_state.has_trans |= cls == instr_class::valu_transcendental32 ||
                               cls == instr_class::valu_double_transcendental;

      bool uses_vgpr = false;
      for (Definition& def : instr->definitions)
         uses_vgpr |= regs_intersect(def.physReg(), def.size(), global_state.vgpr, 1);
      for (Operand& op : instr->operands) {
         uses_vgpr |=
            !op.isConstant() && regs_intersect(op.physReg(), op.size(), global_state.vgpr, 1);
      }
      if (uses_vgpr) {
         /* Transcendentals execute in parallel to other VALU and va_vdst count becomes unusable */
         global_state.wait_vdst =
            MIN2(global_state.wait_vdst, block_state.has_trans ? 0 : block_state.num_valu);
         return true;
      }

      block_state.num_valu++;
   }

   if (parse_vdst_wait(instr) == 0)
      return true;

   return block_state.num_valu >= global_state.wait_vdst;
}

bool
handle_lds_direct_valu_hazard_block(LdsDirectVALUHazardGlobalState& global_state,
                                    LdsDirectVALUHazardBlockState& block_state, Block* block)
{
   if (block->kind & block_kind_loop_header) {
      if (global_state.loop_headers_visited.count(block->index))
         return false;
      global_state.loop_headers_visited.insert(block->index);
   }

   return true;
}

unsigned
handle_lds_direct_valu_hazard(State& state, aco_ptr<Instruction>& instr)
{
   /* LdsDirectVALUHazard
    * Handle LDSDIR writing a VGPR after it's used by a VALU instruction.
    */
   if (instr->ldsdir().wait_vdst == 0)
      return 0; /* early exit */

   LdsDirectVALUHazardGlobalState global_state;
   global_state.wait_vdst = instr->ldsdir().wait_vdst;
   global_state.vgpr = instr->definitions[0].physReg();
   LdsDirectVALUHazardBlockState block_state;
   search_backwards<LdsDirectVALUHazardGlobalState, LdsDirectVALUHazardBlockState,
                    &handle_lds_direct_valu_hazard_block, &handle_lds_direct_valu_hazard_instr>(
      state, global_state, block_state);
   return global_state.wait_vdst;
}

enum VALUPartialForwardingHazardState : uint8_t {
   nothing_written,
   written_after_exec_write,
   exec_written,
};

struct VALUPartialForwardingHazardGlobalState {
   bool hazard_found = false;
   std::set<unsigned> loop_headers_visited;
};

struct VALUPartialForwardingHazardBlockState {
   /* initialized by number of VGPRs read by VALU, decrement when encountered to return early */
   uint8_t num_vgprs_read = 0;
   BITSET_DECLARE(vgprs_read, 256) = {0};
   enum VALUPartialForwardingHazardState state = nothing_written;
   unsigned num_valu_since_read = 0;
   unsigned num_valu_since_write = 0;
};

bool
handle_valu_partial_forwarding_hazard_instr(VALUPartialForwardingHazardGlobalState& global_state,
                                            VALUPartialForwardingHazardBlockState& block_state,
                                            aco_ptr<Instruction>& instr)
{
   if (instr->isSALU() && !instr->definitions.empty()) {
      if (block_state.state == written_after_exec_write && instr_writes_exec(instr))
         block_state.state = exec_written;
   } else if (instr->isVALU() || instr->isVINTERP_INREG()) {
      bool vgpr_write = false;
      for (Definition& def : instr->definitions) {
         if (def.physReg().reg() < 256)
            continue;

         for (unsigned i = 0; i < def.size(); i++) {
            unsigned reg = def.physReg().reg() - 256 + i;
            if (!BITSET_TEST(block_state.vgprs_read, reg))
               continue;

            if (block_state.state == exec_written && block_state.num_valu_since_write < 3) {
               global_state.hazard_found = true;
               return true;
            }

            BITSET_CLEAR(block_state.vgprs_read, reg);
            block_state.num_vgprs_read--;
            vgpr_write = true;
         }
      }

      if (vgpr_write) {
         /* If the state is nothing_written: the check below should ensure that this write is
          * close enough to the read.
          *
          * If the state is exec_written: the current choice of second write has failed. Reset and
          * try with the current write as the second one, if it's close enough to the read.
          *
          * If the state is written_after_exec_write: a further second write would be better, if
          * it's close enough to the read.
          */
         if (block_state.state == nothing_written || block_state.num_valu_since_read < 5) {
            block_state.state = written_after_exec_write;
            block_state.num_valu_since_write = 0;
         } else {
            block_state.num_valu_since_write++;
         }
      } else {
         block_state.num_valu_since_write++;
      }

      block_state.num_valu_since_read++;
   } else if (parse_vdst_wait(instr) == 0) {
      return true;
   }

   if (block_state.num_valu_since_read >= (block_state.state == nothing_written ? 5 : 8))
      return true; /* Hazard not possible at this distance. */
   if (block_state.num_vgprs_read == 0)
      return true; /* All VGPRs have been written and a hazard was never found. */

   return false;
}

bool
handle_valu_partial_forwarding_hazard_block(VALUPartialForwardingHazardGlobalState& global_state,
                                            VALUPartialForwardingHazardBlockState& block_state,
                                            Block* block)
{
   if (block->kind & block_kind_loop_header) {
      if (global_state.loop_headers_visited.count(block->index))
         return false;
      global_state.loop_headers_visited.insert(block->index);
   }

   return true;
}

bool
handle_valu_partial_forwarding_hazard(State& state, aco_ptr<Instruction>& instr)
{
   /* VALUPartialForwardingHazard
    * VALU instruction reads two VGPRs: one written before an exec write by SALU and one after.
    * For the hazard, there must be less than 3 VALU between the first and second VGPR writes.
    * There also must be less than 5 VALU between the second VGPR write and the current instruction.
    */
   if (state.program->wave_size != 64 || (!instr->isVALU() && !instr->isVINTERP_INREG()))
      return false;

   unsigned num_vgprs = 0;
   for (Operand& op : instr->operands)
      num_vgprs += op.physReg().reg() < 256 ? op.size() : 1;
   if (num_vgprs <= 1)
      return false; /* early exit */

   VALUPartialForwardingHazardBlockState block_state;

   for (unsigned i = 0; i < instr->operands.size(); i++) {
      Operand& op = instr->operands[i];
      if (op.physReg().reg() < 256)
         continue;
      for (unsigned j = 0; j < op.size(); j++)
         BITSET_SET(block_state.vgprs_read, op.physReg().reg() - 256 + j);
   }
   block_state.num_vgprs_read = BITSET_COUNT(block_state.vgprs_read);

   if (block_state.num_vgprs_read <= 1)
      return false; /* early exit */

   VALUPartialForwardingHazardGlobalState global_state;
   search_backwards<VALUPartialForwardingHazardGlobalState, VALUPartialForwardingHazardBlockState,
                    &handle_valu_partial_forwarding_hazard_block,
                    &handle_valu_partial_forwarding_hazard_instr>(state, global_state, block_state);
   return global_state.hazard_found;
}

void
handle_instruction_gfx11(State& state, NOP_ctx_gfx11& ctx, aco_ptr<Instruction>& instr,
                         std::vector<aco_ptr<Instruction>>& new_instructions)
{
   Builder bld(state.program, &new_instructions);

   /* VcmpxPermlaneHazard
    * Handle any permlane following a VOPC instruction writing exec, insert v_mov between them.
    */
   if (instr->isVOPC() && instr->definitions[0].physReg() == exec) {
      ctx.has_Vcmpx = true;
   } else if (ctx.has_Vcmpx && (instr->opcode == aco_opcode::v_permlane16_b32 ||
                                instr->opcode == aco_opcode::v_permlanex16_b32)) {
      ctx.has_Vcmpx = false;

      /* v_nop would be discarded by SQ, so use v_mov with the first operand of the permlane */
      bld.vop1(aco_opcode::v_mov_b32, Definition(instr->operands[0].physReg(), v1),
               Operand(instr->operands[0].physReg(), v1));
   } else if (instr->isVALU() && instr->opcode != aco_opcode::v_nop) {
      ctx.has_Vcmpx = false;
   }

   unsigned va_vdst = parse_vdst_wait(instr);
   unsigned vm_vsrc = 7;
   unsigned sa_sdst = 1;

   if (debug_flags & DEBUG_FORCE_WAITDEPS) {
      bld.sopp(aco_opcode::s_waitcnt_depctr, -1, 0x0000);
      va_vdst = 0;
      vm_vsrc = 0;
      sa_sdst = 0;
   } else if (instr->opcode == aco_opcode::s_waitcnt_depctr) {
      /* va_vdst already obtained through parse_vdst_wait(). */
      vm_vsrc = (instr->sopp().imm >> 2) & 0x7;
      sa_sdst = instr->sopp().imm & 0x1;
   }

   if (instr->isLDSDIR()) {
      unsigned count = handle_lds_direct_valu_hazard(state, instr);
      LDSDIR_instruction* ldsdir = &instr->ldsdir();
      if (count < va_vdst) {
         ldsdir->wait_vdst = MIN2(ldsdir->wait_vdst, count);
         va_vdst = MIN2(va_vdst, count);
      }
   }

   /* VALUTransUseHazard
    * VALU reads VGPR written by transcendental instruction without 6+ VALU or 2+ transcendental
    * instructions in-between.
    */
   if (va_vdst > 0 && (instr->isVALU() || instr->isVINTERP_INREG())) {
      uint8_t num_valu = 15;
      uint8_t num_trans = 15;
      for (Operand& op : instr->operands) {
         if (op.physReg().reg() < 256)
            continue;
         for (unsigned i = 0; i < op.size(); i++) {
            num_valu = std::min(num_valu, ctx.valu_since_wr_by_trans.get(op.physReg(), i));
            num_trans = std::min(num_trans, ctx.trans_since_wr_by_trans.get(op.physReg(), i));
         }
      }
      if (num_trans <= 1 && num_valu <= 5) {
         bld.sopp(aco_opcode::s_waitcnt_depctr, -1, 0x0fff);
         va_vdst = 0;
      }
   }

   if (va_vdst > 0 && handle_valu_partial_forwarding_hazard(state, instr)) {
      bld.sopp(aco_opcode::s_waitcnt_depctr, -1, 0x0fff);
      va_vdst = 0;
   }

   /* VALUMaskWriteHazard
    * An SGPR read by a VALU as a lane mask and then written by a SALU cannot safely be read
    * by a SALU afterwards.
    */
   if (state.program->wave_size == 64 && instr->isSALU() &&
       check_written_regs(instr, ctx.sgpr_read_by_valu_as_lanemask)) {
      ctx.sgpr_read_by_valu_as_lanemask_then_wr_by_salu = ctx.sgpr_read_by_valu_as_lanemask;
      ctx.sgpr_read_by_valu_as_lanemask.reset();
   } else if (state.program->wave_size == 64 && instr->isSALU() &&
              check_read_regs(instr, ctx.sgpr_read_by_valu_as_lanemask_then_wr_by_salu)) {
      bld.sopp(aco_opcode::s_waitcnt_depctr, -1, 0xfffe);
      sa_sdst = 0;
   }

   if (va_vdst == 0) {
      ctx.valu_since_wr_by_trans.reset();
      ctx.trans_since_wr_by_trans.reset();
   }

   if (sa_sdst == 0)
      ctx.sgpr_read_by_valu_as_lanemask_then_wr_by_salu.reset();

   if (instr->isVALU() || instr->isVINTERP_INREG()) {
      instr_class cls = instr_info.classes[(int)instr->opcode];
      bool is_trans = cls == instr_class::valu_transcendental32 ||
                      cls == instr_class::valu_double_transcendental;

      ctx.valu_since_wr_by_trans.inc();
      if (is_trans)
         ctx.trans_since_wr_by_trans.inc();

      if (is_trans) {
         for (Definition& def : instr->definitions) {
            ctx.valu_since_wr_by_trans.set(def.physReg(), def.bytes());
            ctx.trans_since_wr_by_trans.set(def.physReg(), def.bytes());
         }
      }

      if (state.program->wave_size == 64) {
         for (Operand& op : instr->operands) {
            if (op.isLiteral() || (!op.isConstant() && op.physReg().reg() < 128))
               ctx.sgpr_read_by_valu_as_lanemask.reset();
         }
         switch (instr->opcode) {
         case aco_opcode::v_addc_co_u32:
         case aco_opcode::v_subb_co_u32:
         case aco_opcode::v_subbrev_co_u32:
         case aco_opcode::v_cndmask_b16:
         case aco_opcode::v_cndmask_b32:
         case aco_opcode::v_div_fmas_f32:
         case aco_opcode::v_div_fmas_f64:
            if (instr->operands.back().physReg() != exec) {
               ctx.sgpr_read_by_valu_as_lanemask.set(instr->operands.back().physReg().reg());
               ctx.sgpr_read_by_valu_as_lanemask.set(instr->operands.back().physReg().reg() + 1);
            }
            break;
         default: break;
         }
      }
   }

   /* LdsDirectVMEMHazard
    * Handle LDSDIR writing a VGPR after it's used by a VMEM/DS instruction.
    */
   if (instr->isVMEM() || instr->isFlatLike()) {
      for (Definition& def : instr->definitions)
         fill_vgpr_bitset(ctx.vgpr_used_by_vmem_load, def.physReg(), def.bytes());
      if (instr->definitions.empty()) {
         for (Operand& op : instr->operands)
            fill_vgpr_bitset(ctx.vgpr_used_by_vmem_store, op.physReg(), op.bytes());
      } else {
         for (Operand& op : instr->operands)
            fill_vgpr_bitset(ctx.vgpr_used_by_vmem_load, op.physReg(), op.bytes());
      }
   }
   if (instr->isDS() || instr->isFlat()) {
      for (Definition& def : instr->definitions)
         fill_vgpr_bitset(ctx.vgpr_used_by_ds, def.physReg(), def.bytes());
      for (Operand& op : instr->operands)
         fill_vgpr_bitset(ctx.vgpr_used_by_ds, op.physReg(), op.bytes());
   }

   if (instr->isVALU() || instr->isVINTERP_INREG() || instr->isEXP() || vm_vsrc == 0) {
      ctx.vgpr_used_by_vmem_load.reset();
      ctx.vgpr_used_by_vmem_store.reset();
      ctx.vgpr_used_by_ds.reset();
   } else if (instr->opcode == aco_opcode::s_waitcnt) {
      wait_imm imm(GFX11, instr->sopp().imm);
      if (imm.vm == 0)
         ctx.vgpr_used_by_vmem_load.reset();
      if (imm.lgkm == 0)
         ctx.vgpr_used_by_ds.reset();
   } else if (instr->opcode == aco_opcode::s_waitcnt_vscnt && instr->sopk().imm == 0) {
      ctx.vgpr_used_by_vmem_store.reset();
   }

   if (instr->isLDSDIR()) {
      if (ctx.vgpr_used_by_vmem_load[instr->definitions[0].physReg().reg() - 256] ||
          ctx.vgpr_used_by_vmem_store[instr->definitions[0].physReg().reg() - 256] ||
          ctx.vgpr_used_by_ds[instr->definitions[0].physReg().reg() - 256]) {
         bld.sopp(aco_opcode::s_waitcnt_depctr, -1, 0xffe3);
         ctx.vgpr_used_by_vmem_load.reset();
         ctx.vgpr_used_by_vmem_store.reset();
         ctx.vgpr_used_by_ds.reset();
      }
   }
}

template <typename Ctx>
using HandleInstr = void (*)(State& state, Ctx&, aco_ptr<Instruction>&,
                             std::vector<aco_ptr<Instruction>>&);

template <typename Ctx, HandleInstr<Ctx> Handle>
void
handle_block(Program* program, Ctx& ctx, Block& block)
{
   if (block.instructions.empty())
      return;

   State state;
   state.program = program;
   state.block = &block;
   state.old_instructions = std::move(block.instructions);

   block.instructions.clear(); // Silence clang-analyzer-cplusplus.Move warning
   block.instructions.reserve(state.old_instructions.size());

   for (aco_ptr<Instruction>& instr : state.old_instructions) {
      Handle(state, ctx, instr, block.instructions);
      block.instructions.emplace_back(std::move(instr));
   }
}
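
/* Processes all blocks in program order, joining the contexts of all linear
 * predecessors before handling each block. Loop bodies are re-processed with
 * the context joined over the back-edge once the loop exit is reached; the
 * second pass stops early if the loop header's context did not change. */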
template <typename Ctx, HandleInstr<Ctx> Handle>
void
mitigate_hazards(Program* program)
{
   std::vector<Ctx> all_ctx(program->blocks.size());
   std::stack<unsigned, std::vector<unsigned>> loop_header_indices;

   for (unsigned i = 0; i < program->blocks.size(); i++) {
      Block& block = program->blocks[i];
      Ctx& ctx = all_ctx[i];

      if (block.kind & block_kind_loop_header) {
         loop_header_indices.push(i);
      } else if (block.kind & block_kind_loop_exit) {
         /* Go through the whole loop again */
         for (unsigned idx = loop_header_indices.top(); idx < i; idx++) {
            Ctx loop_block_ctx;
            for (unsigned b : program->blocks[idx].linear_preds)
               loop_block_ctx.join(all_ctx[b]);

            handle_block<Ctx, Handle>(program, loop_block_ctx, program->blocks[idx]);

            /* We only need to continue if the loop header context changed */
            if (idx == loop_header_indices.top() && loop_block_ctx == all_ctx[idx])
               break;

            all_ctx[idx] = loop_block_ctx;
         }

         loop_header_indices.pop();
      }

      for (unsigned b : block.linear_preds)
         ctx.join(all_ctx[b]);

      handle_block<Ctx, Handle>(program, ctx, block);
   }
}

} /* end namespace */

void
insert_NOPs(Program* program)
{
   if (program->gfx_level >= GFX11)
      mitigate_hazards<NOP_ctx_gfx11, handle_instruction_gfx11>(program);
   else if (program->gfx_level >= GFX10_3)
      ; /* no hazards/bugs to mitigate */
   else if (program->gfx_level >= GFX10)
      mitigate_hazards<NOP_ctx_gfx10, handle_instruction_gfx10>(program);
   else
      mitigate_hazards<NOP_ctx_gfx6, handle_instruction_gfx6>(program);
}

} // namespace aco