// Add an implicit def of exec to discourage scheduling VALU instructions after
// this, which would interfere with trying to form s_and_saveexec_b64 later.
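+ // Copy exec into its own virtual register; the final saved value is produced
+ // by the s_xor_b64 below, so the copy and SaveExecReg stay distinct.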
+ unsigned CopyReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
MachineInstr *CopyExec =
- BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), SaveExecReg)
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg)
.addReg(AMDGPU::EXEC)
.addReg(AMDGPU::EXEC, RegState::ImplicitDefine);
MachineInstr *And =
BuildMI(MBB, I, DL, TII->get(AMDGPU::S_AND_B64), Tmp)
- .addReg(SaveExecReg)
+ .addReg(CopyReg)
//.addReg(AMDGPU::EXEC)
.addReg(Cond.getReg());
setImpSCCDefDead(*And, true);
MachineInstr *Xor =
BuildMI(MBB, I, DL, TII->get(AMDGPU::S_XOR_B64), SaveExecReg)
.addReg(Tmp)
- .addReg(SaveExecReg);
+ .addReg(CopyReg);
setImpSCCDefDead(*Xor, ImpDefSCC.isDead());
// Use a copy that is a terminator to get correct spill code placement with
// fast regalloc.
LIS->removeInterval(SaveExecReg);
LIS->createAndComputeVirtRegInterval(SaveExecReg);
LIS->createAndComputeVirtRegInterval(Tmp);
+ LIS->createAndComputeVirtRegInterval(CopyReg);
}
void SILowerControlFlow::emitElse(MachineInstr &MI) {
// We are running before TwoAddressInstructions, and si_else's operands are
// tied. In order to correctly tie the registers, split this into a copy of
// the src, as TwoAddressInstructions would do.
- BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), DstReg)
+ unsigned CopyReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), CopyReg)
.addOperand(MI.getOperand(1)); // Saved EXEC
// This must be inserted before phis and any spill code inserted before the
// else.
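+ // If exec was modified inside the then block, write the saveexec result to a
+ // scratch register and fold it into DstReg with the s_and_b64 below;
+ // otherwise it can define DstReg directly.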
+ unsigned SaveReg = ExecModified ?
+ MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass) : DstReg;
MachineInstr *OrSaveExec =
- BuildMI(MBB, Start, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64), DstReg)
- .addReg(DstReg);
+ BuildMI(MBB, Start, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64), SaveReg)
+ .addReg(CopyReg);
MachineBasicBlock *DestBB = MI.getOperand(2).getMBB();
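+ // Combine the s_or_saveexec result with the current exec to form DstReg.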
MachineInstr *And =
BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_AND_B64), DstReg)
.addReg(AMDGPU::EXEC)
- .addReg(DstReg);
+ .addReg(SaveReg);
if (LIS)
LIS->InsertMachineInstrInMaps(*And);
// src reg is tied to dst reg.
LIS->removeInterval(DstReg);
LIS->createAndComputeVirtRegInterval(DstReg);
+ LIS->createAndComputeVirtRegInterval(CopyReg);
+ if (ExecModified)
+ LIS->createAndComputeVirtRegInterval(SaveReg);
// Let this be recomputed.
LIS->removeRegUnit(*MCRegUnitIterator(AMDGPU::EXEC, TRI));
; waitcnt should be inserted after exec modification
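+; the exec save from s_and_saveexec and the s_xor_b64 result are now in
+; separate registers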
; SI: v_cmp_lt_i32_e32 vcc, 0,
-; SI-NEXT: s_and_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], vcc
-; SI-NEXT: s_xor_b64 [[SAVE]], exec, [[SAVE]]
+; SI-NEXT: s_and_saveexec_b64 [[SAVE1:s\[[0-9]+:[0-9]+\]]], vcc
+; SI-NEXT: s_xor_b64 [[SAVE2:s\[[0-9]+:[0-9]+\]]], exec, [[SAVE1]]
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: ; mask branch [[FLOW_BB:BB[0-9]+_[0-9]+]]
; SI-NEXT: s_cbranch_execz [[FLOW_BB]]
; v_mov should be after exec modification
; SI: [[FLOW_BB]]:
-; SI-NEXT: s_or_saveexec_b64 [[SAVE]], [[SAVE]]
+; SI-NEXT: s_or_saveexec_b64 [[SAVE3:s\[[0-9]+:[0-9]+\]]], [[SAVE2]]
; SI-NEXT: v_mov_b32_e32 v{{[0-9]+}}
-; SI-NEXT: s_xor_b64 exec, exec, [[SAVE]]
+; SI-NEXT: s_xor_b64 exec, exec, [[SAVE3]]
; SI-NEXT: ; mask branch
;
define void @test_if(i32 %b, i32 addrspace(1)* %src, i32 addrspace(1)* %dst) #1 {
; SI-DAG: v_cmp_ne_u32_e32 [[NEG1_CHECK_1:vcc]], -1, [[B]]
; SI: s_and_b64 [[ORNEG1:s\[[0-9]+:[0-9]+\]]], [[NEG1_CHECK_1]], [[NEG1_CHECK_0]]
; SI: s_and_saveexec_b64 [[ORNEG2:s\[[0-9]+:[0-9]+\]]], [[ORNEG1]]
-; SI: s_xor_b64 [[ORNEG2]], exec, [[ORNEG2]]
+; SI: s_xor_b64 [[ORNEG3:s\[[0-9]+:[0-9]+\]]], exec, [[ORNEG2]]
; SI: s_cbranch_execz [[LABEL_FLOW:BB[0-9]+_[0-9]+]]
; SI: BB{{[0-9]+_[0-9]+}}: ; %bb20
; SI: [[LABEL_FLOW]]:
; SI-NEXT: ; in Loop: Header=[[LABEL_LOOP]]
-; SI-NEXT: s_or_b64 exec, exec, [[ORNEG2]]
-; SI-NEXT: s_or_b64 [[COND_STATE]], [[ORNEG2]], [[TMP]]
+; SI-NEXT: s_or_b64 exec, exec, [[ORNEG3]]
+; SI-NEXT: s_or_b64 [[COND_STATE]], [[ORNEG3]], [[TMP]]
; SI-NEXT: s_andn2_b64 exec, exec, [[COND_STATE]]
; SI-NEXT: s_cbranch_execnz [[LABEL_LOOP]]