From 97279a8ca323355c68e56e9ec8d198f254ecd68b Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Tue, 29 Nov 2016 19:30:44 +0000
Subject: [PATCH] AMDGPU: Rename flat operands to match mubuf

Use vaddr/vdata for the same purposes.

This also fixes a bug in the SIInsertWaits operand check. The stored
value operand is currently called data0 in the single offset case, not
data.

llvm-svn: 288188
---
 llvm/lib/Target/AMDGPU/FLATInstructions.td     | 26 +++++++++++++-------------
 llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp |  2 +-
 llvm/lib/Target/AMDGPU/SIInsertWaits.cpp       | 12 ++++++------
 llvm/lib/Target/AMDGPU/SIInstrInfo.cpp         |  2 +-
 4 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index 4a86b1e..a46970b 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -57,8 +57,8 @@ class FLAT_Real <bits<7> op, FLAT_Pseudo ps> :
   let AsmMatchConverter = ps.AsmMatchConverter;
 
   // encoding fields
-  bits<8> addr;
-  bits<8> data;
+  bits<8> vaddr;
+  bits<8> vdata;
   bits<8> vdst;
   bits<1> slc;
   bits<1> glc;
@@ -69,8 +69,8 @@ class FLAT_Real <bits<7> op, FLAT_Pseudo ps> :
   let Inst{17} = slc;
   let Inst{24-18} = op;
   let Inst{31-26} = 0x37; // Encoding.
-  let Inst{39-32} = addr;
-  let Inst{47-40} = !if(ps.has_data, data, ?);
+  let Inst{39-32} = vaddr;
+  let Inst{47-40} = !if(ps.has_data, vdata, ?);
   // 54-48 is reserved.
   let Inst{55} = tfe;
   let Inst{63-56} = !if(ps.has_vdst, vdst, ?);
@@ -79,8 +79,8 @@ class FLAT_Real <bits<7> op, FLAT_Pseudo ps> :
 class FLAT_Load_Pseudo <string opName, RegisterClass regClass> : FLAT_Pseudo<
   opName,
   (outs regClass:$vdst),
-  (ins VReg_64:$addr, GLC:$glc, slc:$slc, tfe:$tfe),
-  " $vdst, $addr$glc$slc$tfe"> {
+  (ins VReg_64:$vaddr, GLC:$glc, slc:$slc, tfe:$tfe),
+  " $vdst, $vaddr$glc$slc$tfe"> {
   let has_data = 0;
   let mayLoad = 1;
 }
@@ -88,8 +88,8 @@ class FLAT_Load_Pseudo <string opName, RegisterClass regClass> : FLAT_Pseudo<
 class FLAT_Store_Pseudo <string opName, RegisterClass vdataClass> : FLAT_Pseudo<
   opName,
   (outs),
-  (ins VReg_64:$addr, vdataClass:$data, GLC:$glc, slc:$slc, tfe:$tfe),
-  " $addr, $data$glc$slc$tfe"> {
+  (ins VReg_64:$vaddr, vdataClass:$vdata, GLC:$glc, slc:$slc, tfe:$tfe),
+  " $vaddr, $vdata$glc$slc$tfe"> {
   let mayLoad = 0;
   let mayStore = 1;
   let has_vdst = 0;
@@ -105,8 +105,8 @@ multiclass FLAT_Atomic_Pseudo<
 
   def "" : FLAT_Pseudo <opName,
     (outs),
-    (ins VReg_64:$addr, data_rc:$data, slc:$slc, tfe:$tfe),
-    " $addr, $data$slc$tfe",
+    (ins VReg_64:$vaddr, data_rc:$vdata, slc:$slc, tfe:$tfe),
+    " $vaddr, $vdata$slc$tfe",
     []>,
     AtomicNoRet <NAME, 0> {
     let mayLoad = 1;
@@ -119,10 +119,10 @@ multiclass FLAT_Atomic_Pseudo<
 
   def _RTN : FLAT_Pseudo <opName#"_rtn",
     (outs vdst_rc:$vdst),
-    (ins VReg_64:$addr, data_rc:$data, slc:$slc, tfe:$tfe),
-    " $vdst, $addr, $data glc$slc$tfe",
+    (ins VReg_64:$vaddr, data_rc:$vdata, slc:$slc, tfe:$tfe),
+    " $vdst, $vaddr, $vdata glc$slc$tfe",
     [(set vt:$vdst,
-      (atomic (FLATAtomic i64:$addr, i1:$slc, i1:$tfe), data_vt:$data))]>,
+      (atomic (FLATAtomic i64:$vaddr, i1:$slc, i1:$tfe), data_vt:$vdata))]>,
     AtomicNoRet <NAME, 1> {
     let mayLoad = 1;
     let mayStore = 1;
diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
index 8853628..dd3b46f 100644
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -427,7 +427,7 @@ int GCNHazardRecognizer::createsVALUHazard(const MachineInstr &MI) {
   }
 
   if (TII->isFLAT(MI)) {
-    int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::data);
+    int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
     if (AMDGPU::getRegBitWidth(Desc.OpInfo[DataIdx].RegClass) > 64)
       return DataIdx;
   }
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp
index da4db63..7bec2b6 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp
@@ -252,12 +252,6 @@ bool SIInsertWaits::isOpRelevant(MachineOperand &Op) {
   // operand comes before the value operand and it may have
   // multiple data operands.
 
-  if (TII->isDS(MI) || TII->isFLAT(MI)) {
-    MachineOperand *Data = TII->getNamedOperand(MI, AMDGPU::OpName::data);
-    if (Data && Op.isIdenticalTo(*Data))
-      return true;
-  }
-
   if (TII->isDS(MI)) {
     MachineOperand *Data0 = TII->getNamedOperand(MI, AMDGPU::OpName::data0);
     if (Data0 && Op.isIdenticalTo(*Data0))
@@ -267,6 +261,12 @@ bool SIInsertWaits::isOpRelevant(MachineOperand &Op) {
     return Data1 && Op.isIdenticalTo(*Data1);
   }
 
+  if (TII->isFLAT(MI)) {
+    MachineOperand *Data = TII->getNamedOperand(MI, AMDGPU::OpName::vdata);
+    if (Data && Op.isIdenticalTo(*Data))
+      return true;
+  }
+
   // NOTE: This assumes that the value operand is before the
   // address operand, and that there is only one value operand.
   for (MachineInstr::mop_iterator I = MI.operands_begin(),
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 3335ecd..f4b94b3 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -299,7 +299,7 @@ bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
   }
 
   if (isFLAT(LdSt)) {
-    const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::addr);
+    const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
     BaseReg = AddrReg->getReg();
     Offset = 0;
     return true;
-- 
2.7.4
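
Not part of the patch: the sketch below illustrates the named-operand lookup pattern this rename affects, mirroring the SIInstrInfo.cpp hunk above. The helper name getFlatAddrAndData is hypothetical, and the snippet assumes a SIInstrInfo pointer and a FLAT MachineInstr are already in scope; only the post-rename operand names (AMDGPU::OpName::vaddr / AMDGPU::OpName::vdata) come from the patch itself.

// Illustrative sketch only; getFlatAddrAndData is a hypothetical helper
// and is not part of this patch.
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// Fetch the address and (optional) stored-value registers of a FLAT
// instruction using the new mubuf-style operand names.
static bool getFlatAddrAndData(const SIInstrInfo *TII, MachineInstr &MI,
                               unsigned &AddrReg, unsigned &DataReg) {
  if (!TII->isFLAT(MI))
    return false;

  // Address operand: was AMDGPU::OpName::addr before this patch.
  const MachineOperand *Addr = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  if (!Addr)
    return false;
  AddrReg = Addr->getReg();

  // Stored value operand: was AMDGPU::OpName::data; flat loads have none.
  const MachineOperand *Data = TII->getNamedOperand(MI, AMDGPU::OpName::vdata);
  DataReg = Data ? Data->getReg() : AMDGPU::NoRegister;
  return true;
}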