From 7130ef49cb5c56868b151afb1110e6b71c430359 Mon Sep 17 00:00:00 2001
From: Tom Stellard
Date: Thu, 23 Apr 2015 19:33:48 +0000
Subject: [PATCH] R600/SI: Improve AsmParser support for forced e64 encoding

We can now force e64 encoding even when the operands would be legal for
e32 encoding.

llvm-svn: 235626
---
 llvm/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp | 50 +++++++++++++++++++---
 llvm/test/MC/R600/vop3-errs.s                      |  5 +++
 llvm/test/MC/R600/vop3.s                           | 11 +++++
 3 files changed, 61 insertions(+), 5 deletions(-)
 create mode 100644 llvm/test/MC/R600/vop3-errs.s

diff --git a/llvm/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp
index aaf9b32..01a56ee 100644
--- a/llvm/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp
@@ -80,6 +80,7 @@ public:
     unsigned RegNo;
     int Modifiers;
     const MCRegisterInfo *TRI;
+    bool IsForcedVOP3;
   };
 
   union {
@@ -109,7 +110,8 @@ public:
   }
 
   void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
-    Inst.addOperand(MCOperand::CreateImm(Reg.Modifiers));
+    Inst.addOperand(MCOperand::CreateImm(
+        Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
     addRegOperands(Inst, N);
   }
 
@@ -163,12 +165,16 @@ public:
     return Imm.Type;
   }
 
+  bool isRegKind() const {
+    return Kind == Register;
+  }
+
   bool isReg() const override {
     return Kind == Register && Reg.Modifiers == -1;
   }
 
   bool isRegWithInputMods() const {
-    return Kind == Register && Reg.Modifiers != -1;
+    return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);
   }
 
   void setModifiers(unsigned Mods) {
@@ -176,6 +182,11 @@ public:
     Reg.Modifiers = Mods;
   }
 
+  bool hasModifiers() const {
+    assert(isRegKind());
+    return Reg.Modifiers != -1;
+  }
+
   unsigned getReg() const override {
     return Reg.RegNo;
   }
@@ -263,11 +274,13 @@ public:
   static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
                                                   SMLoc E,
-                                                  const MCRegisterInfo *TRI) {
+                                                  const MCRegisterInfo *TRI,
+                                                  bool ForceVOP3) {
     auto Op = llvm::make_unique<AMDGPUOperand>(Register);
     Op->Reg.RegNo = RegNo;
     Op->Reg.TRI = TRI;
     Op->Reg.Modifiers = -1;
+    Op->Reg.IsForcedVOP3 = ForceVOP3;
     Op->StartLoc = S;
     Op->EndLoc = E;
     return Op;
   }
@@ -324,6 +337,10 @@ public:
     ForcedEncodingSize = Size;
   }
 
+  bool isForcedVOP3() const {
+    return ForcedEncodingSize == 64;
+  }
+
   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
   unsigned checkTargetMatchPredicate(MCInst &Inst) override;
   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
@@ -525,6 +542,28 @@ bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
     SMLoc ErrorLoc = IDLoc;
     if (ErrorInfo != ~0ULL) {
       if (ErrorInfo >= Operands.size()) {
+        if (isForcedVOP3()) {
+          // If 64-bit encoding has been forced we can end up with no
+          // clamp or omod operands if none of the registers have modifiers,
+          // so we need to add these to the operand list.
+          AMDGPUOperand &LastOp =
+              ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
+          if (LastOp.isRegKind() ||
+             (LastOp.isImm() &&
+              LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
+            SMLoc S = Parser.getTok().getLoc();
+            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
+                               AMDGPUOperand::ImmTyClamp));
+            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
+                               AMDGPUOperand::ImmTyOMod));
+            bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
+                                               Out, ErrorInfo,
+                                               MatchingInlineAsm);
+            if (!Res)
+              return Res;
+          }
+
+        }
         return Error(IDLoc, "too few operands for instruction");
       }
 
@@ -546,7 +585,7 @@ static bool operandsHaveModifiers(const OperandVector &Operands) {
   for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
     const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
-    if (Op.isRegWithInputMods())
+    if (Op.isRegKind() && Op.hasModifiers())
       return true;
     if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
                        Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
       return true;
@@ -647,7 +686,8 @@ AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
 
         Operands.push_back(AMDGPUOperand::CreateReg(
-          RegNo, S, E, getContext().getRegisterInfo()));
+          RegNo, S, E, getContext().getRegisterInfo(),
+          isForcedVOP3()));
 
         if (HasModifiers || Modifiers) {
           AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
diff --git a/llvm/test/MC/R600/vop3-errs.s b/llvm/test/MC/R600/vop3-errs.s
new file mode 100644
index 0000000..b57fe6d
--- /dev/null
+++ b/llvm/test/MC/R600/vop3-errs.s
@@ -0,0 +1,5 @@
+// RUN: not llvm-mc -arch=amdgcn -show-encoding %s 2>&1 | FileCheck %s
+// RUN: not llvm-mc -arch=amdgcn -mcpu=SI -show-encoding %s 2>&1 | FileCheck %s
+
+v_add_f32_e64 v0, v1
+// CHECK: error: too few operands for instruction
diff --git a/llvm/test/MC/R600/vop3.s b/llvm/test/MC/R600/vop3.s
index 7d1ba0b..322756f 100644
--- a/llvm/test/MC/R600/vop3.s
+++ b/llvm/test/MC/R600/vop3.s
@@ -5,6 +5,11 @@
 // VOPC Instructions
 //===----------------------------------------------------------------------===//
 
+// Test forced e64 encoding
+
+v_cmp_lt_f32_e64 s[2:3], v4, -v6
+// CHECK: v_cmp_lt_f32_e64 s[2:3], v4, -v6 ; encoding: [0x02,0x00,0x02,0xd0,0x04,0x0d,0x02,0x40]
+
 //
 // Modifier tests:
 //
@@ -95,6 +100,12 @@ v_fract_f32 v1, v2, div:2 clamp
 // VOP2 Instructions
 ///===---------------------------------------------------------------------===//
 
+// Test forced e64 encoding with e32 operands
+
+v_ldexp_f32_e64 v1, v3, v5
+// CHECK: v_ldexp_f32_e64 v1, v3, v5 ; encoding: [0x01,0x00,0x56,0xd2,0x03,0x0b,0x02,0x00]
+
+
 // TODO: Modifier tests
 
 v_cndmask_b32 v1, v3, v5, s[4:5]
-- 
2.7.4
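A minimal way to exercise the forced e64 encoding by hand, assuming llvm-mc is
built with the R600 target enabled; the flags and the expected encoding bytes
are taken from the RUN/CHECK lines in vop3.s above:

  $ echo "v_ldexp_f32_e64 v1, v3, v5" | llvm-mc -arch=amdgcn -mcpu=SI -show-encoding
  # output should contain:
  # v_ldexp_f32_e64 v1, v3, v5 ; encoding: [0x01,0x00,0x56,0xd2,0x03,0x0b,0x02,0x00]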