From: Craig Topper
Date: Tue, 5 Jan 2021 18:43:44 +0000 (-0800)
Subject: [RISCV] Don't parse 'vmsltu.vi v0, v1, 0' as 'vmsleu.vi v0, v1, -1'
X-Git-Tag: llvmorg-13-init~2062
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=210bc3dc0eb3550fd99158e5747619ad9e91c548;p=platform%2Fupstream%2Fllvm.git

[RISCV] Don't parse 'vmsltu.vi v0, v1, 0' as 'vmsleu.vi v0, v1, -1'

vmsltu.vi v0, v1, 0 is always false since there is no unsigned number
less than 0. vmsleu.vi v0, v1, -1, on the other hand, is always true
since -1 is treated as unsigned max and all numbers are <= unsigned max.

A similar problem exists for vmsgeu.vi v0, v1, 0, which is always true
but becomes vmsgtu.vi v0, v1, -1, which is always false.

To match the GNU assembler we'll instead emit vmsne.vv and vmseq.vv with
the same register for both source operands in these cases.

I'm using AsmParserOnly pseudo instructions here because we can't match
an explicit immediate in an InstAlias. And we can't use an AsmOperand
for the zero because the output we want doesn't use an immediate, so
there's nowhere to name the AsmOperand we want to use.

To keep the implementations similar, I'm also handling the signed
comparisons with pseudo instructions even though they don't have this
issue. This way we can avoid the special RenderMethod that decremented
by 1, so the immediate we see for the pseudo instruction in
processInstruction is 0 and not -1. Another option might have been to
have a different simm5_plus1 operand for the unsigned case, or to just
live with the immediate being pre-decremented. I felt this way was
clearer, but I'm open to other opinions.

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D94035
---

diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index d31bb8f..975945c 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -832,16 +832,6 @@ public:
     addExpr(Inst, getImm());
   }
 
-  void addSImm5Plus1Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    int64_t Imm = 0;
-    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
-    bool IsConstant = evaluateConstantImm(getImm(), Imm, VK);
-    assert(IsConstant && "Expect constant value!");
-    (void)IsConstant;
-    Inst.addOperand(MCOperand::createImm(Imm - 1));
-  }
-
   void addFenceArgOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
     // isFenceArg has validated the operand, meaning this cast is safe
@@ -2488,6 +2478,50 @@ bool RISCVAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
   case RISCV::PseudoVMSGE_VX_M_T:
     emitVMSGE(Inst, RISCV::VMSLT_VX, IDLoc, Out);
     return false;
+  case RISCV::PseudoVMSGE_VI:
+  case RISCV::PseudoVMSLT_VI: {
+    // These instructions are signed and so is the immediate, so we can
+    // subtract one and change the opcode.
+    int64_t Imm = Inst.getOperand(2).getImm();
+    unsigned Opc = Inst.getOpcode() == RISCV::PseudoVMSGE_VI ? RISCV::VMSGT_VI
+                                                             : RISCV::VMSLE_VI;
+    emitToStreamer(Out, MCInstBuilder(Opc)
+                            .addOperand(Inst.getOperand(0))
+                            .addOperand(Inst.getOperand(1))
+                            .addImm(Imm - 1)
+                            .addOperand(Inst.getOperand(3)));
+    return false;
+  }
+  case RISCV::PseudoVMSGEU_VI:
+  case RISCV::PseudoVMSLTU_VI: {
+    int64_t Imm = Inst.getOperand(2).getImm();
+    // Unsigned comparisons are tricky because the immediate is signed. If the
+    // immediate is 0 we can't just subtract one: vmsltu.vi v0, v1, 0 is always
+    // false, but vmsleu.vi v0, v1, -1 is always true.
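+    // Likewise, vmsgeu.vi v0, v1, 0 is always true, but vmsgtu.vi v0, v1, -1
+    // would always be false.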
+    // Instead we use vmsne.vv/vmseq.vv with the same register for both source
+    // operands, which matches the GNU assembler.
+    if (Imm == 0) {
+      unsigned Opc = Inst.getOpcode() == RISCV::PseudoVMSGEU_VI
+                         ? RISCV::VMSEQ_VV
+                         : RISCV::VMSNE_VV;
+      emitToStreamer(Out, MCInstBuilder(Opc)
+                              .addOperand(Inst.getOperand(0))
+                              .addOperand(Inst.getOperand(1))
+                              .addOperand(Inst.getOperand(1))
+                              .addOperand(Inst.getOperand(3)));
+    } else {
+      // For other immediate values we can subtract one, as in the signed case.
+      unsigned Opc = Inst.getOpcode() == RISCV::PseudoVMSGEU_VI
+                         ? RISCV::VMSGTU_VI
+                         : RISCV::VMSLEU_VI;
+      emitToStreamer(Out, MCInstBuilder(Opc)
+                              .addOperand(Inst.getOperand(0))
+                              .addOperand(Inst.getOperand(1))
+                              .addImm(Imm - 1)
+                              .addOperand(Inst.getOperand(3)));
+    }
+
+    return false;
+  }
   }
 
   emitToStreamer(Out, Inst);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index edcde5f..339bb68 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -62,7 +62,7 @@ def simm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<5>(Imm);}]> {
 
 def SImm5Plus1AsmOperand : AsmOperandClass {
   let Name = "SImm5Plus1";
-  let RenderMethod = "addSImm5Plus1Operands";
+  let RenderMethod = "addImmOperands";
   let DiagnosticType = "InvalidSImm5Plus1";
 }
 
@@ -604,18 +604,29 @@ def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm",
                 (VMSLEU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
 def : InstAlias<"vmsge.vv $vd, $va, $vb$vm",
                 (VMSLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
-def : InstAlias<"vmsltu.vi $vd, $va, $imm$vm",
-                (VMSLEU_VI VR:$vd, VR:$va, simm5_plus1:$imm,
-                 VMaskOp:$vm), 0>;
-def : InstAlias<"vmslt.vi $vd, $va, $imm$vm",
-                (VMSLE_VI VR:$vd, VR:$va, simm5_plus1:$imm,
-                 VMaskOp:$vm), 0>;
-def : InstAlias<"vmsgeu.vi $vd, $va, $imm$vm",
-                (VMSGTU_VI VR:$vd, VR:$va, simm5_plus1:$imm,
-                 VMaskOp:$vm), 0>;
-def : InstAlias<"vmsge.vi $vd, $va, $imm$vm",
-                (VMSGT_VI VR:$vd, VR:$va, simm5_plus1:$imm,
-                 VMaskOp:$vm), 0>;
+
+let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
+    mayStore = 0 in {
+// For unsigned comparisons we need to special-case a 0 immediate to maintain
+// the always-true/always-false semantics, which we would invert if we just
+// decremented the immediate as we do for signed comparisons. To match the GNU
+// assembler we will use vmseq.vv/vmsne.vv with the same register for both
+// operands, which we can't do from an InstAlias.
+def PseudoVMSGEU_VI : Pseudo<(outs VR:$vd),
+                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
+                             [], "vmsgeu.vi", "$vd, $vs2, $imm$vm">;
+def PseudoVMSLTU_VI : Pseudo<(outs VR:$vd),
+                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
+                             [], "vmsltu.vi", "$vd, $vs2, $imm$vm">;
+// Handle the signed comparisons with pseudos as well for more consistency in
+// the implementation.
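+// All four pseudos are expanded in RISCVAsmParser::processInstruction; for the
+// signed forms this just decrements the immediate and emits vmsgt.vi/vmsle.vi.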
+def PseudoVMSGE_VI : Pseudo<(outs VR:$vd), + (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm), + [], "vmsge.vi", "$vd, $vs2, $imm$vm">; +def PseudoVMSLT_VI : Pseudo<(outs VR:$vd), + (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm), + [], "vmslt.vi", "$vd, $vs2, $imm$vm">; +} let isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { def PseudoVMSGEU_VX : Pseudo<(outs VR:$vd), diff --git a/llvm/test/MC/RISCV/rvv/compare.s b/llvm/test/MC/RISCV/rvv/compare.s index 264a324..00f883f 100644 --- a/llvm/test/MC/RISCV/rvv/compare.s +++ b/llvm/test/MC/RISCV/rvv/compare.s @@ -314,6 +314,18 @@ vmsltu.vi v8, v4, 16 # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 72 +vmsltu.vi v8, v4, 0, v0.t +# CHECK-INST: vmsne.vv v8, v4, v4, v0.t +# CHECK-ENCODING: [0x57,0x04,0x42,0x64] +# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-UNKNOWN: 57 04 42 64 + +vmsltu.vi v8, v4, 0 +# CHECK-INST: vmsne.vv v8, v4, v4 +# CHECK-ENCODING: [0x57,0x04,0x42,0x66] +# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-UNKNOWN: 57 04 42 66 + vmslt.vi v8, v4, 16, v0.t # CHECK-INST: vmsle.vi v8, v4, 15, v0.t # CHECK-ENCODING: [0x57,0xb4,0x47,0x74] @@ -338,6 +350,18 @@ vmsgeu.vi v8, v4, 16 # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) # CHECK-UNKNOWN: 57 b4 47 7a +vmsgeu.vi v8, v4, 0, v0.t +# CHECK-INST: vmseq.vv v8, v4, v4, v0.t +# CHECK-ENCODING: [0x57,0x04,0x42,0x60] +# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-UNKNOWN: 57 04 42 60 + +vmsgeu.vi v8, v4, 0 +# CHECK-INST: vmseq.vv v8, v4, v4 +# CHECK-ENCODING: [0x57,0x04,0x42,0x62] +# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-UNKNOWN: 57 04 42 62 + vmsge.vi v8, v4, 16, v0.t # CHECK-INST: vmsgt.vi v8, v4, 15, v0.t # CHECK-ENCODING: [0x57,0xb4,0x47,0x7c]