From b65ef65b22c225efd7755e2edf07c00b7911b969 Mon Sep 17 00:00:00 2001
From: "Kazushi (Jam) Marukawa"
Date: Tue, 10 Nov 2020 13:42:24 +0900
Subject: [PATCH] [VE] Support inline assembly

Support inline assembly with scalar registers. Add a regression test also.

Reviewed By: simoll

Differential Revision: https://reviews.llvm.org/D91119
---
 llvm/lib/Target/VE/VEAsmPrinter.cpp          | 37 ++++++++++++++++++
 llvm/lib/Target/VE/VEISelLowering.cpp        | 23 ++++++++++++
 llvm/lib/Target/VE/VEISelLowering.h          |  8 ++++
 llvm/test/CodeGen/VE/Scalar/inlineasm-lea.ll | 56 ++++++++++++++++++++++++++++
 4 files changed, 124 insertions(+)
 create mode 100644 llvm/test/CodeGen/VE/Scalar/inlineasm-lea.ll

diff --git a/llvm/lib/Target/VE/VEAsmPrinter.cpp b/llvm/lib/Target/VE/VEAsmPrinter.cpp
index 8040d6c..4b63786 100644
--- a/llvm/lib/Target/VE/VEAsmPrinter.cpp
+++ b/llvm/lib/Target/VE/VEAsmPrinter.cpp
@@ -60,6 +60,9 @@ public:
   static const char *getRegisterName(unsigned RegNo) {
     return VEInstPrinter::getRegisterName(RegNo);
   }
+  void printOperand(const MachineInstr *MI, int OpNum, raw_ostream &OS);
+  bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+                       const char *ExtraCode, raw_ostream &O) override;
 };
 } // end of anonymous namespace
 
@@ -349,6 +352,40 @@ void VEAsmPrinter::emitInstruction(const MachineInstr *MI) {
   } while ((++I != E) && I->isInsideBundle()); // Delay slot check.
 }
 
+void VEAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
+                                raw_ostream &O) {
+  const MachineOperand &MO = MI->getOperand(OpNum);
+
+  switch (MO.getType()) {
+  case MachineOperand::MO_Register:
+    O << "%" << StringRef(getRegisterName(MO.getReg())).lower();
+    break;
+  default:
+    llvm_unreachable("<unknown operand type>");
+  }
+}
+
+// PrintAsmOperand - Print out an operand for an inline asm expression.
+bool VEAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+                                   const char *ExtraCode, raw_ostream &O) {
+  if (ExtraCode && ExtraCode[0]) {
+    if (ExtraCode[1] != 0)
+      return true; // Unknown modifier.
+
+    switch (ExtraCode[0]) {
+    default:
+      // See if this is a generic print operand
+      return AsmPrinter::PrintAsmOperand(MI, OpNo, ExtraCode, O);
+    case 'r':
+      break;
+    }
+  }
+
+  printOperand(MI, OpNo, O);
+
+  return false;
+}
+
 // Force static initialization.
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeVEAsmPrinter() { RegisterAsmPrinter X(getTheVETarget()); diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp index 85c386c..49a52e9 100644 --- a/llvm/lib/Target/VE/VEISelLowering.cpp +++ b/llvm/lib/Target/VE/VEISelLowering.cpp @@ -1546,3 +1546,26 @@ SDValue VETargetLowering::PerformDAGCombine(SDNode *N, return SDValue(); } + +//===----------------------------------------------------------------------===// +// VE Inline Assembly Support +//===----------------------------------------------------------------------===// + +std::pair +VETargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, + StringRef Constraint, + MVT VT) const { + const TargetRegisterClass *RC = nullptr; + if (Constraint.size() == 1) { + switch (Constraint[0]) { + default: + return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); + case 'r': + RC = &VE::I64RegClass; + break; + } + return std::make_pair(0U, RC); + } + + return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); +} diff --git a/llvm/lib/Target/VE/VEISelLowering.h b/llvm/lib/Target/VE/VEISelLowering.h index fffb6b5..8fbd89d 100644 --- a/llvm/lib/Target/VE/VEISelLowering.h +++ b/llvm/lib/Target/VE/VEISelLowering.h @@ -126,6 +126,14 @@ public: MachineMemOperand::Flags Flags, bool *Fast) const override; + /// Inline Assembly { + + std::pair + getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, + StringRef Constraint, MVT VT) const override; + + /// } Inline Assembly + /// Target Optimization { // SX-Aurora VE's s/udiv is 5-9 times slower than multiply. diff --git a/llvm/test/CodeGen/VE/Scalar/inlineasm-lea.ll b/llvm/test/CodeGen/VE/Scalar/inlineasm-lea.ll new file mode 100644 index 0000000..30f37b66 --- /dev/null +++ b/llvm/test/CodeGen/VE/Scalar/inlineasm-lea.ll @@ -0,0 +1,56 @@ +; RUN: llc < %s -mtriple=ve | FileCheck %s + +define i64 @lea1a(i64 %x) nounwind { +; CHECK-LABEL: lea1a: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: #APP +; CHECK-NEXT: lea %s0, (%s0) +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: or %s11, 0, %s9 + %asmtmp = tail call i64 asm "lea $0, ($1)", "=r,r"(i64 %x) nounwind + ret i64 %asmtmp +} + +define i64 @lea1b(i64 %x) nounwind { +; CHECK-LABEL: lea1b: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: #APP +; CHECK-NEXT: lea %s0, (, %s0) +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: or %s11, 0, %s9 + %asmtmp = tail call i64 asm "lea $0, (, $1)", "=r,r"(i64 %x) nounwind + ret i64 %asmtmp +} + +define i64 @lea2(i64 %x, i64 %y) nounwind { +; CHECK-LABEL: lea2: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: #APP +; CHECK-NEXT: lea %s0, (%s0, %s1) +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: or %s11, 0, %s9 + %asmtmp = tail call i64 asm "lea $0, ($1, $2)", "=r,r,r"(i64 %x, i64 %y) nounwind + ret i64 %asmtmp +} + +define i64 @lea3(i64 %x, i64 %y) nounwind { +; CHECK-LABEL: lea3: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: #APP +; CHECK-NEXT: lea %s0, 2048(%s0, %s1) +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: or %s11, 0, %s9 + %asmtmp = tail call i64 asm "lea $0, 2048($1, $2)", "=r,r,r"(i64 %x, i64 %y) nounwind + ret i64 %asmtmp +} + +define i64 @leasl3(i64 %x, i64 %y) nounwind { +; CHECK-LABEL: leasl3: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: #APP +; CHECK-NEXT: lea.sl %s0, 2048(%s1, %s0) +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: or %s11, 0, %s9 + %asmtmp = tail call i64 asm "lea.sl $0, 2048($1, $2)", "=r,r,r"(i64 %y, i64 %x) nounwind + ret i64 %asmtmp +} -- 2.7.4