/// legal. It is frequently not legal in PIC relocation models.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
+  /// Return true if the operand with index OpNo corresponds to a target
+  /// branch. For example, in the following case
+  ///
+  /// call void asm "lea r8, $0\0A\09call qword ptr ${1:P}\0A\09ret",
+  /// "*m,*m,~{r8},~{dirflag},~{fpsr},~{flags}"
+  /// ([9 x i32]* @Arr), void (...)* @sincos_asm)
+  ///
+  /// the operand $1 (sincos_asm) is a target branch in the inline asm, but
+  /// the operand $0 (Arr) is not. The default implementation conservatively
+  /// reports no operand as a target branch.
+  virtual bool
+  isInlineAsmTargetBranch(const SmallVectorImpl<StringRef> &AsmStrs,
+                          unsigned OpNo) const {
+    return false;
+  }
+
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
SDValue &Chain) const;
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/PBQPRAConstraint.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/CodeGen.h"
#include <memory>
class MachineFunction;
class ScheduleDAGMutation;
class CallLowering;
+class GlobalValue;
class InlineAsmLowering;
class InstrItineraryData;
struct InstrStage;
unsigned PhysReg) const {
return false;
}
+
+  /// Classify a reference to a global function. This is mainly used to fetch
+  /// target-specific flags for lowering a function address. For example, it
+  /// can mark that a function call should go through the PLT or use
+  /// PC-relative addressing. The default implementation returns 0, meaning
+  /// no special flags apply.
+  virtual unsigned char
+  classifyGlobalFunctionReference(const GlobalValue *GV) const {
+    return 0;
+  }
};
} // end namespace llvm
#ifndef LLVM_IR_INLINEASM_H
#define LLVM_IR_INLINEASM_H
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ErrorHandling.h"
const std::string &getAsmString() const { return AsmString; }
const std::string &getConstraintString() const { return Constraints; }
+ void collectAsmStrs(SmallVectorImpl<StringRef> &AsmStrs) const;
/// This static method can be used by the parser to check to see if the
/// specified constraint string is legal for the type.
Kind_Clobber = 4, // Clobbered register, "~r".
Kind_Imm = 5, // Immediate.
Kind_Mem = 6, // Memory operand, "m", or an address, "p".
+ Kind_Func = 7, // Address operand of function call
// Memory constraint codes.
// These could be tablegenerated but there's little need to do that since
static unsigned getFlagWord(unsigned Kind, unsigned NumOps) {
assert(((NumOps << 3) & ~0xffff) == 0 && "Too many inline asm operands!");
- assert(Kind >= Kind_RegUse && Kind <= Kind_Mem && "Invalid Kind");
+ assert(Kind >= Kind_RegUse && Kind <= Kind_Func && "Invalid Kind");
return Kind | (NumOps << 3);
}
static bool isRegDefKind(unsigned Flag){ return getKind(Flag) == Kind_RegDef;}
static bool isImmKind(unsigned Flag) { return getKind(Flag) == Kind_Imm; }
static bool isMemKind(unsigned Flag) { return getKind(Flag) == Kind_Mem; }
+ static bool isFuncKind(unsigned Flag) { return getKind(Flag) == Kind_Func; }
static bool isRegDefEarlyClobberKind(unsigned Flag) {
return getKind(Flag) == Kind_RegDefEarlyClobber;
}
/// Augment an existing flag word returned by getFlagWord with the constraint
/// code for a memory constraint.
static unsigned getFlagWordForMem(unsigned InputFlag, unsigned Constraint) {
- assert(isMemKind(InputFlag) && "InputFlag is not a memory constraint!");
+ assert((isMemKind(InputFlag) || isFuncKind(InputFlag)) &&
+ "InputFlag is not a memory (include function) constraint!");
assert(Constraint <= 0x7fff && "Too large a memory constraint ID");
assert(Constraint <= Constraints_Max && "Unknown constraint ID");
assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
}
static unsigned getMemoryConstraintID(unsigned Flag) {
- assert(isMemKind(Flag));
+ assert((isMemKind(Flag) || isFuncKind(Flag)) &&
+ "Not expected mem or function flang!");
return (Flag >> Constraints_ShiftAmount) & 0x7fff;
}
case InlineAsm::Kind_Imm:
return "imm";
case InlineAsm::Kind_Mem:
+ case InlineAsm::Kind_Func:
return "mem";
default:
llvm_unreachable("Unknown operand kind");
break;
case InlineAsm::Kind_RegUse: // Use of register.
case InlineAsm::Kind_Imm: // Immediate.
- case InlineAsm::Kind_Mem: // Addressing mode.
+ case InlineAsm::Kind_Mem: // Non-function addressing mode.
// The addressing mode has been selected, just add all of the
// operands to the machine instruction.
for (unsigned j = 0; j != NumVals; ++j, ++i)
}
}
break;
+ case InlineAsm::Kind_Func: // Function addressing mode.
+ for (unsigned j = 0; j != NumVals; ++j, ++i) {
+ SDValue Op = Node->getOperand(i);
+ AddOperand(MIB, Op, 0, nullptr, VRBaseMap,
+ /*IsDebug=*/false, IsClone, IsCloned);
+
+ // Adjust Target Flags for function reference.
+ if (auto *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
+ unsigned NewFlags =
+ MF->getSubtarget().classifyGlobalFunctionReference(
+ TGA->getGlobal());
+ unsigned LastIdx = MIB.getInstr()->getNumOperands() - 1;
+ MIB.getInstr()->getOperand(LastIdx).setTargetFlags(NewFlags);
+ }
+ }
}
}
} // end anonymous namespace
+// Return true if Op is a GlobalAddress node referring to a Function that is
+// not dllimport; such operands are candidates for direct branch lowering.
+static bool isFunction(SDValue Op) {
+  if (Op && Op.getOpcode() == ISD::GlobalAddress) {
+    if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
+      auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
+
+      // A normal (non-inline-asm) "call dllimport func" instruction forces
+      // indirect access via its call opcode, and the asm printer usually
+      // emits an indirect-symbol marker (e.g. "*") based on that opcode.
+      // Inline asm cannot do that today (in fact, this is similar to the
+      // "data access" case), so dllimport functions are ignored here.
+      if (Fn && !Fn->hasDLLImportStorageClass())
+        return true;
+    }
+  }
+  return false;
+}
+
/// visitInlineAsm - Handle a call to an InlineAsm object.
void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
const BasicBlock *EHPadBB) {
Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
}
+ int OpNo = -1;
+ SmallVector<StringRef> AsmStrs;
+ IA->collectAsmStrs(AsmStrs);
+
// Second pass over the constraints: compute which constraint option to use.
for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
+ if (OpInfo.hasArg() || OpInfo.Type == InlineAsm::isOutput)
+ OpNo++;
+
// If this is an output operand with a matching input operand, look up the
// matching input. If their types mismatch, e.g. one is an integer, the
// other is floating point, or their sizes are different, flag it as an
OpInfo.ConstraintType == TargetLowering::C_Address)
continue;
+ // In Linux PIC model, there are 4 cases about value/label addressing:
+ //
+ // 1: Function call or Label jmp inside the module.
+ // 2: Data access (such as global variable, static variable) inside module.
+ // 3: Function call or Label jmp outside the module.
+ // 4: Data access (such as global variable) outside the module.
+ //
+  // Because the current LLVM inline asm architecture is designed not to
+  // "recognize" the asm code, it is quite hard to treat memory addressing
+  // differently for the same value/address used in different instructions.
+  // For example, in the PIC model a function call may go through the PLT or
+  // be directly PC-relative, while lea/mov of a function address may use the
+  // GOT.
+ //
+ // Here we try to "recognize" function call for the case 1 and case 3 in
+ // inline asm. And try to adjust the constraint for them.
+ //
+  // TODO: Since current inline asm discourages jumping to labels outside the
+  // module, we do not handle jumps to function labels yet, but this should
+  // be enhanced (especially for the PIC model) if meaningful requirements
+  // arise.
+ if (OpInfo.isIndirect && isFunction(OpInfo.CallOperand) &&
+ TLI.isInlineAsmTargetBranch(AsmStrs, OpNo) &&
+ TM.getCodeModel() != CodeModel::Large) {
+ OpInfo.isIndirect = false;
+ OpInfo.ConstraintType = TargetLowering::C_Address;
+ }
+
// If this is a memory input, and if the operand is not indirect, do what we
// need to provide an address for the memory input.
if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
break;
}
- if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
- OpInfo.ConstraintType == TargetLowering::C_Address) {
+ if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
assert((OpInfo.isIndirect ||
OpInfo.ConstraintType != TargetLowering::C_Memory) &&
"Operand must be indirect to be a mem!");
break;
}
+ if (OpInfo.ConstraintType == TargetLowering::C_Address) {
+ assert(InOperandVal.getValueType() ==
+ TLI.getPointerTy(DAG.getDataLayout()) &&
+ "Address operands expect pointer values");
+
+ unsigned ConstraintID =
+ TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
+ assert(ConstraintID != InlineAsm::Constraint_Unknown &&
+ "Failed to convert memory constraint code to constraint id.");
+
+ unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
+
+ SDValue AsmOp = InOperandVal;
+ if (isFunction(InOperandVal)) {
+ auto *GA = dyn_cast<GlobalAddressSDNode>(InOperandVal);
+ ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Func, 1);
+ AsmOp = DAG.getTargetGlobalAddress(GA->getGlobal(), getCurSDLoc(),
+ InOperandVal.getValueType(),
+ GA->getOffset());
+ }
+
+ // Add information to the INLINEASM node to know about this input.
+ ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
+
+ AsmNodeOperands.push_back(
+ DAG.getTargetConstant(ResOpType, getCurSDLoc(), MVT::i32));
+
+ AsmNodeOperands.push_back(AsmOp);
+ break;
+ }
+
assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
OpInfo.ConstraintType == TargetLowering::C_Register) &&
"Unknown constraint type!");
while (i != e) {
unsigned Flags = cast<ConstantSDNode>(InOps[i])->getZExtValue();
- if (!InlineAsm::isMemKind(Flags)) {
+ if (!InlineAsm::isMemKind(Flags) && !InlineAsm::isFuncKind(Flags)) {
// Just skip over this operand, copying the operands verbatim.
Ops.insert(Ops.end(), InOps.begin()+i,
InOps.begin()+i+InlineAsm::getNumOperandRegisters(Flags) + 1);
// Add this to the output node.
unsigned NewFlags =
- InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size());
+ InlineAsm::isMemKind(Flags)
+ ? InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size())
+ : InlineAsm::getFlagWord(InlineAsm::Kind_Func, SelOps.size());
NewFlags = InlineAsm::getFlagWordForMem(NewFlags, ConstraintID);
Ops.push_back(CurDAG->getTargetConstant(NewFlags, DL, MVT::i32));
llvm::append_range(Ops, SelOps);
return FTy;
}
+// Fill AsmStrs with the individual instruction strings of this inline asm,
+// splitting the asm string on the "\n\t" separator and dropping empty pieces.
+void InlineAsm::collectAsmStrs(SmallVectorImpl<StringRef> &AsmStrs) const {
+  StringRef AsmStr(AsmString);
+  AsmStrs.clear();
+
+  // TODO: 1) Unify the delimiter for inline asm; other delimiters such as
+  // "\0A" and ";" also occur in practice.
+  // 2) Enhance StringRef: some special delimiters ("\0") cannot be used to
+  // split a StringRef. Also, an empty StringRef cannot call split (it will
+  // get stuck), hence the early return below.
+  if (AsmStr.empty())
+    return;
+  AsmStr.split(AsmStrs, "\n\t", -1, false);
+}
+
/// Parse - Analyze the specified string (e.g. "==&{eax}") and fill in the
/// fields in this structure. If the constraint string is not understood,
/// return true, otherwise return false.
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
}
+// Given the per-instruction asm strings (as produced by
+// InlineAsm::collectAsmStrs), return the instruction text that precedes
+// operand number OpNo (e.g. "call dword ptr " for "call dword ptr ${0:P}"),
+// or an empty StringRef if no instruction references that operand.
+static StringRef getInstrStrFromOpNo(const SmallVectorImpl<StringRef> &AsmStrs,
+                                     unsigned OpNo) {
+  const APInt Operand(32, OpNo);
+  std::string OpNoStr = llvm::toString(Operand, 10, false);
+  std::string Str(" $");
+
+  std::string OpNoStr1(Str + OpNoStr); // e.g. " $1" (OpNo=1)
+  std::string OpNoStr2(Str + "{" + OpNoStr + ":"); // With modifier, e.g. ${1:P}
+
+  auto I = StringRef::npos;
+  for (auto &AsmStr : AsmStrs) {
+    // Match the OpNo string exactly so that a sub-string does not match,
+    // e.g. "$12" contains "$1". A bare " $1" only matches when it ends the
+    // instruction; otherwise require a trailing comma or a modifier form.
+    if (AsmStr.endswith(OpNoStr1))
+      I = AsmStr.size() - OpNoStr1.size();
+
+    // Get the index of the operand in AsmStr.
+    if (I == StringRef::npos)
+      I = AsmStr.find(OpNoStr1 + ",");
+    if (I == StringRef::npos)
+      I = AsmStr.find(OpNoStr2);
+
+    if (I == StringRef::npos)
+      continue;
+
+    assert(I > 0 && "Unexpected inline asm string!");
+    // Remove the operand string and label (if it exists).
+    // For example:
+    // ".L__MSASMLABEL_.${:uid}__l:call dword ptr ${0:P}"
+    // ==>
+    // ".L__MSASMLABEL_.${:uid}__l:call dword ptr "
+    // ==>
+    // "call dword ptr "
+    auto TmpStr = AsmStr.substr(0, I);
+    I = TmpStr.rfind(':');
+    if (I == StringRef::npos)
+      return TmpStr;
+
+    assert(I < TmpStr.size() && "Unexpected inline asm string!");
+    auto Asm = TmpStr.drop_front(I + 1);
+    return Asm;
+  }
+
+  return StringRef();
+}
+
+// Decide whether inline asm operand OpNo is used as a branch target on X86:
+// it is one exactly when the instruction text that references it mentions
+// "call".
+bool X86TargetLowering::isInlineAsmTargetBranch(
+    const SmallVectorImpl<StringRef> &AsmStrs, unsigned OpNo) const {
+  return getInstrStrFromOpNo(AsmStrs, OpNo).contains("call");
+}
+
/// Provide custom lowering hooks for some operations.
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
+ bool isInlineAsmTargetBranch(const SmallVectorImpl<StringRef> &AsmStrs,
+ unsigned OpNo) const override;
+
/// Lower interleaved load(s) into target specific
/// instructions/intrinsics.
bool lowerInterleavedLoad(LoadInst *LI,
--- /dev/null
+; RUN: llc -O2 --relocation-model=pic -mtriple=i386-unknown-linux-gnu < %s 2>&1 | FileCheck %s
+
+; List the source code:
+; // clang -m32 -fasm-blocks -S t.c -O2 -fpic -emit-llvm
+; int GV = 17;
+;
+; extern unsigned int extern_func();
+; static unsigned int static_func() __attribute__((noinline));
+; static unsigned int static_func() {
+; return GV++;
+; }
+;
+; void func() {
+; static_func();
+; __asm {
+; call static_func
+; call extern_func
+; shr eax, 0
+; shr ebx, 0
+; shr ecx, 0
+; shr edx, 0
+; shr edi, 0
+; shr esi, 0
+; shr ebp, 0
+; shr esp, 0
+; }
+; }
+
+@GV = local_unnamed_addr global i32 17, align 4
+
+define void @func() local_unnamed_addr #0 {
+; CHECK-LABEL: func:
+; CHECK: calll .L0$pb
+; CHECK-NEXT: .L0$pb:
+; CHECK-NEXT: popl %ebx
+; CHECK-NEXT: .Ltmp0:
+; CHECK-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L0$pb), %ebx
+; CHECK-NEXT: calll static_func
+; CHECK-NEXT: #APP
+; CHECK-EMPTY:
+; CHECK-NEXT: calll static_func
+; CHECK-NEXT: calll extern_func@PLT
+; CHECK-NEXT: shrl $0, %eax
+; CHECK-NEXT: shrl $0, %ebx
+; CHECK-NEXT: shrl $0, %ecx
+; CHECK-NEXT: shrl $0, %edx
+; CHECK-NEXT: shrl $0, %edi
+; CHECK-NEXT: shrl $0, %esi
+; CHECK-NEXT: shrl $0, %ebp
+; CHECK-NEXT: shrl $0, %esp
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+entry:
+ %call = tail call i32 @static_func()
+ tail call void asm sideeffect inteldialect "call dword ptr ${0:P}\0A\09call dword ptr ${1:P}\0A\09shr eax, $$0\0A\09shr ebx, $$0\0A\09shr ecx, $$0\0A\09shr edx, $$0\0A\09shr edi, $$0\0A\09shr esi, $$0\0A\09shr ebp, $$0\0A\09shr esp, $$0", "*m,*m,~{eax},~{ebp},~{ebx},~{ecx},~{edi},~{edx},~{flags},~{esi},~{esp},~{dirflag},~{fpsr},~{flags}"(ptr nonnull elementtype(i32 (...)) @static_func, ptr nonnull elementtype(i32 (...)) @extern_func) #0
+ ret void
+}
+
+declare i32 @extern_func(...) #0
+
+define internal i32 @static_func() #0 {
+entry:
+ %0 = load i32, ptr @GV, align 4
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, ptr @GV, align 4
+ ret i32 %0
+}
+
+attributes #0 = { nounwind }
; RUN: llc -mtriple=x86_64-unknown-unknown -no-integrated-as < %s 2>&1 | FileCheck %s
-define ptr @foo(ptr %ptr) {
+define ptr @foo(ptr %Ptr) {
; CHECK-LABEL: foo:
- %1 = tail call ptr asm "lea $1, $0", "=r,p,~{dirflag},~{fpsr},~{flags}"(ptr %ptr)
-; CHECK: #APP
-; CHECK-NEXT: lea (%rdi), %rax
+; asm {mov rax, Pointer; lea rax, Pointer}
+; LEA: Computes the effective address of the second operand and stores it in the first operand
+ %Ptr.addr = alloca ptr, align 8
+ store ptr %Ptr, ptr %Ptr.addr, align 8
+; CHECK: movq %rdi, -8(%rsp)
+ %1 = tail call ptr asm "mov $1, $0\0A\09lea $2, $0", "=r,p,*m,~{dirflag},~{fpsr},~{flags}"(ptr %Ptr, ptr elementtype(ptr) %Ptr.addr)
+; CHECK-NEXT: #APP
+; CHECK-NEXT: mov (%rdi), %rax
+; CHECK-NEXT: lea -8(%rsp), %rax
; CHECK-NEXT: #NO_APP
ret ptr %1
; CHECK-NEXT: retq