It used to be that all of our intrinsics were call instructions, but over time we've added more and more invokable intrinsics. According to the verifier, we're up to 8 right now. Since IntrinsicInst is a subclass of CallInst, this puts us in an awkward spot where the idiomatic way to check for an intrinsic gives a false negative if the intrinsic is invoked.
This change switches IntrinsicInst from being a subclass of CallInst to being a subclass of CallBase. This allows invoked intrinsics to be instances of IntrinsicInst, at the cost of requiring a few more casts to CallInst in places where the intrinsic really is known to be a call, not an invoke.
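A minimal sketch of the difference (the helper below is hypothetical, not part of this patch):

  #include "llvm/IR/IntrinsicInst.h"
  using namespace llvm;

  // Hypothetical helper: report whether an instruction is an invoked intrinsic.
  // Before this change, dyn_cast<IntrinsicInst> returned null for an InvokeInst,
  // so this always answered false; with IntrinsicInst deriving from CallBase it
  // now answers correctly.
  static bool isInvokedIntrinsic(const Instruction &I) {
    const auto *II = dyn_cast<IntrinsicInst>(&I);
    return II && isa<InvokeInst>(II);
  }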
After this lands and has baked for a couple of days, planned cleanups:
Make GCStatepointInst an IntrinsicInst subclass.
Merge intrinsic handling in InstCombine and use the idiomatic visitIntrinsicInst entry point for InstVisitor.
Do the same in SelectionDAG.
Do the same in FastISel.
Differential Revision: https://reviews.llvm.org/D99976
// instruction to a special delegation helper.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
RetTy visit##OPCODE(CLASS &I) { \
- if (NUM == Instruction::Call) \
- return delegateCallInst(I); \
+ if (NUM == Instruction::Call || NUM == Instruction::Invoke) \
+ return delegateCallBase(I); \
else \
DELEGATE(CLASS); \
}
RetTy visitVAStartInst(VAStartInst &I) { DELEGATE(IntrinsicInst); }
RetTy visitVAEndInst(VAEndInst &I) { DELEGATE(IntrinsicInst); }
RetTy visitVACopyInst(VACopyInst &I) { DELEGATE(IntrinsicInst); }
- RetTy visitIntrinsicInst(IntrinsicInst &I) { DELEGATE(CallInst); }
+ RetTy visitIntrinsicInst(IntrinsicInst &I) {
+ if (isa<CallInst>(I))
+ return static_cast<SubClass*>(this)->visitCallInst(cast<CallInst>(I));
+ else
+ return static_cast<SubClass*>(this)->visitInvokeInst(cast<InvokeInst>(I));
+ }
RetTy visitCallInst(CallInst &I) { DELEGATE(CallBase); }
RetTy visitInvokeInst(InvokeInst &I) { DELEGATE(CallBase); }
RetTy visitCallBrInst(CallBrInst &I) { DELEGATE(CallBase); }
private:
- // Special helper function to delegate to CallInst subclass visitors.
+ // Special helper function to delegate to CallBase subclass visitors.
- RetTy delegateCallInst(CallInst &I) {
+ RetTy delegateCallBase(CallBase &I) {
if (const Function *F = I.getCalledFunction()) {
switch (F->getIntrinsicID()) {
default: DELEGATE(IntrinsicInst);
case Intrinsic::not_intrinsic: break;
}
}
- DELEGATE(CallInst);
+ if (isa<CallInst>(I))
+ return static_cast<SubClass*>(this)->visitCallInst(cast<CallInst>(I));
+ else
+ return static_cast<SubClass*>(this)->visitInvokeInst(cast<InvokeInst>(I));
}
// An overload that will never actually be called, it is used only from dead
// code in the dispatching from opcodes to instruction subclasses.
- RetTy delegateCallInst(Instruction &I) {
- llvm_unreachable("delegateCallInst called for non-CallInst");
+ RetTy delegateCallBase(Instruction &I) {
+ llvm_unreachable("delegateCallBase called for non-CallBase");
}
};
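For context, a sketch of the kind of visitor this dispatch change affects (the counter class is hypothetical, not part of this patch). With the Invoke opcode now routed through delegateCallBase, visitIntrinsicInst fires for invoked intrinsics as well:

  #include "llvm/IR/InstVisitor.h"
  using namespace llvm;

  // Hypothetical visitor: counts every intrinsic in a function. Previously it
  // only saw intrinsics behind a CallInst; it now also sees invoked intrinsics.
  struct IntrinsicCounter : InstVisitor<IntrinsicCounter> {
    unsigned Count = 0;
    void visitIntrinsicInst(IntrinsicInst &II) { ++Count; }
  };
  // Usage: IntrinsicCounter IC; IC.visit(F);  // F is a Function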
// if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(Inst))
// ... MCI->getDest() ... MCI->getSource() ...
//
-// All intrinsic function calls are instances of the call instruction, so these
-// are all subclasses of the CallInst class. Note that none of these classes
-// has state or virtual methods, which is an important part of this gross/neat
-// hack working.
+// All intrinsic function calls are instances of the call or invoke instruction,
+// so these are all subclasses of the CallBase class. Note that none of these
+// classes has state or virtual methods, which is an important part of this
+// gross/neat hack working.
//
//===----------------------------------------------------------------------===//
/// A wrapper class for inspecting calls to intrinsic functions.
/// This allows the standard isa/dyncast/cast functionality to work with calls
/// to intrinsic functions.
-class IntrinsicInst : public CallInst {
+class IntrinsicInst : public CallBase {
public:
IntrinsicInst() = delete;
IntrinsicInst(const IntrinsicInst &) = delete;
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const CallInst *I) {
+ static bool classof(const CallBase *I) {
if (const Function *CF = I->getCalledFunction())
return CF->isIntrinsic();
return false;
}
static bool classof(const Value *V) {
- return isa<CallInst>(V) && classof(cast<CallInst>(V));
+ return isa<CallBase>(V) && classof(cast<CallBase>(V));
}
};
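The remaining hunks follow one pattern: an explicit cast to CallInst is added exactly where the intrinsic is known, or required, to be a call rather than an invoke. Roughly (a sketch; the consumer below is hypothetical):

  #include "llvm/IR/IntrinsicInst.h"
  using namespace llvm;

  // Hypothetical interface that still wants the call form.
  void lowerAsCall(CallInst *CI);

  void handleIntrinsic(IntrinsicInst *II) {
    // isa<IntrinsicInst> no longer implies isa<CallInst>, so convert explicitly
    // where the call form is actually needed.
    if (auto *CI = dyn_cast<CallInst>(II))
      lowerAsCall(CI);
  }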
// Find llvm.assume intrinsics for this llvm.type.test call.
for (const Use &CIU : CI->uses())
if (auto *Assume = dyn_cast<AssumeInst>(CIU.getUser()))
- Assumes.push_back(Assume);
+ Assumes.push_back(cast<CallInst>(Assume));
// If we found any, search for virtual calls based on %p and add them to
// DevirtCalls.
return true;
}
case Intrinsic::experimental_stackmap:
- return selectStackmap(II);
+ return selectStackmap(cast<CallInst>(II));
case Intrinsic::experimental_patchpoint_void:
case Intrinsic::experimental_patchpoint_i64:
- return selectPatchpoint(II);
+ return selectPatchpoint(cast<CallInst>(II));
case Intrinsic::xray_customevent:
- return selectXRayCustomEvent(II);
+ return selectXRayCustomEvent(cast<CallInst>(II));
case Intrinsic::xray_typedevent:
- return selectXRayTypedEvent(II);
+ return selectXRayTypedEvent(cast<CallInst>(II));
}
return fastLowerIntrinsicCall(II);
if (Function *F = CI->getCalledFunction())
if (F->getIntrinsicID() == Intrinsic::gcroot) {
std::pair<CallInst *, AllocaInst *> Pair = std::make_pair(
- CI,
+ cast<CallInst>(CI),
cast<AllocaInst>(CI->getArgOperand(0)->stripPointerCasts()));
if (IsNullValue(CI->getArgOperand(1)))
Roots.push_back(Pair);
for (const Instruction &I : BB)
if (const auto *II = dyn_cast<IntrinsicInst>(&I))
if (II->getIntrinsicID() == Intrinsic::stackprotector)
- return II;
+ return cast<CallInst>(II);
return nullptr;
}
bool atBegin(Parent->begin() == Me);
if (!atBegin)
--Me;
- IL->LowerIntrinsicCall(&I);
+ IL->LowerIntrinsicCall(cast<CallInst>(&I));
// Restore the CurInst pointer to the first instruction newly inserted, if
// any.
return false;
const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
- return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 1);
+ return lowerCallTo(cast<CallInst>(II), IntrMemName,
+ II->getNumArgOperands() - 1);
}
case Intrinsic::memset: {
const MemSetInst *MSI = cast<MemSetInst>(II);
// address spaces.
return false;
- return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
+ return lowerCallTo(cast<CallInst>(II), "memset",
+ II->getNumArgOperands() - 1);
}
case Intrinsic::sin:
case Intrinsic::cos:
if (!MTI->getLength()->getType()->isIntegerTy(32))
return false;
const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
- return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 1);
+ return lowerCallTo(cast<CallInst>(II), IntrMemName, II->getNumArgOperands() - 1);
}
case Intrinsic::memset: {
const MemSetInst *MSI = cast<MemSetInst>(II);
return false;
if (!MSI->getLength()->getType()->isIntegerTy(32))
return false;
- return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
+ return lowerCallTo(cast<CallInst>(II), "memset", II->getNumArgOperands() - 1);
}
}
return false;
if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
return false;
- return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 1);
+ return lowerCallTo(cast<CallInst>(II), "memcpy", II->getNumArgOperands() - 1);
}
case Intrinsic::memset: {
const MemSetInst *MSI = cast<MemSetInst>(II);
if (MSI->getDestAddressSpace() > 255)
return false;
- return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
+ return lowerCallTo(cast<CallInst>(II), "memset", II->getNumArgOperands() - 1);
}
case Intrinsic::stackprotector: {
// Emit code to store the stack guard onto the stack.
Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
Instruction *foldItoFPtoI(CastInst &FI);
Instruction *visitSelectInst(SelectInst &SI);
+ Instruction *visitCallBase(CallBase &Call);
Instruction *visitCallInst(CallInst &CI);
Instruction *visitInvokeInst(InvokeInst &II);
Instruction *visitCallBrInst(CallBrInst &CBI);
Instruction &CtxI, Value *&OperationResult,
Constant *&OverflowResult);
- Instruction *visitCallBase(CallBase &Call);
Instruction *tryOptimizeCall(CallInst *CI);
bool transformConstExprCastCall(CallBase &Call);
Instruction *transformCallThroughTrampoline(CallBase &Call,
Value *VAArgTLSOriginCopy = nullptr;
Value *VAArgOverflowSize = nullptr;
- SmallVector<CallInst*, 16> VAStartInstrumentationList;
+ SmallVector<IntrinsicInst*, 16> VAStartInstrumentationList;
enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
// Instrument va_start.
// Copy va_list shadow from the backup copy of the TLS contents.
for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
- CallInst *OrigInst = VAStartInstrumentationList[i];
+ auto *OrigInst = VAStartInstrumentationList[i];
IRBuilder<> IRB(OrigInst->getNextNode());
Value *VAListTag = OrigInst->getArgOperand(0);
Value *VAArgTLSCopy = nullptr;
Value *VAArgSize = nullptr;
- SmallVector<CallInst*, 16> VAStartInstrumentationList;
+ SmallVector<IntrinsicInst*, 16> VAStartInstrumentationList;
VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
// Instrument va_start.
// Copy va_list shadow from the backup copy of the TLS contents.
for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
- CallInst *OrigInst = VAStartInstrumentationList[i];
+ auto *OrigInst = VAStartInstrumentationList[i];
IRBuilder<> IRB(OrigInst->getNextNode());
Value *VAListTag = OrigInst->getArgOperand(0);
Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
Value *VAArgTLSCopy = nullptr;
Value *VAArgOverflowSize = nullptr;
- SmallVector<CallInst*, 16> VAStartInstrumentationList;
+ SmallVector<IntrinsicInst*, 16> VAStartInstrumentationList;
enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
// Instrument va_start, copy va_list shadow from the backup copy of
// the TLS contents.
for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
- CallInst *OrigInst = VAStartInstrumentationList[i];
+ auto *OrigInst = VAStartInstrumentationList[i];
IRBuilder<> IRB(OrigInst->getNextNode());
Value *VAListTag = OrigInst->getArgOperand(0);
Value *VAArgTLSCopy = nullptr;
Value *VAArgSize = nullptr;
- SmallVector<CallInst*, 16> VAStartInstrumentationList;
+ SmallVector<IntrinsicInst*, 16> VAStartInstrumentationList;
VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
// Instrument va_start.
// Copy va_list shadow from the backup copy of the TLS contents.
for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
- CallInst *OrigInst = VAStartInstrumentationList[i];
+ auto *OrigInst = VAStartInstrumentationList[i];
IRBuilder<> IRB(OrigInst->getNextNode());
Value *VAListTag = OrigInst->getArgOperand(0);
Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
Value *VAArgTLSOriginCopy = nullptr;
Value *VAArgOverflowSize = nullptr;
- SmallVector<CallInst *, 16> VAStartInstrumentationList;
+ SmallVector<IntrinsicInst *, 16> VAStartInstrumentationList;
enum class ArgKind {
GeneralPurpose,
// Copy va_list shadow from the backup copy of the TLS contents.
for (size_t VaStartNo = 0, VaStartNum = VAStartInstrumentationList.size();
VaStartNo < VaStartNum; VaStartNo++) {
- CallInst *OrigInst = VAStartInstrumentationList[VaStartNo];
+ auto *OrigInst = VAStartInstrumentationList[VaStartNo];
IRBuilder<> IRB(OrigInst->getNextNode());
Value *VAListTag = OrigInst->getArgOperand(0);
copyRegSaveArea(IRB, VAListTag);