From a58b62b4a2b96c31b49338b262b609db746449e8 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Mon, 27 Apr 2020 20:15:59 -0700
Subject: [PATCH] [IR] Replace all uses of CallBase::getCalledValue() with getCalledOperand().

This method has been commented as deprecated for a while. Remove it and
replace all uses with the equivalent getCalledOperand().

I also made a few cleanups in here. For example, removing uses of
getElementType on a pointer where we could just use getFunctionType
from the call.

Differential Revision: https://reviews.llvm.org/D78882
---
 clang/lib/CodeGen/CGCall.cpp                       | 14 +++++------
 clang/lib/CodeGen/CGObjC.cpp                       |  3 ++-
 clang/lib/CodeGen/CodeGenFunction.cpp              |  2 +-
 lldb/source/Expression/IRInterpreter.cpp           |  2 +-
 .../ExpressionParser/Clang/IRDynamicChecks.cpp     |  2 +-
 .../Plugins/ExpressionParser/Clang/IRForTarget.cpp |  2 +-
 llvm/include/llvm-c/Core.h                         |  4 +--
 llvm/include/llvm/CodeGen/FastISel.h               |  2 +-
 llvm/include/llvm/IR/AbstractCallSite.h            |  6 ++---
 llvm/include/llvm/IR/InstrTypes.h                  |  4 ---
 llvm/lib/Analysis/AliasAnalysisEvaluator.cpp       |  2 +-
 llvm/lib/Analysis/InstructionSimplify.cpp          |  2 +-
 llvm/lib/Analysis/Lint.cpp                         |  2 +-
 llvm/lib/Analysis/MemorySSA.cpp                    |  4 +--
 llvm/lib/Analysis/ModuleSummaryAnalysis.cpp        |  2 +-
 llvm/lib/Analysis/StackSafetyAnalysis.cpp          |  2 +-
 llvm/lib/Bitcode/Writer/BitcodeWriter.cpp          |  6 ++---
 llvm/lib/CodeGen/CodeGenPrepare.cpp                |  4 +--
 llvm/lib/CodeGen/GlobalISel/CallLowering.cpp       |  2 +-
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp       |  7 +++---
 llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp  |  2 +-
 llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp      |  2 +-
 llvm/lib/CodeGen/SelectionDAG/FastISel.cpp         |  4 +--
 .../CodeGen/SelectionDAG/FunctionLoweringInfo.cpp  |  2 +-
 .../CodeGen/SelectionDAG/SelectionDAGBuilder.cpp   | 16 ++++++------
 llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp   |  2 +-
 llvm/lib/CodeGen/WasmEHPrepare.cpp                 |  4 +--
 llvm/lib/CodeGen/WinEHPrepare.cpp                  |  2 +-
 llvm/lib/ExecutionEngine/Interpreter/Execution.cpp |  2 +-
 llvm/lib/IR/AsmWriter.cpp                          |  6 ++---
 llvm/lib/IR/Core.cpp                               |  2 +-
 llvm/lib/IR/Instructions.cpp                       | 18 ++++++--------
 llvm/lib/IR/Verifier.cpp                           |  8 +++---
 llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp |  2 +-
 .../Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp |  2 +-
 .../Target/AMDGPU/AMDGPUFixFunctionBitcasts.cpp    |  3 ++-
 .../Target/AMDGPU/AMDGPUTargetTransformInfo.cpp    |  6 ++---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp          |  2 +-
 llvm/lib/Target/ARM/ARMFastISel.cpp                |  2 +-
 llvm/lib/Target/ARM/ARMISelLowering.cpp            |  2 +-
 llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp    |  2 +-
 llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp        | 14 +++++------
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp        |  2 +-
 llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp |  2 +-
 .../lib/Target/WebAssembly/WebAssemblyFastISel.cpp |  4 +--
 .../WebAssembly/WebAssemblyFixFunctionBitcasts.cpp |  4 +--
 .../WebAssemblyLowerEmscriptenEHSjLj.cpp           | 29 ++++++++--------------
 llvm/lib/Target/X86/X86ISelLowering.cpp            |  2 +-
 llvm/lib/Target/X86/X86WinEHState.cpp              |  2 +-
 llvm/lib/Transforms/Coroutines/CoroSplit.cpp       |  4 +--
 llvm/lib/Transforms/IPO/CalledValuePropagation.cpp |  2 +-
 llvm/lib/Transforms/IPO/GlobalOpt.cpp              |  6 ++---
 llvm/lib/Transforms/IPO/PruneEH.cpp                |  2 +-
 llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp     |  2 +-
 .../Transforms/InstCombine/InstCombineCalls.cpp    | 11 ++++----
 .../Instrumentation/AddressSanitizer.cpp           |  2 +-
 .../Instrumentation/DataFlowSanitizer.cpp          | 11 ++++----
 .../Transforms/Instrumentation/MemorySanitizer.cpp |  4 +--
 .../Instrumentation/SanitizerCoverage.cpp          |  2 +-
 .../Instrumentation/ValueProfilePlugins.inc        |  2 +-
 .../Transforms/Scalar/RewriteStatepointsForGC.cpp  |  4 +--
 llvm/lib/Transforms/Utils/CallPromotionUtils.cpp   |  8 +++---
 llvm/lib/Transforms/Utils/Evaluator.cpp            |  8 +++---
 llvm/lib/Transforms/Utils/InlineFunction.cpp       |  4 +--
 llvm/lib/Transforms/Utils/Local.cpp                |  8 +++---
 llvm/lib/Transforms/Utils/LowerInvoke.cpp          |  2 +-
 llvm/lib/Transforms/Utils/SimplifyCFG.cpp          |  2 +-
 llvm/test/LTO/X86/type-mapping-bug3.ll             |  2 +-
 llvm/tools/llvm-diff/DiffConsumer.cpp              | 12 ++++-----
 llvm/tools/llvm-diff/DifferenceEngine.cpp          |  8 +++---
 mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp       |  4 +--
 71 files changed, 161 insertions(+), 173 deletions(-)

diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index b3fda6b..e0e895f 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -2701,10 +2701,10 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
 
   bool doRetainAutorelease;
 
-  if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
+  if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
     doRetainAutorelease = true;
-  } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
-                                          .objc_retainAutoreleasedReturnValue) {
+  } else if (call->getCalledOperand() ==
+             CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
     doRetainAutorelease = false;
 
     // If we emitted an assembly marker for this call (and the
@@ -2720,8 +2720,8 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
         assert(prev);
       }
       assert(isa<llvm::CallInst>(prev));
-      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
-               CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
+      assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
+             CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
       InstsToKill.push_back(prev);
     }
   } else {
@@ -2764,8 +2764,8 @@ static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
   // Look for a retain call.
   llvm::CallInst *retainCall =
     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
-  if (!retainCall ||
-      retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
+  if (!retainCall || retainCall->getCalledOperand() !=
+                         CGF.CGM.getObjCEntrypoints().objc_retain)
    return nullptr;
 
   // Look for an ordinary load of 'self'.
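
For out-of-tree code that still calls the removed method, the migration is a
one-for-one rename. A minimal sketch of the pattern (hypothetical helper, not
code taken from this patch):

  #include "llvm/IR/Function.h"
  #include "llvm/IR/InstrTypes.h"

  using namespace llvm;

  // Hypothetical helper illustrating the rename: the callee operand is now
  // reached via getCalledOperand(); getCalledFunction() and isInlineAsm()
  // remain the preferred shortcuts for the common cases.
  static Function *getCalleeIfDirect(const CallBase &CB) {
    // Old:  Value *V = CB.getCalledValue();
    // New:  Value *V = CB.getCalledOperand();
    Value *V = CB.getCalledOperand();
    // Strip bitcasts so a call through "bitcast (@f to ...)" still resolves.
    return dyn_cast<Function>(V->stripPointerCasts());
  }
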
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp index b512592..e3df22a 100644 --- a/clang/lib/CodeGen/CGObjC.cpp +++ b/clang/lib/CodeGen/CGObjC.cpp @@ -2160,7 +2160,8 @@ llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value, if (!mandatory && isa(result)) { llvm::CallInst *call = cast(result->stripPointerCasts()); - assert(call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock); + assert(call->getCalledOperand() == + CGM.getObjCEntrypoints().objc_retainBlock); call->setMetadata("clang.arc.copy_on_escape", llvm::MDNode::get(Builder.getContext(), None)); diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp index 9929c15..24e01c4 100644 --- a/clang/lib/CodeGen/CodeGenFunction.cpp +++ b/clang/lib/CodeGen/CodeGenFunction.cpp @@ -2464,7 +2464,7 @@ void CodeGenFunction::emitAlignmentAssumptionCheck( llvm::Value *OffsetValue, llvm::Value *TheCheck, llvm::Instruction *Assumption) { assert(Assumption && isa(Assumption) && - cast(Assumption)->getCalledValue() == + cast(Assumption)->getCalledOperand() == llvm::Intrinsic::getDeclaration( Builder.GetInsertBlock()->getParent()->getParent(), llvm::Intrinsic::assume) && diff --git a/lldb/source/Expression/IRInterpreter.cpp b/lldb/source/Expression/IRInterpreter.cpp index ddb2d97..6f61a4b 100644 --- a/lldb/source/Expression/IRInterpreter.cpp +++ b/lldb/source/Expression/IRInterpreter.cpp @@ -1371,7 +1371,7 @@ bool IRInterpreter::Interpret(llvm::Module &module, llvm::Function &function, // Find the address of the callee function lldb_private::Scalar I; - const llvm::Value *val = call_inst->getCalledValue(); + const llvm::Value *val = call_inst->getCalledOperand(); if (!frame.EvaluateValue(I, val, module)) { error.SetErrorToGenericError(); diff --git a/lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp b/lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp index d3ab511..b92f00e 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp @@ -465,7 +465,7 @@ protected: } static llvm::Function *GetCalledFunction(llvm::CallInst *inst) { - return GetFunction(inst->getCalledValue()); + return GetFunction(inst->getCalledOperand()); } bool InspectInstruction(llvm::Instruction &i) override { diff --git a/lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp b/lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp index 92294ff..8819b8a 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp @@ -1395,7 +1395,7 @@ bool IRForTarget::RemoveCXAAtExit(BasicBlock &basic_block) { if (func && func->getName() == "__cxa_atexit") remove = true; - llvm::Value *val = call->getCalledValue(); + llvm::Value *val = call->getCalledOperand(); if (val && val->getName() == "__cxa_atexit") remove = true; diff --git a/llvm/include/llvm-c/Core.h b/llvm/include/llvm-c/Core.h index 2d6490d..25802ed 100644 --- a/llvm/include/llvm-c/Core.h +++ b/llvm/include/llvm-c/Core.h @@ -3252,8 +3252,8 @@ LLVMTypeRef LLVMGetCalledFunctionType(LLVMValueRef C); * This expects an LLVMValueRef that corresponds to a llvm::CallInst or * llvm::InvokeInst. 
* - * @see llvm::CallInst::getCalledValue() - * @see llvm::InvokeInst::getCalledValue() + * @see llvm::CallInst::getCalledOperand() + * @see llvm::InvokeInst::getCalledOperand() */ LLVMValueRef LLVMGetCalledValue(LLVMValueRef Instr); diff --git a/llvm/include/llvm/CodeGen/FastISel.h b/llvm/include/llvm/CodeGen/FastISel.h index 02ec1d3..7662179 100644 --- a/llvm/include/llvm/CodeGen/FastISel.h +++ b/llvm/include/llvm/CodeGen/FastISel.h @@ -127,7 +127,7 @@ public: const CallBase &Call, unsigned FixedArgs = ~0U) { RetTy = ResultTy; - Callee = Call.getCalledValue(); + Callee = Call.getCalledOperand(); Symbol = Target; IsInReg = Call.hasRetAttr(Attribute::InReg); diff --git a/llvm/include/llvm/IR/AbstractCallSite.h b/llvm/include/llvm/IR/AbstractCallSite.h index c13521e..18c0db8 100644 --- a/llvm/include/llvm/IR/AbstractCallSite.h +++ b/llvm/include/llvm/IR/AbstractCallSite.h @@ -201,16 +201,16 @@ public: } /// Return the pointer to function that is being called. - Value *getCalledValue() const { + Value *getCalledOperand() const { if (isDirectCall()) - return CB->getCalledValue(); + return CB->getCalledOperand(); return CB->getArgOperand(getCallArgOperandNoForCallee()); } /// Return the function being called if this is a direct call, otherwise /// return null (if it's an indirect call). Function *getCalledFunction() const { - Value *V = getCalledValue(); + Value *V = getCalledOperand(); return V ? dyn_cast(V->stripPointerCasts()) : nullptr; } }; diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h index d28b330..0353afd 100644 --- a/llvm/include/llvm/IR/InstrTypes.h +++ b/llvm/include/llvm/IR/InstrTypes.h @@ -1286,10 +1286,6 @@ public: Value *getCalledOperand() const { return Op(); } - // DEPRECATED: This routine will be removed in favor of `getCalledOperand` in - // the near future. - Value *getCalledValue() const { return getCalledOperand(); } - const Use &getCalledOperandUse() const { return Op(); } Use &getCalledOperandUse() { return Op(); } diff --git a/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp b/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp index 2e44bbd..b1433c5 100644 --- a/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp +++ b/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp @@ -114,7 +114,7 @@ void AAEvaluator::runInternal(Function &F, AAResults &AA) { Stores.insert(&*I); Instruction &Inst = *I; if (auto *Call = dyn_cast(&Inst)) { - Value *Callee = Call->getCalledValue(); + Value *Callee = Call->getCalledOperand(); // Skip actual functions for direct function calls. if (!isa(Callee) && isInterestingPointer(Callee)) Pointers.insert(Callee); diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp index d1970d4..7765cb70 100644 --- a/llvm/lib/Analysis/InstructionSimplify.cpp +++ b/llvm/lib/Analysis/InstructionSimplify.cpp @@ -5407,7 +5407,7 @@ static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) { } Value *llvm::SimplifyCall(CallBase *Call, const SimplifyQuery &Q) { - Value *Callee = Call->getCalledValue(); + Value *Callee = Call->getCalledOperand(); // musttail calls can only be simplified if they are also DCEd. // As we can't guarantee this here, don't simplify them. 
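
The secondary cleanup mentioned in the commit message, taking the signature
directly from the call site rather than from the callee pointer's element
type, looks roughly like this (a sketch with a hypothetical helper, not code
from this patch):

  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/InstrTypes.h"

  using namespace llvm;

  // Hypothetical helper showing the cleanup applied in several files below.
  static FunctionType *getCallSignature(const CallBase &CB) {
    // Old pattern (removed in this patch):
    //   auto *PTy = cast<PointerType>(CB.getCalledOperand()->getType());
    //   auto *FTy = cast<FunctionType>(PTy->getElementType());
    // New pattern: the call already knows its function type.
    return CB.getFunctionType();
  }
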
diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp index 338c6a4..30a0846 100644 --- a/llvm/lib/Analysis/Lint.cpp +++ b/llvm/lib/Analysis/Lint.cpp @@ -220,7 +220,7 @@ void Lint::visitFunction(Function &F) { } void Lint::visitCallBase(CallBase &I) { - Value *Callee = I.getCalledValue(); + Value *Callee = I.getCalledOperand(); visitMemoryReference(I, Callee, MemoryLocation::UnknownSize, 0, nullptr, MemRef::Callee); diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp index 77f4125..aeb3fec 100644 --- a/llvm/lib/Analysis/MemorySSA.cpp +++ b/llvm/lib/Analysis/MemorySSA.cpp @@ -167,7 +167,7 @@ public: if (!IsCall) return Loc == Other.Loc; - if (Call->getCalledValue() != Other.Call->getCalledValue()) + if (Call->getCalledOperand() != Other.Call->getCalledOperand()) return false; return Call->arg_size() == Other.Call->arg_size() && @@ -203,7 +203,7 @@ template <> struct DenseMapInfo { hash_code hash = hash_combine(MLOC.IsCall, DenseMapInfo::getHashValue( - MLOC.getCall()->getCalledValue())); + MLOC.getCall()->getCalledOperand())); for (const Value *Arg : MLOC.getCall()->args()) hash = hash_combine(hash, DenseMapInfo::getHashValue(Arg)); diff --git a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp index 2f6e2a0..b900a38 100644 --- a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp +++ b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp @@ -316,7 +316,7 @@ static void computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M, if (HasLocalsInUsedOrAsm && CI && CI->isInlineAsm()) HasInlineAsmMaybeReferencingInternal = true; - auto *CalledValue = CB->getCalledValue(); + auto *CalledValue = CB->getCalledOperand(); auto *CalledFunction = CB->getCalledFunction(); if (CalledValue && !CalledFunction) { CalledValue = CalledValue->stripPointerCasts(); diff --git a/llvm/lib/Analysis/StackSafetyAnalysis.cpp b/llvm/lib/Analysis/StackSafetyAnalysis.cpp index 1e06fd8..716027a 100644 --- a/llvm/lib/Analysis/StackSafetyAnalysis.cpp +++ b/llvm/lib/Analysis/StackSafetyAnalysis.cpp @@ -353,7 +353,7 @@ bool StackSafetyLocalAnalysis::analyzeAllUses(const Value *Ptr, UseInfo &US) { // Do not follow aliases, otherwise we could inadvertently follow // dso_preemptable aliases or aliases with interposable linkage. 
const GlobalValue *Callee = - dyn_cast(CB.getCalledValue()->stripPointerCasts()); + dyn_cast(CB.getCalledOperand()->stripPointerCasts()); if (!Callee) { US.updateRange(UnknownRange); return false; diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp index be8307b..0a6741d 100644 --- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -2775,7 +2775,7 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I, case Instruction::Invoke: { const InvokeInst *II = cast(&I); - const Value *Callee = II->getCalledValue(); + const Value *Callee = II->getCalledOperand(); FunctionType *FTy = II->getFunctionType(); if (II->hasOperandBundles()) @@ -2851,7 +2851,7 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I, } case Instruction::CallBr: { const CallBrInst *CBI = cast(&I); - const Value *Callee = CBI->getCalledValue(); + const Value *Callee = CBI->getCalledOperand(); FunctionType *FTy = CBI->getFunctionType(); if (CBI->hasOperandBundles()) @@ -3029,7 +3029,7 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I, Vals.push_back(Flags); Vals.push_back(VE.getTypeID(FTy)); - pushValueAndType(CI.getCalledValue(), InstID, Vals); // Callee + pushValueAndType(CI.getCalledOperand(), InstID, Vals); // Callee // Emit value #'s for the fixed parameters. for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) { diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp index 3c95b2e..d37bac6 100644 --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -1889,7 +1889,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) { // Lower inline assembly if we can. // If we found an inline asm expession, and if the target knows how to // lower it to normal LLVM code, do so now. - if (isa(CI->getCalledValue())) { + if (CI->isInlineAsm()) { if (TLI->ExpandInlineAsm(CI)) { // Avoid invalidating the iterator. CurInstIterator = BB->begin(); @@ -4636,7 +4636,7 @@ static bool FindAllMemoryUses( continue; } - InlineAsm *IA = dyn_cast(CI->getCalledValue()); + InlineAsm *IA = dyn_cast(CI->getCalledOperand()); if (!IA) return true; // If this is a memory operand, we're cool, otherwise bail out. diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp index 70bb272..86fbb4b 100644 --- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp @@ -52,7 +52,7 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB, // Try looking through a bitcast from one function type to another. // Commonly happens with calls to objc_msgSend(). - const Value *CalleeV = CB.getCalledValue()->stripPointerCasts(); + const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts(); if (const Function *F = dyn_cast(CalleeV)) Info.Callee = MachineOperand::CreateGA(F, 0); else diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index 72d4dbe..1b1d7c5 100644 --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -1607,7 +1607,7 @@ bool IRTranslator::translateCallBase(const CallBase &CB, // scan is done to check if any instructions are calls. 
bool Success = CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg, - [&]() { return getOrCreateVReg(*CB.getCalledValue()); }); + [&]() { return getOrCreateVReg(*CB.getCalledOperand()); }); // Check if we just inserted a tail call. if (Success) { @@ -1712,9 +1712,8 @@ bool IRTranslator::translateInvoke(const User &U, const BasicBlock *ReturnBB = I.getSuccessor(0); const BasicBlock *EHPadBB = I.getSuccessor(1); - const Value *Callee = I.getCalledValue(); - const Function *Fn = dyn_cast(Callee); - if (isa(Callee)) + const Function *Fn = I.getCalledFunction(); + if (I.isInlineAsm()) return false; // FIXME: support invoking patchpoint and statepoint intrinsics. diff --git a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp index e425910..f05394f 100644 --- a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp +++ b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp @@ -32,7 +32,7 @@ void InlineAsmLowering::anchor() {} bool InlineAsmLowering::lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &Call) const { - const InlineAsm *IA = cast(Call.getCalledValue()); + const InlineAsm *IA = cast(Call.getCalledOperand()); StringRef ConstraintStr = IA->getConstraintString(); bool HasOnlyMemoryClobber = false; diff --git a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp index 2107604..1be9544 100644 --- a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp +++ b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp @@ -39,7 +39,7 @@ static bool lowerLoadRelative(Function &F) { for (auto I = F.use_begin(), E = F.use_end(); I != E;) { auto CI = dyn_cast(I->getUser()); ++I; - if (!CI || CI->getCalledValue() != &F) + if (!CI || CI->getCalledOperand() != &F) continue; IRBuilder<> B(CI); diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp index 7ffc848..d117c60 100644 --- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp @@ -1290,7 +1290,7 @@ bool FastISel::lowerCall(const CallInst *CI) { IsTailCall = false; CallLoweringInfo CLI; - CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), *CI) + CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI) .setTailCall(IsTailCall); return lowerCallTo(CLI); @@ -1300,7 +1300,7 @@ bool FastISel::selectCall(const User *I) { const CallInst *Call = cast(I); // Handle simple inline asms. - if (const InlineAsm *IA = dyn_cast(Call->getCalledValue())) { + if (const InlineAsm *IA = dyn_cast(Call->getCalledOperand())) { // If the inline asm has side effects, then make sure that no local value // lives across by flushing the local value map. if (IA->hasSideEffects()) diff --git a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp index 9b97d59..3f302d5 100644 --- a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp @@ -183,7 +183,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf, // Look for inline asm that clobbers the SP register. 
if (auto *Call = dyn_cast(&I)) { - if (isa(Call->getCalledValue())) { + if (Call->isInlineAsm()) { unsigned SP = TLI->getStackPointerRegisterToSaveRestore(); const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); std::vector Ops = diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 70f0a1a..4ba0cd6 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -346,7 +346,7 @@ static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const char *AsmError = ", possible invalid constraint for vector type"; if (const CallInst *CI = dyn_cast(I)) - if (isa(CI->getCalledValue())) + if (isa(CI->getCalledOperand())) return Ctx.emitError(I, ErrMsg + AsmError); return Ctx.emitError(I, ErrMsg); @@ -2776,7 +2776,7 @@ void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) { LLVMContext::OB_cfguardtarget}) && "Cannot lower invokes with arbitrary operand bundles yet!"); - const Value *Callee(I.getCalledValue()); + const Value *Callee(I.getCalledOperand()); const Function *Fn = dyn_cast(Callee); if (isa(Callee)) visitInlineAsm(I); @@ -2856,7 +2856,7 @@ void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) { {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) && "Cannot lower callbrs with arbitrary operand bundles yet!"); - assert(isa(I.getCalledValue()) && + assert(isa(I.getCalledOperand()) && "Only know how to handle inlineasm callbr"); visitInlineAsm(I); CopyToExportRegsIfNeeded(&I); @@ -7476,7 +7476,7 @@ bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I, void SelectionDAGBuilder::visitCall(const CallInst &I) { // Handle inline assembly differently. - if (isa(I.getCalledValue())) { + if (I.isInlineAsm()) { visitInlineAsm(I); return; } @@ -7648,7 +7648,7 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) { LLVMContext::OB_cfguardtarget}) && "Cannot lower calls with arbitrary operand bundles!"); - SDValue Callee = getValue(I.getCalledValue()); + SDValue Callee = getValue(I.getCalledOperand()); if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) LowerCallSiteWithDeoptBundle(&I, Callee, nullptr); @@ -7949,7 +7949,7 @@ class ExtraFlags { public: explicit ExtraFlags(const CallBase &Call) { - const InlineAsm *IA = cast(Call.getCalledValue()); + const InlineAsm *IA = cast(Call.getCalledOperand()); if (IA->hasSideEffects()) Flags |= InlineAsm::Extra_HasSideEffects; if (IA->isAlignStack()) @@ -7982,7 +7982,7 @@ public: /// visitInlineAsm - Handle a call to an InlineAsm object. void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) { - const InlineAsm *IA = cast(Call.getCalledValue()); + const InlineAsm *IA = cast(Call.getCalledOperand()); /// ConstraintOperands - Information about all of the constraints. 
SDISelAsmOperandInfoVector ConstraintOperands; @@ -8656,7 +8656,7 @@ void SelectionDAGBuilder::visitStackmap(const CallInst &CI) { SmallVector Ops; SDLoc DL = getCurSDLoc(); - Callee = getValue(CI.getCalledValue()); + Callee = getValue(CI.getCalledOperand()); NullPtr = DAG.getIntPtrConstant(0, DL, true); // The stackmap intrinsic only records the live variables (the arguments diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 8a46d08..ced8b1b 100644 --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -4321,7 +4321,7 @@ TargetLowering::ParseConstraints(const DataLayout &DL, const CallBase &Call) const { /// Information about all of the constraints. AsmOperandInfoVector ConstraintOperands; - const InlineAsm *IA = cast(Call.getCalledValue()); + const InlineAsm *IA = cast(Call.getCalledOperand()); unsigned maCount = 0; // Largest number of multiple alternative constraints. // Do a prepass over the constraints, canonicalizing them, and building up the diff --git a/llvm/lib/CodeGen/WasmEHPrepare.cpp b/llvm/lib/CodeGen/WasmEHPrepare.cpp index b7e9005..44f4fe2 100644 --- a/llvm/lib/CodeGen/WasmEHPrepare.cpp +++ b/llvm/lib/CodeGen/WasmEHPrepare.cpp @@ -358,9 +358,9 @@ void WasmEHPrepare::prepareEHPad(BasicBlock *BB, bool NeedPersonality, Instruction *GetExnCI = nullptr, *GetSelectorCI = nullptr; for (auto &U : FPI->uses()) { if (auto *CI = dyn_cast(U.getUser())) { - if (CI->getCalledValue() == GetExnF) + if (CI->getCalledOperand() == GetExnF) GetExnCI = CI; - if (CI->getCalledValue() == GetSelectorF) + if (CI->getCalledOperand() == GetSelectorF) GetSelectorCI = CI; } } diff --git a/llvm/lib/CodeGen/WinEHPrepare.cpp b/llvm/lib/CodeGen/WinEHPrepare.cpp index d6c049f..cb75cd6 100644 --- a/llvm/lib/CodeGen/WinEHPrepare.cpp +++ b/llvm/lib/CodeGen/WinEHPrepare.cpp @@ -955,7 +955,7 @@ void WinEHPrepare::removeImplausibleInstructions(Function &F) { // Skip call sites which are nounwind intrinsics or inline asm. auto *CalledFn = - dyn_cast(CB->getCalledValue()->stripPointerCasts()); + dyn_cast(CB->getCalledOperand()->stripPointerCasts()); if (CalledFn && ((CalledFn->isIntrinsic() && CB->doesNotThrow()) || CB->isInlineAsm())) continue; diff --git a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp index 46fc1bb..62e1ea6 100644 --- a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp +++ b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp @@ -1167,7 +1167,7 @@ void Interpreter::visitCallBase(CallBase &I) { // To handle indirect calls, we must get the pointer value from the argument // and treat it as a function pointer. 
- GenericValue SRC = getOperandValue(SF.Caller->getCalledValue(), SF); + GenericValue SRC = getOperandValue(SF.Caller->getCalledOperand(), SF); callFunction((Function*)GVTOP(SRC), ArgVals); } diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp index 9efcf6f..8a2777c 100644 --- a/llvm/lib/IR/AsmWriter.cpp +++ b/llvm/lib/IR/AsmWriter.cpp @@ -3905,7 +3905,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { PrintCallingConv(CI->getCallingConv(), Out); } - Operand = CI->getCalledValue(); + Operand = CI->getCalledOperand(); FunctionType *FTy = CI->getFunctionType(); Type *RetTy = FTy->getReturnType(); const AttributeList &PAL = CI->getAttributes(); @@ -3944,7 +3944,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { writeOperandBundles(CI); } else if (const InvokeInst *II = dyn_cast(&I)) { - Operand = II->getCalledValue(); + Operand = II->getCalledOperand(); FunctionType *FTy = II->getFunctionType(); Type *RetTy = FTy->getReturnType(); const AttributeList &PAL = II->getAttributes(); @@ -3987,7 +3987,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Out << " unwind "; writeOperand(II->getUnwindDest(), true); } else if (const CallBrInst *CBI = dyn_cast(&I)) { - Operand = CBI->getCalledValue(); + Operand = CBI->getCalledOperand(); FunctionType *FTy = CBI->getFunctionType(); Type *RetTy = FTy->getReturnType(); const AttributeList &PAL = CBI->getAttributes(); diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp index cfa1ebf..4ebe6a0 100644 --- a/llvm/lib/IR/Core.cpp +++ b/llvm/lib/IR/Core.cpp @@ -2840,7 +2840,7 @@ void LLVMRemoveCallSiteStringAttribute(LLVMValueRef C, LLVMAttributeIndex Idx, } LLVMValueRef LLVMGetCalledValue(LLVMValueRef Instr) { - return wrap(unwrap(Instr)->getCalledValue()); + return wrap(unwrap(Instr)->getCalledOperand()); } LLVMTypeRef LLVMGetCalledFunctionType(LLVMValueRef Instr) { diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp index e04a323e..cccbd51 100644 --- a/llvm/lib/IR/Instructions.cpp +++ b/llvm/lib/IR/Instructions.cpp @@ -255,7 +255,7 @@ unsigned CallBase::getNumSubclassExtraOperandsDynamic() const { } bool CallBase::isIndirectCall() const { - const Value *V = getCalledValue(); + const Value *V = getCalledOperand(); if (isa(V) || isa(V)) return false; return !isInlineAsm(); @@ -491,7 +491,7 @@ CallInst *CallInst::Create(CallInst *CI, ArrayRef OpB, Instruction *InsertPt) { std::vector Args(CI->arg_begin(), CI->arg_end()); - auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledValue(), + auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Args, OpB, CI->getName(), InsertPt); NewCI->setTailCallKind(CI->getTailCallKind()); NewCI->setCallingConv(CI->getCallingConv()); @@ -802,9 +802,9 @@ InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef OpB, Instruction *InsertPt) { std::vector Args(II->arg_begin(), II->arg_end()); - auto *NewII = InvokeInst::Create(II->getFunctionType(), II->getCalledValue(), - II->getNormalDest(), II->getUnwindDest(), - Args, OpB, II->getName(), InsertPt); + auto *NewII = InvokeInst::Create( + II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(), + II->getUnwindDest(), Args, OpB, II->getName(), InsertPt); NewII->setCallingConv(II->getCallingConv()); NewII->SubclassOptionalData = II->SubclassOptionalData; NewII->setAttributes(II->getAttributes()); @@ -885,11 +885,9 @@ CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef OpB, Instruction *InsertPt) { std::vector Args(CBI->arg_begin(), CBI->arg_end()); - 
auto *NewCBI = CallBrInst::Create(CBI->getFunctionType(), - CBI->getCalledValue(), - CBI->getDefaultDest(), - CBI->getIndirectDests(), - Args, OpB, CBI->getName(), InsertPt); + auto *NewCBI = CallBrInst::Create( + CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(), + CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt); NewCBI->setCallingConv(CBI->getCallingConv()); NewCBI->SubclassOptionalData = CBI->SubclassOptionalData; NewCBI->setAttributes(CBI->getAttributes()); diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp index 6037787..636849c 100644 --- a/llvm/lib/IR/Verifier.cpp +++ b/llvm/lib/IR/Verifier.cpp @@ -2872,9 +2872,9 @@ void Verifier::visitPHINode(PHINode &PN) { } void Verifier::visitCallBase(CallBase &Call) { - Assert(Call.getCalledValue()->getType()->isPointerTy(), + Assert(Call.getCalledOperand()->getType()->isPointerTy(), "Called function must be a pointer!", Call); - PointerType *FPTy = cast(Call.getCalledValue()->getType()); + PointerType *FPTy = cast(Call.getCalledOperand()->getType()); Assert(FPTy->getElementType()->isFunctionTy(), "Called function is not pointer to function type!", Call); @@ -2907,8 +2907,8 @@ void Verifier::visitCallBase(CallBase &Call) { bool IsIntrinsic = Call.getCalledFunction() && Call.getCalledFunction()->getName().startswith("llvm."); - Function *Callee - = dyn_cast(Call.getCalledValue()->stripPointerCasts()); + Function *Callee = + dyn_cast(Call.getCalledOperand()->stripPointerCasts()); if (Attrs.hasAttribute(AttributeList::FunctionIndex, Attribute::Speculatable)) { // Don't allow speculatable on call sites, unless the underlying function diff --git a/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp b/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp index 9135f1b..9a5615a 100644 --- a/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp +++ b/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp @@ -304,7 +304,7 @@ static bool shouldConvertUse(const Constant *Cst, const Instruction *Instr, // Do not mess with inline asm. const CallInst *CI = dyn_cast(Instr); - return !(CI && isa(CI->getCalledValue())); + return !(CI && CI->isInlineAsm()); } /// Check if the given Cst should be converted into diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp index 7a691c3..d241b48 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp @@ -288,7 +288,7 @@ bool AMDGPUAnnotateKernelFeatures::addFeatureAttributes(Function &F) { for (Instruction &I : BB) { if (auto *CB = dyn_cast(&I)) { const Function *Callee = - dyn_cast(CB->getCalledValue()->stripPointerCasts()); + dyn_cast(CB->getCalledOperand()->stripPointerCasts()); // TODO: Do something with indirect calls. 
if (!Callee) { diff --git a/llvm/lib/Target/AMDGPU/AMDGPUFixFunctionBitcasts.cpp b/llvm/lib/Target/AMDGPU/AMDGPUFixFunctionBitcasts.cpp index 5fbb009..b74f2a2 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUFixFunctionBitcasts.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUFixFunctionBitcasts.cpp @@ -34,7 +34,8 @@ public: void visitCallBase(CallBase &CB) { if (CB.getCalledFunction()) return; - auto *Callee = dyn_cast(CB.getCalledValue()->stripPointerCasts()); + auto *Callee = + dyn_cast(CB.getCalledOperand()->stripPointerCasts()); if (Callee && isLegalToPromote(CB, Callee)) { promoteCall(CB, Callee); Modified = true; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp index 402f557..48cfa06 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp @@ -782,7 +782,7 @@ bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const { // Assume all function calls are a source of divergence. if (const CallInst *CI = dyn_cast(V)) { - if (isa(CI->getCalledValue())) + if (CI->isInlineAsm()) return isInlineAsmSourceOfDivergence(CI); return true; } @@ -810,7 +810,7 @@ bool GCNTTIImpl::isAlwaysUniform(const Value *V) const { } if (const CallInst *CI = dyn_cast(V)) { - if (isa(CI->getCalledValue())) + if (CI->isInlineAsm()) return !isInlineAsmSourceOfDivergence(CI); return false; } @@ -838,7 +838,7 @@ bool GCNTTIImpl::isAlwaysUniform(const Value *V) const { // If we have inline asm returning mixed SGPR and VGPR results, we inferred // divergent for the overall struct return. We need to override it in the // case we're extracting an SGPR component here. - if (isa(CI->getCalledValue())) + if (CI->isInlineAsm()) return !isInlineAsmSourceOfDivergence(CI, ExtValue->getIndices()); return false; diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 69cc962..4696b30 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -11009,7 +11009,7 @@ static bool hasCFUser(const Value *V, SmallPtrSet &Visited, bool SITargetLowering::requiresUniformRegister(MachineFunction &MF, const Value *V) const { if (const CallInst *CI = dyn_cast(V)) { - if (isa(CI->getCalledValue())) { + if (CI->isInlineAsm()) { // FIXME: This cannot give a correct answer. This should only trigger in // the case where inline asm returns mixed SGPR and VGPR results, used // outside the defining block. We don't have a specific result to diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp index 0939d89..ad0125a 100644 --- a/llvm/lib/Target/ARM/ARMFastISel.cpp +++ b/llvm/lib/Target/ARM/ARMFastISel.cpp @@ -2288,7 +2288,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { bool ARMFastISel::SelectCall(const Instruction *I, const char *IntrMemName = nullptr) { const CallInst *CI = cast(I); - const Value *Callee = CI->getCalledValue(); + const Value *Callee = CI->getCalledOperand(); // Can't handle inline asm. 
if (isa(Callee)) return false; diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index 3e4d798..ee0050e 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -16518,7 +16518,7 @@ bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { if (!Subtarget->hasV6Ops()) return false; - InlineAsm *IA = cast(CI->getCalledValue()); + InlineAsm *IA = cast(CI->getCalledOperand()); std::string AsmStr = IA->getAsmString(); SmallVector AsmPieces; SplitString(AsmStr, AsmPieces, ";\n"); diff --git a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp index 067b5a8..0e943aa 100644 --- a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp +++ b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp @@ -239,7 +239,7 @@ bool BPFAbstractMemberAccess::IsPreserveDIAccessIndexCall(const CallInst *Call, if (!Call) return false; - const auto *GV = dyn_cast(Call->getCalledValue()); + const auto *GV = dyn_cast(Call->getCalledOperand()); if (!GV) return false; if (GV->getName().startswith("llvm.preserve.array.access.index")) { diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp index 1fbbed7..9669fb5 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -1361,19 +1361,19 @@ Align NVPTXTargetLowering::getArgumentAlignment(SDValue Callee, } unsigned Alignment = 0; - const Value *DirectCallee = CB->getCalledFunction(); + const Function *DirectCallee = CB->getCalledFunction(); if (!DirectCallee) { // We don't have a direct function symbol, but that may be because of // constant cast instructions in the call. // With bitcast'd call targets, the instruction will be the call - if (isa(CB)) { + if (const auto *CI = dyn_cast(CB)) { // Check if we have call alignment metadata - if (getAlign(*cast(CB), Idx, Alignment)) + if (getAlign(*CI, Idx, Alignment)) return Align(Alignment); - const Value *CalleeV = cast(CB)->getCalledValue(); + const Value *CalleeV = CI->getCalledOperand(); // Ignore any bitcast instructions while (isa(CalleeV)) { const ConstantExpr *CE = cast(CalleeV); @@ -1385,15 +1385,15 @@ Align NVPTXTargetLowering::getArgumentAlignment(SDValue Callee, // We have now looked past all of the bitcasts. Do we finally have a // Function? - if (isa(CalleeV)) - DirectCallee = CalleeV; + if (const auto *CalleeF = dyn_cast(CalleeV)) + DirectCallee = CalleeF; } } // Check for function alignment information if we found that the // ultimate target is a Function if (DirectCallee) - if (getAlign(*cast(DirectCallee), Idx, Alignment)) + if (getAlign(*DirectCallee, Idx, Alignment)) return Align(Alignment); // Call is indirect or alignment information is not available, fall back to diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index 28c7319..69dca9b 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -5368,7 +5368,7 @@ static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee, MachineMemOperand::MOInvariant) : MachineMemOperand::MONone; - MachinePointerInfo MPI(CB ? CB->getCalledValue() : nullptr); + MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr); // Registers used in building the DAG. 
const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister(); diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp index 85555b1..00656f9 100644 --- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp +++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp @@ -271,7 +271,7 @@ bool PPCTTIImpl::mightUseCTR(BasicBlock *BB, TargetLibraryInfo *LibInfo, J != JE; ++J) { if (CallInst *CI = dyn_cast(J)) { // Inline ASM is okay, unless it clobbers the ctr register. - if (InlineAsm *IA = dyn_cast(CI->getCalledValue())) { + if (InlineAsm *IA = dyn_cast(CI->getCalledOperand())) { if (asmClobbersCTR(IA)) return true; continue; diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp index f7e9880..063f20b 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp @@ -761,7 +761,7 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) { return false; bool IsDirect = Func != nullptr; - if (!IsDirect && isa(Call->getCalledValue())) + if (!IsDirect && isa(Call->getCalledOperand())) return false; FunctionType *FuncTy = Call->getFunctionType(); @@ -847,7 +847,7 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) { unsigned CalleeReg = 0; if (!IsDirect) { - CalleeReg = getRegForValue(Call->getCalledValue()); + CalleeReg = getRegForValue(Call->getCalledOperand()); if (!CalleeReg) return false; } diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp index 464a54a..7abb6fa 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp @@ -76,7 +76,7 @@ static void findUses(Value *V, Function &F, if (!CB) // Skip uses that aren't immediately called continue; - Value *Callee = CB->getCalledValue(); + Value *Callee = CB->getCalledOperand(); if (Callee != V) // Skip calls where the function isn't the callee continue; @@ -307,7 +307,7 @@ bool FixFunctionBitcasts::runOnModule(Module &M) { if (CallMain) { Main->setName("__original_main"); auto *MainWrapper = - cast(CallMain->getCalledValue()->stripPointerCasts()); + cast(CallMain->getCalledOperand()->stripPointerCasts()); delete CallMain; if (Main->isDeclaration()) { // The wrapper is not needed in this case as we don't need to export diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp index be4fe0b..93de6e8 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp @@ -258,11 +258,11 @@ class WebAssemblyLowerEmscriptenEHSjLj final : public ModulePass { bool runSjLjOnFunction(Function &F); Function *getFindMatchingCatch(Module &M, unsigned NumClauses); - template Value *wrapInvoke(CallOrInvoke *CI); + Value *wrapInvoke(CallBase *CI); void wrapTestSetjmp(BasicBlock *BB, DebugLoc DL, Value *Threw, Value *SetjmpTable, Value *SetjmpTableSize, Value *&Label, Value *&LongjmpResult, BasicBlock *&EndBB); - template Function *getInvokeWrapper(CallOrInvoke *CI); + Function *getInvokeWrapper(CallBase *CI); bool areAllExceptionsAllowed() const { return EHWhitelistSet.empty(); } bool canLongjmp(Module &M, const Value *Callee) const; @@ -388,15 +388,14 @@ WebAssemblyLowerEmscriptenEHSjLj::getFindMatchingCatch(Module &M, // 
%__THREW__.val = __THREW__; __THREW__ = 0; // Returns %__THREW__.val, which indicates whether an exception is thrown (or // whether longjmp occurred), for future use. -template -Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallOrInvoke *CI) { +Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallBase *CI) { LLVMContext &C = CI->getModule()->getContext(); // If we are calling a function that is noreturn, we must remove that // attribute. The code we insert here does expect it to return, after we // catch the exception. if (CI->doesNotReturn()) { - if (auto *F = dyn_cast(CI->getCalledValue())) + if (auto *F = CI->getCalledFunction()) F->removeFnAttr(Attribute::NoReturn); CI->removeAttribute(AttributeList::FunctionIndex, Attribute::NoReturn); } @@ -412,7 +411,7 @@ Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallOrInvoke *CI) { SmallVector Args; // Put the pointer to the callee as first argument, so it can be called // within the invoke wrapper later - Args.push_back(CI->getCalledValue()); + Args.push_back(CI->getCalledOperand()); Args.append(CI->arg_begin(), CI->arg_end()); CallInst *NewCall = IRB.CreateCall(getInvokeWrapper(CI), Args); NewCall->takeName(CI); @@ -460,18 +459,10 @@ Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallOrInvoke *CI) { } // Get matching invoke wrapper based on callee signature -template -Function *WebAssemblyLowerEmscriptenEHSjLj::getInvokeWrapper(CallOrInvoke *CI) { +Function *WebAssemblyLowerEmscriptenEHSjLj::getInvokeWrapper(CallBase *CI) { Module *M = CI->getModule(); SmallVector ArgTys; - Value *Callee = CI->getCalledValue(); - FunctionType *CalleeFTy; - if (auto *F = dyn_cast(Callee)) - CalleeFTy = F->getFunctionType(); - else { - auto *CalleeTy = cast(Callee->getType())->getElementType(); - CalleeFTy = cast(CalleeTy); - } + FunctionType *CalleeFTy = CI->getFunctionType(); std::string Sig = getSignature(CalleeFTy); if (InvokeWrappers.find(Sig) != InvokeWrappers.end()) @@ -764,7 +755,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runEHOnFunction(Function &F) { LandingPads.insert(II->getLandingPadInst()); IRB.SetInsertPoint(II); - bool NeedInvoke = AllowExceptions && canThrow(II->getCalledValue()); + bool NeedInvoke = AllowExceptions && canThrow(II->getCalledOperand()); if (NeedInvoke) { // Wrap invoke with invoke wrapper and generate preamble/postamble Value *Threw = wrapInvoke(II); @@ -779,7 +770,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runEHOnFunction(Function &F) { // call+branch SmallVector Args(II->arg_begin(), II->arg_end()); CallInst *NewCall = - IRB.CreateCall(II->getFunctionType(), II->getCalledValue(), Args); + IRB.CreateCall(II->getFunctionType(), II->getCalledOperand(), Args); NewCall->takeName(II); NewCall->setCallingConv(II->getCallingConv()); NewCall->setDebugLoc(II->getDebugLoc()); @@ -1005,7 +996,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) { if (!CI) continue; - const Value *Callee = CI->getCalledValue(); + const Value *Callee = CI->getCalledOperand(); if (!canLongjmp(M, Callee)) continue; if (isEmAsmCall(M, Callee)) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index b461313..9f0f3cd 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -47867,7 +47867,7 @@ static bool clobbersFlagRegisters(const SmallVector &AsmPieces) { } bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const { - InlineAsm *IA = cast(CI->getCalledValue()); + InlineAsm *IA = cast(CI->getCalledOperand()); const std::string 
&AsmStr = IA->getAsmString(); diff --git a/llvm/lib/Target/X86/X86WinEHState.cpp b/llvm/lib/Target/X86/X86WinEHState.cpp index a409907..553acdf 100644 --- a/llvm/lib/Target/X86/X86WinEHState.cpp +++ b/llvm/lib/Target/X86/X86WinEHState.cpp @@ -754,7 +754,7 @@ void WinEHStatePass::addStateStores(Function &F, WinEHFuncInfo &FuncInfo) { auto *Call = dyn_cast(&I); if (!Call) continue; - if (Call->getCalledValue()->stripPointerCasts() != + if (Call->getCalledOperand()->stripPointerCasts() != SetJmp3.getCallee()->stripPointerCasts()) continue; diff --git a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp index 7a1d328..7d719fd 100644 --- a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp +++ b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp @@ -1167,7 +1167,7 @@ static bool simplifySuspendPoint(CoroSuspendInst *Suspend, if (!CB) return false; - auto *Callee = CB->getCalledValue()->stripPointerCasts(); + auto *Callee = CB->getCalledOperand()->stripPointerCasts(); // See if the callsite is for resumption or destruction of the coroutine. auto *SubFn = dyn_cast(Callee); @@ -1197,7 +1197,7 @@ static bool simplifySuspendPoint(CoroSuspendInst *Suspend, } // Grab the CalledValue from CB before erasing the CallInstr. - auto *CalledValue = CB->getCalledValue(); + auto *CalledValue = CB->getCalledOperand(); CB->eraseFromParent(); // If no more users remove it. Usually it is a bitcast of SubFn. diff --git a/llvm/lib/Transforms/IPO/CalledValuePropagation.cpp b/llvm/lib/Transforms/IPO/CalledValuePropagation.cpp index 98a1df5..74f11fa 100644 --- a/llvm/lib/Transforms/IPO/CalledValuePropagation.cpp +++ b/llvm/lib/Transforms/IPO/CalledValuePropagation.cpp @@ -385,7 +385,7 @@ static bool runCVP(Module &M) { bool Changed = false; MDBuilder MDB(M.getContext()); for (CallBase *C : Lattice.getIndirectCalls()) { - auto RegI = CVPLatticeKey(C->getCalledValue(), IPOGrouping::Register); + auto RegI = CVPLatticeKey(C->getCalledOperand(), IPOGrouping::Register); CVPLatticeVal LV = Solver.getExistingValueState(RegI); if (!LV.isFunctionSet() || LV.getFunctions().empty()) continue; diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp index 2f07830..e01d813 100644 --- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp +++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp @@ -658,12 +658,12 @@ static bool AllUsesOfValueWillTrapIfNull(const Value *V, return false; // Storing the value. } } else if (const CallInst *CI = dyn_cast(U)) { - if (CI->getCalledValue() != V) { + if (CI->getCalledOperand() != V) { //cerr << "NONTRAPPING USE: " << *U; return false; // Not calling the ptr } } else if (const InvokeInst *II = dyn_cast(U)) { - if (II->getCalledValue() != V) { + if (II->getCalledOperand() != V) { //cerr << "NONTRAPPING USE: " << *U; return false; // Not calling the ptr } @@ -721,7 +721,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) { } } else if (isa(I) || isa(I)) { CallBase *CB = cast(I); - if (CB->getCalledValue() == V) { + if (CB->getCalledOperand() == V) { // Calling through the pointer! Turn into a direct call, but be careful // that the pointer is not also being passed as an argument. 
CB->setCalledOperand(NewV); diff --git a/llvm/lib/Transforms/IPO/PruneEH.cpp b/llvm/lib/Transforms/IPO/PruneEH.cpp index 874791b..a16dc66 100644 --- a/llvm/lib/Transforms/IPO/PruneEH.cpp +++ b/llvm/lib/Transforms/IPO/PruneEH.cpp @@ -136,7 +136,7 @@ static bool runImpl(CallGraphSCC &SCC, CallGraph &CG) { } if (CheckReturnViaAsm && !SCCMightReturn) if (const auto *CB = dyn_cast(&I)) - if (const auto *IA = dyn_cast(CB->getCalledValue())) + if (const auto *IA = dyn_cast(CB->getCalledOperand())) if (IA->hasSideEffects()) SCCMightReturn = true; } diff --git a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp index 9b09c52..40177d9 100644 --- a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp +++ b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp @@ -1028,7 +1028,7 @@ void DevirtModule::applySingleImplDevirt(VTableSlotInfo &SlotInfo, VCallSite.emitRemark("single-impl", TheFn->stripPointerCasts()->getName(), OREGetter); VCallSite.CB.setCalledOperand(ConstantExpr::getBitCast( - TheFn, VCallSite.CB.getCalledValue()->getType())); + TheFn, VCallSite.CB.getCalledOperand()->getType())); // This use is no longer unsafe. if (VCallSite.NumUnsafeUses) --*VCallSite.NumUnsafeUses; diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp index 765eb24..d561b7a 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -4163,7 +4163,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) { // Note: New assumption intrinsics created here are registered by // the InstCombineIRInserter object. FunctionType *AssumeIntrinsicTy = II->getFunctionType(); - Value *AssumeIntrinsic = II->getCalledValue(); + Value *AssumeIntrinsic = II->getCalledOperand(); Value *A, *B; if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) { Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, II->getName()); @@ -4541,7 +4541,7 @@ Instruction *InstCombiner::visitCallBase(CallBase &Call) { // If the callee is a pointer to a function, attempt to move any casts to the // arguments of the call/callbr/invoke. - Value *Callee = Call.getCalledValue(); + Value *Callee = Call.getCalledOperand(); if (!isa(Callee) && transformConstExprCastCall(Call)) return nullptr; @@ -4660,7 +4660,8 @@ Instruction *InstCombiner::visitCallBase(CallBase &Call) { /// If the callee is a constexpr cast of a function, attempt to move the cast to /// the arguments of the call/callbr/invoke. bool InstCombiner::transformConstExprCastCall(CallBase &Call) { - auto *Callee = dyn_cast(Call.getCalledValue()->stripPointerCasts()); + auto *Callee = + dyn_cast(Call.getCalledOperand()->stripPointerCasts()); if (!Callee) return false; @@ -4778,7 +4779,7 @@ bool InstCombiner::transformConstExprCastCall(CallBase &Call) { // If the callee is just a declaration, don't change the varargsness of the // call. We don't want to introduce a varargs call where one doesn't // already exist. 
-    PointerType *APTy = cast<PointerType>(Call.getCalledValue()->getType());
+    PointerType *APTy = cast<PointerType>(Call.getCalledOperand()->getType());
     if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
       return false;
@@ -4946,7 +4947,7 @@ bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
 Instruction *
 InstCombiner::transformCallThroughTrampoline(CallBase &Call,
                                              IntrinsicInst &Tramp) {
-  Value *Callee = Call.getCalledValue();
+  Value *Callee = Call.getCalledOperand();
   Type *CalleeTy = Callee->getType();
   FunctionType *FTy = Call.getFunctionType();
   AttributeList Attrs = Call.getAttributes();
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index d12ceea..4556e32 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1378,7 +1378,7 @@ Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
     *Alignment = 0;
     PtrOperand = XCHG->getPointerOperand();
   } else if (auto CI = dyn_cast<CallInst>(I)) {
-    auto *F = dyn_cast<Function>(CI->getCalledValue());
+    auto *F = CI->getCalledFunction();
     if (F && (F->getName().startswith("llvm.masked.load.") ||
               F->getName().startswith("llvm.masked.store."))) {
       unsigned OpOffset = 0;
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 7952ae7..0de956c 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -1553,7 +1553,7 @@ void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
   Value *DestShadow = IRB.CreateBitCast(RawDestShadow, Int8Ptr);
   SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
   auto *MTI = cast<MemTransferInst>(
-      IRB.CreateCall(I.getFunctionType(), I.getCalledValue(),
+      IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
                      {DestShadow, SrcShadow, LenShadow, I.getVolatileCst()}));
   if (ClPreserveAlignment) {
     MTI->setDestAlignment(I.getDestAlign() * DFSF.DFS.ShadowWidthBytes);
@@ -1593,7 +1593,7 @@ void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
 
 void DFSanVisitor::visitCallBase(CallBase &CB) {
   Function *F = CB.getCalledFunction();
-  if ((F && F->isIntrinsic()) || isa<InlineAsm>(CB.getCalledValue())) {
+  if ((F && F->isIntrinsic()) || CB.isInlineAsm()) {
     visitOperandShadowInst(CB);
     return;
   }
@@ -1606,7 +1606,7 @@ void DFSanVisitor::visitCallBase(CallBase &CB) {
   IRBuilder<> IRB(&CB);
 
   DenseMap<Value *, Function *>::iterator i =
-      DFSF.DFS.UnwrappedFnMap.find(CB.getCalledValue());
+      DFSF.DFS.UnwrappedFnMap.find(CB.getCalledOperand());
   if (i != DFSF.DFS.UnwrappedFnMap.end()) {
     Function *F = i->second;
     switch (DFSF.DFS.getWrapperKind(F)) {
@@ -1728,8 +1728,7 @@ void DFSanVisitor::visitCallBase(CallBase &CB) {
     }
   }
 
-  FunctionType *FT = cast<FunctionType>(
-      CB.getCalledValue()->getType()->getPointerElementType());
+  FunctionType *FT = CB.getFunctionType();
   if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
     for (unsigned i = 0, n = FT->getNumParams(); i != n; ++i) {
       IRB.CreateStore(DFSF.getShadow(CB.getArgOperand(i)),
@@ -1766,7 +1765,7 @@ void DFSanVisitor::visitCallBase(CallBase &CB) {
   if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
     FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
     Value *Func =
-        IRB.CreateBitCast(CB.getCalledValue(), PointerType::getUnqual(NewFT));
+        IRB.CreateBitCast(CB.getCalledOperand(), PointerType::getUnqual(NewFT));
     std::vector<Value *> Args;
 
     auto i = CB.arg_begin(), E = CB.arg_end();
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 2753d73..79e7820 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2743,7 +2743,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
                         : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
     Value *V1 = I.getOperand(0);
     Value *V2 = I.getOperand(1);
-    Value *Shift = IRB.CreateCall(I.getFunctionType(), I.getCalledValue(),
+    Value *Shift = IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
                                   {IRB.CreateBitCast(S1, V1->getType()), V2});
     Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
@@ -3761,7 +3761,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     const DataLayout &DL = F.getParent()->getDataLayout();
     CallBase *CB = cast<CallBase>(&I);
     IRBuilder<> IRB(&I);
-    InlineAsm *IA = cast<InlineAsm>(CB->getCalledValue());
+    InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
     int OutputArgs = getNumOutputArgs(IA, CB);
     // The last operand of a CallInst is the function itself.
     int NumOperands = CB->getNumOperands() - 1;
diff --git a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index d3eeb02..e3596be 100644
--- a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -786,7 +786,7 @@ void ModuleSanitizerCoverage::InjectCoverageForIndirectCalls(
   for (auto I : IndirCalls) {
     IRBuilder<> IRB(I);
     CallBase &CB = cast<CallBase>(*I);
-    Value *Callee = CB.getCalledValue();
+    Value *Callee = CB.getCalledOperand();
     if (isa<InlineAsm>(Callee))
       continue;
     IRB.CreateCall(SanCovTracePCIndir, IRB.CreatePointerCast(Callee, IntptrTy));
diff --git a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
index 08195ee..d9542dc 100644
--- a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
+++ b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
@@ -59,7 +59,7 @@ public:
   void run(std::vector<CandidateInfo> &Candidates) {
     std::vector<Instruction *> Result = findIndirectCalls(F);
     for (Instruction *I : Result) {
-      Value *Callee = cast<CallBase>(I)->getCalledValue();
+      Value *Callee = cast<CallBase>(I)->getCalledOperand();
       Instruction *InsertPt = I;
       Instruction *AnnotatedInst = I;
       Candidates.emplace_back(CandidateInfo{Callee, InsertPt, AnnotatedInst});
diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index e4bc345..652baa5 100644
--- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -381,7 +381,7 @@ static void analyzeParsePointLiveness(
       dbgs() << " " << V->getName() << " " << *V << "\n";
   }
   if (PrintLiveSetSize) {
-    dbgs() << "Safepoint For: " << Call->getCalledValue()->getName() << "\n";
+    dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n";
     dbgs() << "Number live values: " << LiveSet.size() << "\n";
   }
   Result.LiveSet = LiveSet;
@@ -1481,7 +1481,7 @@ makeStatepointExplicitImpl(CallBase *Call, /* to replace */
       assert(DeoptLowering.equals("live-through") && "Unsupported value!");
   }
 
-  Value *CallTarget = Call->getCalledValue();
+  Value *CallTarget = Call->getCalledOperand();
   if (Function *F = dyn_cast<Function>(CallTarget)) {
     if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize) {
       // Calls to llvm.experimental.deoptimize are lowered to calls to the
diff --git a/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp b/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
index d087717..150c355 100644
--- a/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
+++ b/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
@@ -264,9 +264,9 @@ static CallBase &versionCallSite(CallBase &CB, Value *Callee,
 
   // Create the compare. The called value and callee must have the same type to
   // be compared.
-  if (CB.getCalledValue()->getType() != Callee->getType())
-    Callee = Builder.CreateBitCast(Callee, CB.getCalledValue()->getType());
-  auto *Cond = Builder.CreateICmpEQ(CB.getCalledValue(), Callee);
+  if (CB.getCalledOperand()->getType() != Callee->getType())
+    Callee = Builder.CreateBitCast(Callee, CB.getCalledOperand()->getType());
+  auto *Cond = Builder.CreateICmpEQ(CB.getCalledOperand(), Callee);
 
   // Create an if-then-else structure. The original instruction is moved into
   // the "else" block, and a clone of the original instruction is placed in the
@@ -462,7 +462,7 @@ bool llvm::tryPromoteCall(CallBase &CB) {
   assert(!CB.getCalledFunction());
   Module *M = CB.getCaller()->getParent();
   const DataLayout &DL = M->getDataLayout();
-  Value *Callee = CB.getCalledValue();
+  Value *Callee = CB.getCalledOperand();
 
   LoadInst *VTableEntryLoad = dyn_cast<LoadInst>(Callee);
   if (!VTableEntryLoad)
diff --git a/llvm/lib/Transforms/Utils/Evaluator.cpp b/llvm/lib/Transforms/Utils/Evaluator.cpp
index 451a68e..c5dfbf9 100644
--- a/llvm/lib/Transforms/Utils/Evaluator.cpp
+++ b/llvm/lib/Transforms/Utils/Evaluator.cpp
@@ -266,7 +266,7 @@ static Function *getFunction(Constant *C) {
 Function *
 Evaluator::getCalleeWithFormalArgs(CallBase &CB,
                                    SmallVectorImpl<Constant *> &Formals) {
-  auto *V = CB.getCalledValue();
+  auto *V = CB.getCalledOperand();
   if (auto *Fn = getFunction(getVal(V)))
     return getFormalParams(CB, Fn, Formals) ? Fn : nullptr;
 
@@ -486,7 +486,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
       }
 
       // Cannot handle inline asm.
-      if (isa<InlineAsm>(CB.getCalledValue())) {
+      if (CB.isInlineAsm()) {
        LLVM_DEBUG(dbgs() << "Found inline asm, can not evaluate.\n");
        return false;
      }
@@ -568,7 +568,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
       if (Callee->isDeclaration()) {
         // If this is a function we can constant fold, do it.
         if (Constant *C = ConstantFoldCall(&CB, Callee, Formals, TLI)) {
-          InstResult = castCallResultIfNeeded(CB.getCalledValue(), C);
+          InstResult = castCallResultIfNeeded(CB.getCalledOperand(), C);
           if (!InstResult)
             return false;
           LLVM_DEBUG(dbgs() << "Constant folded function call. Result: "
@@ -591,7 +591,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
           return false;
         }
         ValueStack.pop_back();
-        InstResult = castCallResultIfNeeded(CB.getCalledValue(), RetVal);
+        InstResult = castCallResultIfNeeded(CB.getCalledOperand(), RetVal);
         if (RetVal && !InstResult)
           return false;
 
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 8f8f699..fbf8c7b 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -534,7 +534,7 @@ static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
     // instructions require no special handling.
     CallInst *CI = dyn_cast<CallInst>(I);
 
-    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
+    if (!CI || CI->doesNotThrow() || CI->isInlineAsm())
       continue;
 
     // We do not need to (and in fact, cannot) convert possibly throwing calls
@@ -2149,7 +2149,7 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
 
       // Skip call sites which are nounwind intrinsics.
       auto *CalledFn =
-          dyn_cast<Function>(I->getCalledValue()->stripPointerCasts());
+          dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
       if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow())
         continue;
 
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index dd5a571..265d6d9 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -1963,7 +1963,7 @@ CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
   SmallVector<OperandBundleDef, 1> OpBundles;
   II->getOperandBundlesAsDefs(OpBundles);
   CallInst *NewCall = CallInst::Create(II->getFunctionType(),
-                                       II->getCalledValue(), Args, OpBundles);
+                                       II->getCalledOperand(), Args, OpBundles);
   NewCall->setCallingConv(II->getCallingConv());
   NewCall->setAttributes(II->getAttributes());
   NewCall->setDebugLoc(II->getDebugLoc());
@@ -2014,7 +2014,7 @@ BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
   // as of this time.
 
   InvokeInst *II =
-      InvokeInst::Create(CI->getFunctionType(), CI->getCalledValue(), Split,
+      InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
                          UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
   II->setDebugLoc(CI->getDebugLoc());
   II->setCallingConv(CI->getCallingConv());
@@ -2045,7 +2045,7 @@ static bool markAliveBlocks(Function &F,
     // canonicalizes unreachable insts into stores to null or undef.
     for (Instruction &I : *BB) {
       if (auto *CI = dyn_cast<CallInst>(&I)) {
-        Value *Callee = CI->getCalledValue();
+        Value *Callee = CI->getCalledOperand();
         // Handle intrinsic calls.
         if (Function *F = dyn_cast<Function>(Callee)) {
           auto IntrinsicID = F->getIntrinsicID();
@@ -2120,7 +2120,7 @@ static bool markAliveBlocks(Function &F,
     Instruction *Terminator = BB->getTerminator();
     if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
      // Turn invokes that call 'nounwind' functions into ordinary calls.
-      Value *Callee = II->getCalledValue();
+      Value *Callee = II->getCalledOperand();
      if ((isa<ConstantPointerNull>(Callee) &&
           !NullPointerIsDefined(BB->getParent())) ||
          isa<UndefValue>(Callee)) {
diff --git a/llvm/lib/Transforms/Utils/LowerInvoke.cpp b/llvm/lib/Transforms/Utils/LowerInvoke.cpp
index 1af0ce3..0b225e8 100644
--- a/llvm/lib/Transforms/Utils/LowerInvoke.cpp
+++ b/llvm/lib/Transforms/Utils/LowerInvoke.cpp
@@ -53,7 +53,7 @@ static bool runImpl(Function &F) {
       II->getOperandBundlesAsDefs(OpBundles);
       // Insert a normal call instruction...
       CallInst *NewCall =
-          CallInst::Create(II->getFunctionType(), II->getCalledValue(),
+          CallInst::Create(II->getFunctionType(), II->getCalledOperand(),
                            CallArgs, OpBundles, "", II);
       NewCall->takeName(II);
       NewCall->setCallingConv(II->getCallingConv());
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index be5a375..6b6526b 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -6103,7 +6103,7 @@ static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I) {
     // A call to null is undefined.
     if (auto *CB = dyn_cast<CallBase>(Use))
       return !NullPointerIsDefined(CB->getFunction()) &&
-             CB->getCalledValue() == I;
+             CB->getCalledOperand() == I;
   }
   return false;
 }
diff --git a/llvm/test/LTO/X86/type-mapping-bug3.ll b/llvm/test/LTO/X86/type-mapping-bug3.ll
index 2f845a5..6d9e81d 100644
--- a/llvm/test/LTO/X86/type-mapping-bug3.ll
+++ b/llvm/test/LTO/X86/type-mapping-bug3.ll
@@ -25,7 +25,7 @@ define void @b() {
 entry:
   %f.addr = alloca %"T3"*
   %0 = load %"T3"*, %"T3"** %f.addr
-  ; The call with the getCalledValue() vs getCalledFunction() mismatch.
+  ; The call with the getCalledOperand() vs getCalledFunction() mismatch.
   call void @d(%"T3"* %0)
   unreachable
 }
diff --git a/llvm/tools/llvm-diff/DiffConsumer.cpp b/llvm/tools/llvm-diff/DiffConsumer.cpp
index b797143..6228ff2 100644
--- a/llvm/tools/llvm-diff/DiffConsumer.cpp
+++ b/llvm/tools/llvm-diff/DiffConsumer.cpp
@@ -50,15 +50,15 @@ void DiffConsumer::printValue(Value *V, bool isL) {
     return;
   }
   if (V->getType()->isVoidTy()) {
-    if (isa<StoreInst>(V)) {
+    if (auto *SI = dyn_cast<StoreInst>(V)) {
       out << "store to ";
-      printValue(cast<StoreInst>(V)->getPointerOperand(), isL);
-    } else if (isa<CallInst>(V)) {
+      printValue(SI->getPointerOperand(), isL);
+    } else if (auto *CI = dyn_cast<CallInst>(V)) {
       out << "call to ";
-      printValue(cast<CallInst>(V)->getCalledValue(), isL);
-    } else if (isa<InvokeInst>(V)) {
+      printValue(CI->getCalledOperand(), isL);
+    } else if (auto *II = dyn_cast<InvokeInst>(V)) {
       out << "invoke to ";
-      printValue(cast<InvokeInst>(V)->getCalledValue(), isL);
+      printValue(II->getCalledOperand(), isL);
     } else {
       out << *V;
     }
diff --git a/llvm/tools/llvm-diff/DifferenceEngine.cpp b/llvm/tools/llvm-diff/DifferenceEngine.cpp
index 91befdb..e542179 100644
--- a/llvm/tools/llvm-diff/DifferenceEngine.cpp
+++ b/llvm/tools/llvm-diff/DifferenceEngine.cpp
@@ -224,7 +224,7 @@ class FunctionDifferenceEngine {
 
   bool diffCallSites(CallBase &L, CallBase &R, bool Complain) {
     // FIXME: call attributes
-    if (!equivalentAsOperands(L.getCalledValue(), R.getCalledValue())) {
+    if (!equivalentAsOperands(L.getCalledOperand(), R.getCalledOperand())) {
       if (Complain) Engine.log("called functions differ");
       return true;
     }
@@ -638,7 +638,8 @@ void FunctionDifferenceEngine::runBlockDiff(BasicBlock::iterator LStart,
     if (!isa<CallInst>(*I)) return;
     CallInst *LCall = cast<CallInst>(&*I);
     InvokeInst *RInvoke = cast<InvokeInst>(RTerm);
-    if (!equivalentAsOperands(LCall->getCalledValue(), RInvoke->getCalledValue()))
+    if (!equivalentAsOperands(LCall->getCalledOperand(),
+                              RInvoke->getCalledOperand()))
       return;
     if (!LCall->use_empty())
       Values[LCall] = RInvoke;
@@ -651,7 +652,8 @@ void FunctionDifferenceEngine::runBlockDiff(BasicBlock::iterator LStart,
     if (!isa<CallInst>(*I)) return;
     CallInst *RCall = cast<CallInst>(I);
     InvokeInst *LInvoke = cast<InvokeInst>(LTerm);
-    if (!equivalentAsOperands(LInvoke->getCalledValue(), RCall->getCalledValue()))
+    if (!equivalentAsOperands(LInvoke->getCalledOperand(),
+                              RCall->getCalledOperand()))
      return;
    if (!LInvoke->use_empty())
      Values[LInvoke] = RCall;
diff --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
index ffc47fc..5856c60 100644
--- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
+++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
@@ -720,7 +720,7 @@ LogicalResult Importer::processInstruction(llvm::Instruction *inst) {
      op = b.create<CallOp>(loc, tys, b.getSymbolRefAttr(callee->getName()),
                            ops);
    } else {
-      Value calledValue = processValue(ci->getCalledValue());
+      Value calledValue = processValue(ci->getCalledOperand());
      if (!calledValue)
        return failure();
      ops.insert(ops.begin(), calledValue);
@@ -766,7 +766,7 @@ LogicalResult Importer::processInstruction(llvm::Instruction *inst) {
                                 ops, blocks[ii->getNormalDest()], normalArgs,
                                 blocks[ii->getUnwindDest()], unwindArgs);
    } else {
-      ops.insert(ops.begin(), processValue(ii->getCalledValue()));
+      ops.insert(ops.begin(), processValue(ii->getCalledOperand()));
      op = b.create<InvokeOp>(loc, tys, ops, blocks[ii->getNormalDest()],
                              normalArgs, blocks[ii->getUnwindDest()],
                              unwindArgs);
-- 
2.7.4
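
For out-of-tree code that still uses the removed accessor, the snippet below is a minimal
standalone sketch (not part of the patch) of the API surface this change migrates to:
getCalledOperand() for the raw callee, getCalledFunction() for direct calls, isInlineAsm()
instead of isa<InlineAsm>(getCalledValue()), and getFunctionType() instead of peeling the
callee pointer's element type. The helper name describeCallee is invented for illustration
and assumes LLVM headers from roughly this revision.

// Minimal sketch (not part of the patch): querying a call site with the
// accessors this change standardizes on. `describeCallee` is a hypothetical
// helper, not an LLVM API.
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static void describeCallee(const CallBase &CB) {
  // Raw callee operand: a Function, a bitcast constant expression, inline
  // asm, or an arbitrary function pointer. This replaces getCalledValue().
  Value *Callee = CB.getCalledOperand();

  if (CB.isInlineAsm()) { // replaces isa<InlineAsm>(CB.getCalledValue())
    errs() << "inline asm call\n";
    return;
  }

  if (Function *F = CB.getCalledFunction()) // direct callee, if any
    errs() << "direct call to " << F->getName() << "\n";
  else
    errs() << "indirect call through " << Callee->getName() << "\n";

  // The signature comes straight from the call site; no need to go through
  // the callee pointer's element type.
  FunctionType *FTy = CB.getFunctionType();
  errs() << "formal parameters: " << FTy->getNumParams() << "\n";
}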