#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
void visitLoadInst(LoadInst &LI);
void visitStoreInst(StoreInst &SI);
void visitReturnInst(ReturnInst &RI);
- void visitCallSite(CallSite CS);
+ void visitCallBase(CallBase &CB);
void visitPHINode(PHINode &PN);
void visitExtractElementInst(ExtractElementInst &I);
void visitInsertElementInst(InsertElementInst &I);
}
}
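// A quick reference (a sketch, not part of the patch) for the CallSite ->
// CallBase mapping this migration relies on, per the LLVM 11-era API:
//   CS.getInstruction()         ->  &CB (a CallBase *is* the Instruction)
//   CS.getArgument(i)           ->  CB.getArgOperand(i)
//   CS.getArgumentNo(ArgIt)     ->  CB.getArgOperandNo(ArgIt)
//   CS.getParamAlignment(i)     ->  CB.getParamAlign(i) (returns MaybeAlign)
//   CS.isCall() / CS.isInvoke() ->  isa<CallInst>(CB) / isa<InvokeInst>(CB)
//   CS.getCalledValue()         ->  CB.getCalledValue() (kept here; later
//                                   renamed to getCalledOperand())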
-void DFSanVisitor::visitCallSite(CallSite CS) {
- Function *F = CS.getCalledFunction();
- if ((F && F->isIntrinsic()) || isa<InlineAsm>(CS.getCalledValue())) {
- visitOperandShadowInst(*CS.getInstruction());
+void DFSanVisitor::visitCallBase(CallBase &CB) {
+ Function *F = CB.getCalledFunction();
+ if ((F && F->isIntrinsic()) || isa<InlineAsm>(CB.getCalledValue())) {
+ visitOperandShadowInst(CB);
return;
}
if (F == DFSF.DFS.DFSanVarargWrapperFn.getCallee()->stripPointerCasts())
return;
- IRBuilder<> IRB(CS.getInstruction());
+ IRBuilder<> IRB(&CB);
DenseMap<Value *, Function *>::iterator i =
- DFSF.DFS.UnwrappedFnMap.find(CS.getCalledValue());
+ DFSF.DFS.UnwrappedFnMap.find(CB.getCalledValue());
if (i != DFSF.DFS.UnwrappedFnMap.end()) {
Function *F = i->second;
switch (DFSF.DFS.getWrapperKind(F)) {
case DataFlowSanitizer::WK_Warning:
- CS.setCalledFunction(F);
+ CB.setCalledFunction(F);
IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
IRB.CreateGlobalStringPtr(F->getName()));
- DFSF.setShadow(CS.getInstruction(), DFSF.DFS.ZeroShadow);
+ DFSF.setShadow(&CB, DFSF.DFS.ZeroShadow);
return;
case DataFlowSanitizer::WK_Discard:
- CS.setCalledFunction(F);
- DFSF.setShadow(CS.getInstruction(), DFSF.DFS.ZeroShadow);
+ CB.setCalledFunction(F);
+ DFSF.setShadow(&CB, DFSF.DFS.ZeroShadow);
return;
case DataFlowSanitizer::WK_Functional:
- CS.setCalledFunction(F);
- visitOperandShadowInst(*CS.getInstruction());
+ CB.setCalledFunction(F);
+ visitOperandShadowInst(CB);
return;
case DataFlowSanitizer::WK_Custom:
// Don't try to handle invokes of custom functions; it's too complicated.
// Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_
// wrapper.
- if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
+ if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
FunctionType *FT = F->getFunctionType();
TransformedFunction CustomFn = DFSF.DFS.getCustomFunctionType(FT);
std::string CustomFName = "__dfsw_";
std::vector<Value *> Args;
- CallSite::arg_iterator i = CS.arg_begin();
+ auto i = CB.arg_begin();
for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) {
Type *T = (*i)->getType();
FunctionType *ParamFT;
}
}
- i = CS.arg_begin();
+ i = CB.arg_begin();
const unsigned ShadowArgStart = Args.size();
for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
Args.push_back(DFSF.getShadow(*i));
if (FT->isVarArg()) {
auto *LabelVATy = ArrayType::get(DFSF.DFS.ShadowTy,
- CS.arg_size() - FT->getNumParams());
+ CB.arg_size() - FT->getNumParams());
auto *LabelVAAlloca = new AllocaInst(
LabelVATy, getDataLayout().getAllocaAddrSpace(),
"labelva", &DFSF.F->getEntryBlock().front());
- for (unsigned n = 0; i != CS.arg_end(); ++i, ++n) {
+ for (unsigned n = 0; i != CB.arg_end(); ++i, ++n) {
auto LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, n);
IRB.CreateStore(DFSF.getShadow(*i), LabelVAPtr);
}
Args.push_back(DFSF.LabelReturnAlloca);
}
- for (i = CS.arg_begin() + FT->getNumParams(); i != CS.arg_end(); ++i)
+ for (i = CB.arg_begin() + FT->getNumParams(); i != CB.arg_end(); ++i)
Args.push_back(*i);
CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
}
FunctionType *FT = cast<FunctionType>(
- CS.getCalledValue()->getType()->getPointerElementType());
+ CB.getCalledValue()->getType()->getPointerElementType());
if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
for (unsigned i = 0, n = FT->getNumParams(); i != n; ++i) {
- IRB.CreateStore(DFSF.getShadow(CS.getArgument(i)),
- DFSF.getArgTLS(i, CS.getInstruction()));
+ IRB.CreateStore(DFSF.getShadow(CB.getArgOperand(i)),
+ DFSF.getArgTLS(i, &CB));
}
}
Instruction *Next = nullptr;
- if (!CS.getType()->isVoidTy()) {
- if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
+ if (!CB.getType()->isVoidTy()) {
+ if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
if (II->getNormalDest()->getSinglePredecessor()) {
Next = &II->getNormalDest()->front();
} else {
Next = &NewBB->front();
}
} else {
- assert(CS->getIterator() != CS->getParent()->end());
- Next = CS->getNextNode();
+ assert(CB.getIterator() != CB.getParent()->end());
+ Next = CB.getNextNode();
}
if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
IRBuilder<> NextIRB(Next);
LoadInst *LI = NextIRB.CreateLoad(DFSF.DFS.ShadowTy, DFSF.getRetvalTLS());
DFSF.SkipInsts.insert(LI);
- DFSF.setShadow(CS.getInstruction(), LI);
+ DFSF.setShadow(&CB, LI);
DFSF.NonZeroChecks.push_back(LI);
}
}
if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
Value *Func =
- IRB.CreateBitCast(CS.getCalledValue(), PointerType::getUnqual(NewFT));
+ IRB.CreateBitCast(CB.getCalledValue(), PointerType::getUnqual(NewFT));
std::vector<Value *> Args;
- CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
+ auto i = CB.arg_begin(), E = CB.arg_end();
for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
Args.push_back(*i);
- i = CS.arg_begin();
+ i = CB.arg_begin();
for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
Args.push_back(DFSF.getShadow(*i));
if (FT->isVarArg()) {
- unsigned VarArgSize = CS.arg_size() - FT->getNumParams();
+ unsigned VarArgSize = CB.arg_size() - FT->getNumParams();
ArrayType *VarArgArrayTy = ArrayType::get(DFSF.DFS.ShadowTy, VarArgSize);
AllocaInst *VarArgShadow =
new AllocaInst(VarArgArrayTy, getDataLayout().getAllocaAddrSpace(),
"", &DFSF.F->getEntryBlock().front());
Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));
- for (unsigned n = 0; i != e; ++i, ++n) {
+ for (unsigned n = 0; i != E; ++i, ++n) {
IRB.CreateStore(
DFSF.getShadow(*i),
IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, n));
}
}
- CallSite NewCS;
- if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
- NewCS = IRB.CreateInvoke(NewFT, Func, II->getNormalDest(),
+ CallBase *NewCB;
+ if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
+ NewCB = IRB.CreateInvoke(NewFT, Func, II->getNormalDest(),
II->getUnwindDest(), Args);
} else {
- NewCS = IRB.CreateCall(NewFT, Func, Args);
+ NewCB = IRB.CreateCall(NewFT, Func, Args);
}
- NewCS.setCallingConv(CS.getCallingConv());
- NewCS.setAttributes(CS.getAttributes().removeAttributes(
+ NewCB->setCallingConv(CB.getCallingConv());
+ NewCB->setAttributes(CB.getAttributes().removeAttributes(
*DFSF.DFS.Ctx, AttributeList::ReturnIndex,
- AttributeFuncs::typeIncompatible(NewCS.getInstruction()->getType())));
+ AttributeFuncs::typeIncompatible(NewCB->getType())));
if (Next) {
- ExtractValueInst *ExVal =
- ExtractValueInst::Create(NewCS.getInstruction(), 0, "", Next);
+ ExtractValueInst *ExVal = ExtractValueInst::Create(NewCB, 0, "", Next);
DFSF.SkipInsts.insert(ExVal);
- ExtractValueInst *ExShadow =
- ExtractValueInst::Create(NewCS.getInstruction(), 1, "", Next);
+ ExtractValueInst *ExShadow = ExtractValueInst::Create(NewCB, 1, "", Next);
DFSF.SkipInsts.insert(ExShadow);
DFSF.setShadow(ExVal, ExShadow);
DFSF.NonZeroChecks.push_back(ExShadow);
- CS.getInstruction()->replaceAllUsesWith(ExVal);
+ CB.replaceAllUsesWith(ExVal);
}
- CS.getInstruction()->eraseFromParent();
+ CB.eraseFromParent();
}
}
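// For context: overriding visitCallBase catches calls, invokes, and callbrs
// alike, because InstVisitor routes all three through it. Approximate shape
// of the InstVisitor.h delegation (paraphrased, not part of the patch):
//   RetTy visitCallInst(CallInst &I)     { return visitCallBase(I); }
//   RetTy visitInvokeInst(InvokeInst &I) { return visitCallBase(I); }
//   RetTy visitCallBrInst(CallBrInst &I) { return visitCallBase(I); }
//   RetTy visitCallBase(CallBase &I)     { return visitInstruction(I); }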
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
struct VarArgHelper {
virtual ~VarArgHelper() = default;
- /// Visit a CallSite.
- virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;
+ /// Visit a CallBase.
+ virtual void visitCallBase(CallBase &CB, IRBuilder<> &IRB) = 0;
/// Visit a va_start call.
virtual void visitVAStartInst(VAStartInst &I) = 0;
}
}
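// For context (paraphrased from MemorySanitizer.cpp, not part of the patch):
// the concrete VarArgHelper is picked from the target triple, with a no-op
// fallback for targets without a dedicated implementation:
//   static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
//                                           MemorySanitizerVisitor &Visitor) {
//     Triple TargetTriple(Func.getParent()->getTargetTriple());
//     if (TargetTriple.getArch() == Triple::x86_64)
//       return new VarArgAMD64Helper(Func, Msan, Visitor);
//     // ... other targets ...
//     return new VarArgNoOpHelper(Func, Msan, Visitor);
//   }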
- void visitCallSite(CallSite CS) {
- Instruction &I = *CS.getInstruction();
- assert(!I.getMetadata("nosanitize"));
- assert((CS.isCall() || CS.isInvoke() || CS.isCallBr()) &&
- "Unknown type of CallSite");
- if (CS.isCallBr() || (CS.isCall() && cast<CallInst>(&I)->isInlineAsm())) {
+ void visitCallBase(CallBase &CB) {
+ assert(!CB.getMetadata("nosanitize"));
+ if (CB.isInlineAsm()) {
// For inline asm (either a call to an asm function, or a callbr instruction),
// do the usual thing: check argument shadow and mark all outputs as
// clean. Note that any side effects of the inline asm that are not
// immediately visible in its constraints are not handled.
if (ClHandleAsmConservative && MS.CompileKernel)
- visitAsmInstruction(I);
+ visitAsmInstruction(CB);
else
- visitInstruction(I);
+ visitInstruction(CB);
return;
}
- if (CS.isCall()) {
- CallInst *Call = cast<CallInst>(&I);
- assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");
+ if (auto *Call = dyn_cast<CallInst>(&CB)) {
+ assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");
// We are going to insert code that relies on the fact that the callee
// will become a non-readonly function after it is instrumented by us. To
maybeMarkSanitizerLibraryCallNoBuiltin(Call, TLI);
}
- IRBuilder<> IRB(&I);
+ IRBuilder<> IRB(&CB);
unsigned ArgOffset = 0;
- LLVM_DEBUG(dbgs() << " CallSite: " << I << "\n");
- for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
- ArgIt != End; ++ArgIt) {
+ LLVM_DEBUG(dbgs() << " CallSite: " << CB << "\n");
+ for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
+ ++ArgIt) {
Value *A = *ArgIt;
- unsigned i = ArgIt - CS.arg_begin();
+ unsigned i = ArgIt - CB.arg_begin();
if (!A->getType()->isSized()) {
- LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
+ LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
continue;
}
unsigned Size = 0;
<< " Shadow: " << *ArgShadow << "\n");
bool ArgIsInitialized = false;
const DataLayout &DL = F.getParent()->getDataLayout();
- if (CS.paramHasAttr(i, Attribute::ByVal)) {
+ if (CB.paramHasAttr(i, Attribute::ByVal)) {
assert(A->getType()->isPointerTy() &&
"ByVal argument is not a pointer!");
Size = DL.getTypeAllocSize(A->getType()->getPointerElementType());
if (ArgOffset + Size > kParamTLSSize) break;
- const MaybeAlign ParamAlignment(CS.getParamAlignment(i));
+ const MaybeAlign ParamAlignment(CB.getParamAlign(i));
MaybeAlign Alignment = llvm::None;
if (ParamAlignment)
Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
}
LLVM_DEBUG(dbgs() << " done with call args\n");
- FunctionType *FT = CS.getFunctionType();
+ FunctionType *FT = CB.getFunctionType();
if (FT->isVarArg()) {
- VAHelper->visitCallSite(CS, IRB);
+ VAHelper->visitCallBase(CB, IRB);
}
// Now, get the shadow for the RetVal.
- if (!I.getType()->isSized()) return;
+ if (!CB.getType()->isSized())
+ return;
// Don't emit the epilogue for musttail call returns.
- if (CS.isCall() && cast<CallInst>(&I)->isMustTailCall()) return;
- IRBuilder<> IRBBefore(&I);
+ if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
+ return;
+ IRBuilder<> IRBBefore(&CB);
// Until we have full dynamic coverage, make sure the retval shadow is 0.
- Value *Base = getShadowPtrForRetval(&I, IRBBefore);
- IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
+ Value *Base = getShadowPtrForRetval(&CB, IRBBefore);
+ IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
+ kShadowTLSAlignment);
BasicBlock::iterator NextInsn;
- if (CS.isCall()) {
- NextInsn = ++I.getIterator();
- assert(NextInsn != I.getParent()->end());
+ if (isa<CallInst>(CB)) {
+ NextInsn = ++CB.getIterator();
+ assert(NextInsn != CB.getParent()->end());
} else {
- BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
+ BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
if (!NormalDest->getSinglePredecessor()) {
// FIXME: this case is tricky, so we are just conservative here.
// Perhaps we need to split the edge between this BB and NormalDest,
// but a naive attempt to use SplitEdge leads to a crash.
- setShadow(&I, getCleanShadow(&I));
- setOrigin(&I, getCleanOrigin());
+ setShadow(&CB, getCleanShadow(&CB));
+ setOrigin(&CB, getCleanOrigin());
return;
}
// FIXME: NextInsn is likely in a basic block that has not been visited yet.
}
IRBuilder<> IRBAfter(&*NextInsn);
Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
- getShadowTy(&I), getShadowPtrForRetval(&I, IRBAfter),
+ getShadowTy(&CB), getShadowPtrForRetval(&CB, IRBAfter),
kShadowTLSAlignment, "_msret");
- setShadow(&I, RetvalShadow);
+ setShadow(&CB, RetvalShadow);
if (MS.TrackOrigins)
- setOrigin(&I, IRBAfter.CreateLoad(MS.OriginTy,
- getOriginPtrForRetval(IRBAfter)));
+ setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy,
+ getOriginPtrForRetval(IRBAfter)));
}
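// Rough shape of the TLS handoff implemented above (a sketch of the usual
// MSan protocol; kParamTLSSize is 800 bytes):
//   caller:  __msan_param_tls[ArgOffset..] = shadow(arg_i)   // before the call
//            __msan_retval_tls = clean shadow                // zero retval slot
//   callee:  stores the return value's shadow to __msan_retval_tls
//   caller:  _msret = load __msan_retval_tls                 // right after call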
bool isAMustTailRetVal(Value *RetVal) {
/// AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelper {
// An unfortunate workaround for asymmetric lowering of va_arg stuff.
- // See a comment in visitCallSite for more details.
+ // See a comment in visitCallBase for more details.
static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
static const unsigned AMD64FpEndOffsetSSE = 176;
// If SSE is disabled, fp_offset in va_list is zero.
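// These constants mirror the AMD64 va_list reg_save_area layout:
//   48  = 6 general-purpose argument registers (rdi, rsi, rdx, rcx, r8, r9) * 8 bytes
//   176 = 48 + 8 SSE argument registers (xmm0-xmm7) * 16 bytes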
// would have been to associate each live instance of va_list with a copy of
// MSanParamTLS, and extract shadow on va_arg() call in the argument list
// order.
- void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
+ void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
unsigned GpOffset = 0;
unsigned FpOffset = AMD64GpEndOffset;
unsigned OverflowOffset = AMD64FpEndOffset;
const DataLayout &DL = F.getParent()->getDataLayout();
- for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
- ArgIt != End; ++ArgIt) {
+ for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
+ ++ArgIt) {
Value *A = *ArgIt;
- unsigned ArgNo = CS.getArgumentNo(ArgIt);
- bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
- bool IsByVal = CS.paramHasAttr(ArgNo, Attribute::ByVal);
+ unsigned ArgNo = CB.getArgOperandNo(ArgIt);
+ bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
+ bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
if (IsByVal) {
// ByVal arguments always go to the overflow area.
// Fixed arguments passed through the overflow area will be stepped
VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
- void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
+ void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
unsigned VAArgOffset = 0;
const DataLayout &DL = F.getParent()->getDataLayout();
- for (CallSite::arg_iterator ArgIt = CS.arg_begin() +
- CS.getFunctionType()->getNumParams(), End = CS.arg_end();
+ for (auto ArgIt = CB.arg_begin() + CB.getFunctionType()->getNumParams(),
+ End = CB.arg_end();
ArgIt != End; ++ArgIt) {
Triple TargetTriple(F.getParent()->getTargetTriple());
Value *A = *ArgIt;
// the remaining arguments.
// Using constant offset within the va_arg TLS array allows fast copy
// in the finalize instrumentation.
- void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
+ void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
unsigned GrOffset = AArch64GrBegOffset;
unsigned VrOffset = AArch64VrBegOffset;
unsigned OverflowOffset = AArch64VAEndOffset;
const DataLayout &DL = F.getParent()->getDataLayout();
- for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
- ArgIt != End; ++ArgIt) {
+ for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
+ ++ArgIt) {
Value *A = *ArgIt;
- unsigned ArgNo = CS.getArgumentNo(ArgIt);
- bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
+ unsigned ArgNo = CB.getArgOperandNo(ArgIt);
+ bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
ArgKind AK = classifyArgument(A);
if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
AK = AK_Memory;
VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
- void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
+ void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
// For PowerPC, we need to deal with alignment of stack arguments -
// they are mostly aligned to 8 bytes, but vectors and i128 arrays
// are aligned to 16 bytes, and byvals can be aligned to 8 or 16 bytes,
VAArgBase = 32;
unsigned VAArgOffset = VAArgBase;
const DataLayout &DL = F.getParent()->getDataLayout();
- for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
- ArgIt != End; ++ArgIt) {
+ for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
+ ++ArgIt) {
Value *A = *ArgIt;
- unsigned ArgNo = CS.getArgumentNo(ArgIt);
- bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
- bool IsByVal = CS.paramHasAttr(ArgNo, Attribute::ByVal);
+ unsigned ArgNo = CB.getArgOperandNo(ArgIt);
+ bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
+ bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
if (IsByVal) {
assert(A->getType()->isPointerTy());
Type *RealTy = A->getType()->getPointerElementType();
uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
- uint64_t ArgAlign = CS.getParamAlignment(ArgNo);
- if (ArgAlign < 8)
- ArgAlign = 8;
+ MaybeAlign ArgAlign = CB.getParamAlign(ArgNo);
+ if (!ArgAlign || *ArgAlign < Align(8))
+ ArgAlign = Align(8);
VAArgOffset = alignTo(VAArgOffset, ArgAlign);
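// Illustrative: alignTo(20, Align(8)) == 24; already-aligned offsets pass through.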
if (!IsFixed) {
Value *Base = getShadowPtrForVAArgument(
return ArgKind::Memory;
}
- ShadowExtension getShadowExtension(const CallSite &CS, unsigned ArgNo) {
+ ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
// ABI says: "One of the simple integer types no more than 64 bits wide.
// ... If such an argument is shorter than 64 bits, replace it by a full
// 64-bit integer representing the same number, using sign or zero
// extension". Shadow for an integer argument has the same type as the
// argument itself, so it can be sign or zero extended as well.
- bool ZExt = CS.paramHasAttr(ArgNo, Attribute::ZExt);
- bool SExt = CS.paramHasAttr(ArgNo, Attribute::SExt);
+ bool ZExt = CB.paramHasAttr(ArgNo, Attribute::ZExt);
+ bool SExt = CB.paramHasAttr(ArgNo, Attribute::SExt);
if (ZExt) {
assert(!SExt);
return ShadowExtension::Zero;
return ShadowExtension::None;
}
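// Illustrative (not in the patch): for a call to `void f(short)` whose first
// argument carries the SExt attribute, getShadowExtension(CB, 0) returns
// ShadowExtension::Sign, so the 16-bit shadow is sign-extended to fill the
// 64-bit slot, mirroring what the ABI does to the value itself.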
- void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
- bool IsSoftFloatABI = CS.getCalledFunction()
+ void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
+ bool IsSoftFloatABI = CB.getCalledFunction()
->getFnAttribute("use-soft-float")
.getValueAsString() == "true";
unsigned GpOffset = SystemZGpOffset;
unsigned VrIndex = 0;
unsigned OverflowOffset = SystemZOverflowOffset;
const DataLayout &DL = F.getParent()->getDataLayout();
- for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
- ArgIt != End; ++ArgIt) {
+ for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
+ ++ArgIt) {
Value *A = *ArgIt;
- unsigned ArgNo = CS.getArgumentNo(ArgIt);
- bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
+ unsigned ArgNo = CB.getArgOperandNo(ArgIt);
+ bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
// SystemZABIInfo does not produce ByVal parameters.
- assert(!CS.paramHasAttr(ArgNo, Attribute::ByVal));
+ assert(!CB.paramHasAttr(ArgNo, Attribute::ByVal));
Type *T = A->getType();
ArgKind AK = classifyArgument(T, IsSoftFloatABI);
if (AK == ArgKind::Indirect) {
uint64_t ArgSize = 8;
if (GpOffset + ArgSize <= kParamTLSSize) {
if (!IsFixed) {
- SE = getShadowExtension(CS, ArgNo);
+ SE = getShadowExtension(CB, ArgNo);
uint64_t GapSize = 0;
if (SE == ShadowExtension::None) {
uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
uint64_t ArgSize = alignTo(ArgAllocSize, 8);
if (OverflowOffset + ArgSize <= kParamTLSSize) {
- SE = getShadowExtension(CS, ArgNo);
+ SE = getShadowExtension(CB, ArgNo);
uint64_t GapSize =
SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
ShadowBase =
std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
/*isStore*/ true);
- // TODO(iii): copy only fragments filled by visitCallSite()
+ // TODO(iii): copy only fragments filled by visitCallBase()
IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
SystemZRegSaveAreaSize);
if (MS.TrackOrigins)
VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV) {}
- void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}
+ void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}
void visitVAStartInst(VAStartInst &I) override {}