const DataLayout &DL);
void instrumentPointerComparisonOrSubtraction(Instruction *I);
void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
- Value *Addr, uint32_t TypeSize, bool IsWrite,
+ Value *Addr, uint32_t TypeStoreSize, bool IsWrite,
Value *SizeArgument, bool UseCalls, uint32_t Exp);
Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
Instruction *InsertBefore, Value *Addr,
- uint32_t TypeSize, bool IsWrite,
+ uint32_t TypeStoreSize, bool IsWrite,
Value *SizeArgument);
void instrumentUnusualSizeOrAlignment(Instruction *I,
Instruction *InsertBefore, Value *Addr,
- uint32_t TypeSize, bool IsWrite,
+ TypeSize TypeStoreSize, bool IsWrite,
Value *SizeArgument, bool UseCalls,
uint32_t Exp);
Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
- Value *ShadowValue, uint32_t TypeSize);
+ Value *ShadowValue, uint32_t TypeStoreSize);
Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
bool IsWrite, size_t AccessSizeIndex,
Value *SizeArgument, uint32_t Exp);
bool LooksLikeCodeInBug11395(Instruction *I);
bool GlobalIsLinkerInitialized(GlobalVariable *G);
bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
- uint64_t TypeSize) const;
+ TypeSize TypeStoreSize) const;
/// Helper to cleanup per-function state.
struct FunctionStateRAII {
return PA;
}
-static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
+static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize) {
size_t Res = llvm::countr_zero(TypeSize / 8);
assert(Res < kNumberOfAccessSizes);
return Res;
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
                                Instruction *InsertBefore, Value *Addr,
                                MaybeAlign Alignment, unsigned Granularity,
-                                uint32_t TypeSize, bool IsWrite,
+                                TypeSize TypeStoreSize, bool IsWrite,
                                Value *SizeArgument, bool UseCalls,
                                uint32_t Exp) {
+  // Dispatch to one of the two instrumentation strategies: accesses of a
+  // native power-of-two store size (1..16 bytes) that are sufficiently
+  // aligned get the single-check path; everything else goes through the
+  // unusual-size-or-alignment path.
  // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
  // if the data is properly aligned.
-  if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
-       TypeSize == 128) &&
-      (!Alignment || *Alignment >= Granularity || *Alignment >= TypeSize / 8))
-    return Pass->instrumentAddress(I, InsertBefore, Addr, TypeSize, IsWrite,
-                                   nullptr, UseCalls, Exp);
-  Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeSize,
+  if ((TypeStoreSize == 8 || TypeStoreSize == 16 || TypeStoreSize == 32 ||
+       TypeStoreSize == 64 || TypeStoreSize == 128) &&
+      (!Alignment || *Alignment >= Granularity ||
+       *Alignment >= TypeStoreSize / 8))
+    return Pass->instrumentAddress(I, InsertBefore, Addr, TypeStoreSize,
+                                   IsWrite, nullptr, UseCalls, Exp);
+  Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
                                         IsWrite, nullptr, UseCalls, Exp);
}
InstrumentedAddress =
IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)});
doInstrumentAddress(Pass, I, InsertBefore, InstrumentedAddress, Alignment,
- Granularity, ElemTypeSize, IsWrite, SizeArgument,
- UseCalls, Exp);
+ Granularity, TypeSize::Fixed(ElemTypeSize), IsWrite,
+ SizeArgument, UseCalls, Exp);
}
}
Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
Value *ShadowValue,
- uint32_t TypeSize) {
+ uint32_t TypeStoreSize) {
size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
// Addr & (Granularity - 1)
Value *LastAccessedByte =
IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
// (Addr & (Granularity - 1)) + size - 1
- if (TypeSize / 8 > 1)
+ if (TypeStoreSize / 8 > 1)
LastAccessedByte = IRB.CreateAdd(
- LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
+ LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
// (uint8_t) ((Addr & (Granularity-1)) + size - 1)
LastAccessedByte =
IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
Instruction *AddressSanitizer::instrumentAMDGPUAddress(
Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
- uint32_t TypeSize, bool IsWrite, Value *SizeArgument) {
+ uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument) {
// Do not instrument unsupported addrspaces.
if (isUnsupportedAMDGPUAddrspace(Addr))
return nullptr;
void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
Instruction *InsertBefore, Value *Addr,
- uint32_t TypeSize, bool IsWrite,
+ uint32_t TypeStoreSize, bool IsWrite,
Value *SizeArgument, bool UseCalls,
uint32_t Exp) {
if (TargetTriple.isAMDGPU()) {
InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
- TypeSize, IsWrite, SizeArgument);
+ TypeStoreSize, IsWrite, SizeArgument);
if (!InsertBefore)
return;
}
IRBuilder<> IRB(InsertBefore);
- size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
+ size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);
const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
if (UseCalls && ClOptimizeCallbacks) {
}
Type *ShadowTy =
- IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale));
+ IntegerType::get(*C, std::max(8U, TypeStoreSize >> Mapping.Scale));
Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
Value *ShadowPtr = memToShadow(AddrLong, IRB);
Value *ShadowValue =
size_t Granularity = 1ULL << Mapping.Scale;
Instruction *CrashTerm = nullptr;
- if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
+ if (ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity)) {
// We use branch weights for the slow path check, to indicate that the slow
// path is rarely taken. This seems to be the case for SPEC benchmarks.
Instruction *CheckTerm = SplitBlockAndInsertIfThen(
assert(cast<BranchInst>(CheckTerm)->isUnconditional());
BasicBlock *NextBB = CheckTerm->getSuccessor(0);
IRB.SetInsertPoint(CheckTerm);
- Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
+ Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
if (Recover) {
CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
} else {
// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
// to report the actual access size.
void AddressSanitizer::instrumentUnusualSizeOrAlignment(
- Instruction *I, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize,
+ Instruction *I, Instruction *InsertBefore, Value *Addr, TypeSize TypeStoreSize,
bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) {
IRBuilder<> IRB(InsertBefore);
- Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
+ Value *Size = ConstantInt::get(IntptrTy, TypeStoreSize / 8);
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
if (UseCalls) {
if (Exp == 0)
{AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
} else {
Value *LastByte = IRB.CreateIntToPtr(
- IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
+ IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1)),
Addr->getType());
instrumentAddress(I, InsertBefore, Addr, 8, IsWrite, Size, false, Exp);
instrumentAddress(I, InsertBefore, LastByte, 8, IsWrite, Size, false, Exp);
// base object. For example, it is a field access or an array access with
// constant inbounds index.
bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
- Value *Addr, uint64_t TypeSize) const {
+ Value *Addr, TypeSize TypeStoreSize) const {
SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
uint64_t Size = SizeOffset.first.getZExtValue();
// . Size >= Offset (unsigned)
// . Size - Offset >= NeededSize (unsigned)
return Offset >= 0 && Size >= uint64_t(Offset) &&
- Size - uint64_t(Offset) >= TypeSize / 8;
+ Size - uint64_t(Offset) >= TypeStoreSize / 8;
}