From: Philip Reames
Date: Thu, 2 Mar 2023 18:39:06 +0000 (-0800)
Subject: [ASAN] Rename TypeSize to TypeStoreSize [mostly NFC]
X-Git-Tag: upstream/17.0.6~16003
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=67dab19453d00f74d1aec1aa07e3bf5a14d3275f;p=platform%2Fupstream%2Fllvm.git

[ASAN] Rename TypeSize to TypeStoreSize [mostly NFC]

This is a mechanical prep change for scalable vector support. All it
does is move the point of conversion from TypeSize to unsigned (i.e.
the unsafe cast) closer to the point of use.
---

diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 6320277..a539c47 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -692,19 +692,19 @@ struct AddressSanitizer {
                        const DataLayout &DL);
   void instrumentPointerComparisonOrSubtraction(Instruction *I);
   void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
-                         Value *Addr, uint32_t TypeSize, bool IsWrite,
+                         Value *Addr, uint32_t TypeStoreSize, bool IsWrite,
                          Value *SizeArgument, bool UseCalls, uint32_t Exp);
   Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
                                        Instruction *InsertBefore, Value *Addr,
-                                       uint32_t TypeSize, bool IsWrite,
+                                       uint32_t TypeStoreSize, bool IsWrite,
                                        Value *SizeArgument);
   void instrumentUnusualSizeOrAlignment(Instruction *I,
                                         Instruction *InsertBefore, Value *Addr,
-                                        uint32_t TypeSize, bool IsWrite,
+                                        TypeSize TypeStoreSize, bool IsWrite,
                                         Value *SizeArgument, bool UseCalls,
                                         uint32_t Exp);
   Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
-                           Value *ShadowValue, uint32_t TypeSize);
+                           Value *ShadowValue, uint32_t TypeStoreSize);
   Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                  bool IsWrite, size_t AccessSizeIndex,
                                  Value *SizeArgument, uint32_t Exp);
@@ -724,7 +724,7 @@ private:
   bool LooksLikeCodeInBug11395(Instruction *I);
   bool GlobalIsLinkerInitialized(GlobalVariable *G);
   bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
-                    uint64_t TypeSize) const;
+                    TypeSize TypeStoreSize) const;
 
   /// Helper to cleanup per-function state.
   struct FunctionStateRAII {
@@ -1176,7 +1176,7 @@ PreservedAnalyses AddressSanitizerPass::run(Module &M,
   return PA;
 }
 
-static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
+static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize) {
   size_t Res = llvm::countr_zero(TypeSize / 8);
   assert(Res < kNumberOfAccessSizes);
   return Res;
@@ -1416,17 +1416,17 @@ void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
 
 static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
                                 Instruction *InsertBefore, Value *Addr,
                                 MaybeAlign Alignment, unsigned Granularity,
-                                uint32_t TypeSize, bool IsWrite,
+                                TypeSize TypeStoreSize, bool IsWrite,
                                 Value *SizeArgument, bool UseCalls,
                                 uint32_t Exp) {
   // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
   // if the data is properly aligned.
-  if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
-       TypeSize == 128) &&
-      (!Alignment || *Alignment >= Granularity || *Alignment >= TypeSize / 8))
-    return Pass->instrumentAddress(I, InsertBefore, Addr, TypeSize, IsWrite,
+  if ((TypeStoreSize == 8 || TypeStoreSize == 16 || TypeStoreSize == 32 || TypeStoreSize == 64 ||
+       TypeStoreSize == 128) &&
+      (!Alignment || *Alignment >= Granularity || *Alignment >= TypeStoreSize / 8))
+    return Pass->instrumentAddress(I, InsertBefore, Addr, TypeStoreSize, IsWrite,
                                    nullptr, UseCalls, Exp);
-  Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeSize,
+  Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
                                          IsWrite, nullptr, UseCalls, Exp);
 }
 
@@ -1464,8 +1464,8 @@ static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass,
       InstrumentedAddress =
           IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)});
     doInstrumentAddress(Pass, I, InsertBefore, InstrumentedAddress, Alignment,
-                        Granularity, ElemTypeSize, IsWrite, SizeArgument,
-                        UseCalls, Exp);
+                        Granularity, TypeSize::Fixed(ElemTypeSize), IsWrite,
+                        SizeArgument, UseCalls, Exp);
   }
 }
 
@@ -1554,15 +1554,15 @@ Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
 
 Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                                            Value *ShadowValue,
-                                           uint32_t TypeSize) {
+                                           uint32_t TypeStoreSize) {
   size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
   // Addr & (Granularity - 1)
   Value *LastAccessedByte =
       IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
   // (Addr & (Granularity - 1)) + size - 1
-  if (TypeSize / 8 > 1)
+  if (TypeStoreSize / 8 > 1)
     LastAccessedByte = IRB.CreateAdd(
-        LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
+        LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
   // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
   LastAccessedByte =
       IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
@@ -1572,7 +1572,7 @@ Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
 
 Instruction *AddressSanitizer::instrumentAMDGPUAddress(
     Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
-    uint32_t TypeSize, bool IsWrite, Value *SizeArgument) {
+    uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument) {
   // Do not instrument unsupported addrspaces.
   if (isUnsupportedAMDGPUAddrspace(Addr))
     return nullptr;
@@ -1595,18 +1595,18 @@ Instruction *AddressSanitizer::instrumentAMDGPUAddress(
 
 void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
                                          Instruction *InsertBefore, Value *Addr,
-                                         uint32_t TypeSize, bool IsWrite,
+                                         uint32_t TypeStoreSize, bool IsWrite,
                                          Value *SizeArgument, bool UseCalls,
                                          uint32_t Exp) {
   if (TargetTriple.isAMDGPU()) {
     InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
-                                           TypeSize, IsWrite, SizeArgument);
+                                           TypeStoreSize, IsWrite, SizeArgument);
     if (!InsertBefore)
       return;
   }
 
   IRBuilder<> IRB(InsertBefore);
-  size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
+  size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);
   const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
 
   if (UseCalls && ClOptimizeCallbacks) {
@@ -1631,7 +1631,7 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
   }
 
   Type *ShadowTy =
-      IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale));
+      IntegerType::get(*C, std::max(8U, TypeStoreSize >> Mapping.Scale));
   Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
   Value *ShadowPtr = memToShadow(AddrLong, IRB);
   Value *ShadowValue =
@@ -1641,7 +1641,7 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
   size_t Granularity = 1ULL << Mapping.Scale;
   Instruction *CrashTerm = nullptr;
 
-  if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
+  if (ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity)) {
     // We use branch weights for the slow path check, to indicate that the slow
     // path is rarely taken. This seems to be the case for SPEC benchmarks.
     Instruction *CheckTerm = SplitBlockAndInsertIfThen(
@@ -1649,7 +1649,7 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
     assert(cast<BranchInst>(CheckTerm)->isUnconditional());
     BasicBlock *NextBB = CheckTerm->getSuccessor(0);
     IRB.SetInsertPoint(CheckTerm);
-    Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
+    Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
     if (Recover) {
       CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
     } else {
@@ -1673,10 +1673,10 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
 // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
 // to report the actual access size.
 void AddressSanitizer::instrumentUnusualSizeOrAlignment(
-    Instruction *I, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize,
+    Instruction *I, Instruction *InsertBefore, Value *Addr, TypeSize TypeStoreSize,
     bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) {
   IRBuilder<> IRB(InsertBefore);
-  Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
+  Value *Size = ConstantInt::get(IntptrTy, TypeStoreSize / 8);
   Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
   if (UseCalls) {
     if (Exp == 0)
@@ -1687,7 +1687,7 @@ void AddressSanitizer::instrumentUnusualSizeOrAlignment(
                      {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
   } else {
     Value *LastByte = IRB.CreateIntToPtr(
-        IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
+        IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1)),
         Addr->getType());
     instrumentAddress(I, InsertBefore, Addr, 8, IsWrite, Size, false, Exp);
     instrumentAddress(I, InsertBefore, LastByte, 8, IsWrite, Size, false, Exp);
@@ -3485,7 +3485,7 @@ void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
 // base object. For example, it is a field access or an array access with
 // constant inbounds index.
 bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
-                                    Value *Addr, uint64_t TypeSize) const {
+                                    Value *Addr, TypeSize TypeStoreSize) const {
   SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
   if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
   uint64_t Size = SizeOffset.first.getZExtValue();
@@ -3495,5 +3495,5 @@ bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
   // . Size >= Offset  (unsigned)
   // . Size - Offset >= NeededSize  (unsigned)
   return Offset >= 0 && Size >= uint64_t(Offset) &&
-         Size - uint64_t(Offset) >= TypeSize / 8;
+         Size - uint64_t(Offset) >= TypeStoreSize / 8;
 }
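
For readers unfamiliar with the pattern the commit message describes, the following is a minimal standalone sketch, not part of this patch, of moving the lossy TypeSize-to-unsigned conversion from the caller to the point of use. The helpers emitCheckOld and emitCheckNew are hypothetical; only llvm::TypeSize and its accessors are real LLVM API.

// Hypothetical helpers, not from AddressSanitizer.cpp, illustrating the
// before/after calling convention around TypeSize.
#include "llvm/Support/TypeSize.h"
#include <cstdint>
#include <cstdio>

using llvm::TypeSize;

// Old style: the caller already performed the lossy cast to a fixed bit
// count, so a scalable vector size can never reach this helper.
static void emitCheckOld(uint32_t TypeStoreSize) {
  std::printf("access of %u bits\n", TypeStoreSize);
}

// New style: the interface carries TypeSize; the fixed-size assumption
// (getFixedValue) is made only where a concrete bit count is required.
static void emitCheckNew(TypeSize TypeStoreSize) {
  if (TypeStoreSize.isScalable()) {
    std::printf("scalable access, at least %u bits\n",
                unsigned(TypeStoreSize.getKnownMinValue()));
    return;
  }
  std::printf("access of %u bits\n", unsigned(TypeStoreSize.getFixedValue()));
}

int main() {
  emitCheckOld(64);                           // cast happened upstream
  emitCheckNew(TypeSize::Fixed(64));          // cast deferred to the use site
  emitCheckNew(TypeSize::getScalable(128));   // representable only in new style
}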