From: Vitaly Buka
Date: Wed, 14 Sep 2022 03:17:55 +0000 (-0700)
Subject: [msan] Change logic of ClInstrumentationWithCallThreshold
X-Git-Tag: upstream/17.0.6~33497
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=bf204881b6376c57aa56668612321658c4606e38;p=platform%2Fupstream%2Fllvm.git

[msan] Change logic of ClInstrumentationWithCallThreshold

According to the logs, ClInstrumentationWithCallThreshold is a workaround
for a slow backend on functions with a large number of basic blocks. I
can't reproduce that case, but I do see a significant slowdown after
ClCheckConstantShadow. Without ClInstrumentationWithCallThreshold the
compiler is able to eliminate many of the branches, so maybe we should
drop ClInstrumentationWithCallThreshold completely. For now I just change
the logic to ignore constant shadow, so that it does not trigger the
callback fallback too early.

Reviewed By: kstoimenov

Differential Revision: https://reviews.llvm.org/D133880
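For illustration, a minimal standalone model of the new gating follows; Gate,
Threshold, and IsConstantShadow are made-up stand-ins for exposition, not
LLVM API. It mimics the new instrumentWithCalls(): constant shadows no longer
advance the counter, so the call fallback triggers later than before.

  // Illustrative sketch only; Gate/Threshold/IsConstantShadow are not LLVM API.
  #include <cstdint>
  #include <iostream>

  struct Gate {
    int64_t Threshold;                 // stand-in for the cl::opt value
    int64_t SplittableBlocksCount = 0; // mirrors the new visitor field

    bool instrumentWithCalls(bool IsConstantShadow) {
      // Constants likely will be eliminated by follow-up passes.
      if (IsConstantShadow)
        return false;
      ++SplittableBlocksCount;
      return Threshold >= 0 && SplittableBlocksCount > Threshold;
    }
  };

  int main() {
    Gate G{/*Threshold=*/2};
    // The two constant shadows are skipped; only non-constant shadows count.
    for (bool IsConst : {true, true, false, false, false})
      std::cout << G.instrumentWithCalls(IsConst);
    std::cout << '\n'; // prints "00001": the third non-constant shadow trips it
  }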
---

diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 977a234..f75d5a1 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1117,6 +1117,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   SmallSetVector<AllocaInst *, 16> AllocaSet;
   SmallVector<std::pair<IntrinsicInst *, AllocaInst *>, 16> LifetimeStartList;
   SmallVector<StoreInst *, 16> StoreList;
+  int64_t SplittableBlocksCount = 0;
 
   MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
                          const TargetLibraryInfo &TLI)
@@ -1148,6 +1149,16 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
               << F.getName() << "'\n");
   }
 
+  bool instrumentWithCalls(Value *V) {
+    // Constants likely will be eliminated by follow-up passes.
+    if (isa<Constant>(V))
+      return false;
+
+    ++SplittableBlocksCount;
+    return ClInstrumentationWithCallThreshold >= 0 &&
+           SplittableBlocksCount > ClInstrumentationWithCallThreshold;
+  }
+
   bool isInPrologue(Instruction &I) {
     return I.getParent() == FnPrologueEnd->getParent() &&
            (&I == FnPrologueEnd || I.comesBefore(FnPrologueEnd));
@@ -1206,7 +1217,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   }
 
   void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
-                   Value *OriginPtr, Align Alignment, bool AsCall) {
+                   Value *OriginPtr, Align Alignment) {
     const DataLayout &DL = F.getParent()->getDataLayout();
     const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
     unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
@@ -1228,7 +1239,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
 
     unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
     unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
-    if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
+    if (instrumentWithCalls(ConvertedShadow) &&
+        SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
       FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
       Value *ConvertedShadow2 =
           IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
@@ -1247,7 +1259,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     }
   }
 
-  void materializeStores(bool InstrumentWithCalls) {
+  void materializeStores() {
     for (StoreInst *SI : StoreList) {
       IRBuilder<> IRB(SI);
       Value *Val = SI->getValueOperand();
@@ -1269,7 +1281,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
 
       if (MS.TrackOrigins && !SI->isAtomic())
         storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
-                    OriginAlignment, InstrumentWithCalls);
+                    OriginAlignment);
     }
   }
 
@@ -1319,11 +1331,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   }
 
   void materializeOneCheck(IRBuilder<> &IRB, Value *ConvertedShadow,
-                           Value *Origin, bool AsCall) {
+                           Value *Origin) {
     const DataLayout &DL = F.getParent()->getDataLayout();
     unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
     unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
-    if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
+    if (instrumentWithCalls(ConvertedShadow) &&
+        SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
       FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
       Value *ConvertedShadow2 =
           IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
@@ -1345,12 +1358,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   }
 
   void materializeInstructionChecks(
-      bool InstrumentWithCalls,
       ArrayRef<ShadowOriginAndInsertPoint> InstructionChecks) {
     const DataLayout &DL = F.getParent()->getDataLayout();
     // Disable combining in some cases. TrackOrigins checks each shadow to pick
-    // correct origin. InstrumentWithCalls expects to reduce shadow using API.
-    bool Combine = !InstrumentWithCalls && !MS.TrackOrigins;
+    // correct origin.
+    bool Combine = !MS.TrackOrigins;
     Instruction *Instruction = InstructionChecks.front().OrigIns;
     Value *Shadow = nullptr;
     for (const auto &ShadowData : InstructionChecks) {
@@ -1377,9 +1389,16 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
         // Fallback to runtime check, which still can be optimized out later.
       }
 
+      // Work around to keep existing tests.
+      // FIXME: update tests and remove.
+      if (ClInstrumentationWithCallThreshold >= 0 &&
+          SplittableBlocksCount >= ClInstrumentationWithCallThreshold) {
+        materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
+        continue;
+      }
+
       if (!Combine) {
-        materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin,
-                            InstrumentWithCalls);
+        materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
         continue;
       }
 
@@ -1390,11 +1409,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     if (Shadow) {
       assert(Combine);
       IRBuilder<> IRB(Instruction);
-      materializeOneCheck(IRB, Shadow, nullptr, false);
+      materializeOneCheck(IRB, Shadow, nullptr);
     }
   }
 
-  void materializeChecks(bool InstrumentWithCalls) {
+  void materializeChecks() {
     llvm::stable_sort(InstrumentationList,
                       [](const ShadowOriginAndInsertPoint &L,
                          const ShadowOriginAndInsertPoint &R) {
@@ -1409,8 +1428,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
         return L != R.OrigIns;
       });
       // Process all checks of instruction at once.
-      materializeInstructionChecks(InstrumentWithCalls,
-                                   ArrayRef<ShadowOriginAndInsertPoint>(I, J));
+      materializeInstructionChecks(ArrayRef<ShadowOriginAndInsertPoint>(I, J));
       I = J;
     }
 
@@ -1474,16 +1492,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     for (AllocaInst *AI : AllocaSet)
       instrumentAlloca(*AI);
 
-    bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
-                               InstrumentationList.size() + StoreList.size() >
-                                   (unsigned)ClInstrumentationWithCallThreshold;
-
     // Insert shadow value checks.
-    materializeChecks(InstrumentWithCalls);
+    materializeChecks();
 
     // Delayed instrumentation of StoreInst.
     // This may not add new address checks.
-    materializeStores(InstrumentWithCalls);
+    materializeStores();
 
     return true;
   }
diff --git a/llvm/test/Instrumentation/MemorySanitizer/with-call-type-size.ll b/llvm/test/Instrumentation/MemorySanitizer/with-call-type-size.ll
index f3456ae..2461a3e 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/with-call-type-size.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/with-call-type-size.ll
@@ -84,3 +84,11 @@ define <4 x i32> @test65(<4 x i32> %vec, i65 %idx, i32 %x) sanitize_memory {
 ; CHECK:      icmp ne i65 %{{.*}}, 0
 ; CHECK-NOT:  call void @__msan_maybe_warning_
 ; CHECK:      ret <4 x i32>
+
+define <4 x i32> @testUndef(<4 x i32> %vec, i32 %x) sanitize_memory {
+  %vec1 = insertelement <4 x i32> %vec, i32 undef, i32 undef
+  ret <4 x i32> %vec1
+}
+; CHECK-LABEL: @testUndef(
+; CHECK-NOT:  call void @__msan_maybe_warning_
+; CHECK:      ret <4 x i32>
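
To re-run the updated test locally, an invocation along the following lines
should work. This assumes the file's RUN line drives the msan pass with
-msan-instrumentation-with-call-threshold (the cl::opt behind
ClInstrumentationWithCallThreshold); check the RUN line at the top of the
file for the exact flags it uses.

  opt -S -passes=msan -msan-instrumentation-with-call-threshold=0 \
      < llvm/test/Instrumentation/MemorySanitizer/with-call-type-size.ll \
    | FileCheck llvm/test/Instrumentation/MemorySanitizer/with-call-type-size.ll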