return false;
LLVM_FALLTHROUGH;
}
- case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::objectsize:
Users.emplace_back(I);
continue;
+ case Intrinsic::invariant_start:
+ // Only try to delete this if it has no uses or a single 'end' use.
+ if (I->use_empty()) {
+ Users.emplace_back(I);
+ continue;
+ } else if (I->hasOneUse() &&
+ match(I->user_back(),
+ m_Intrinsic<Intrinsic::invariant_end>())) {
+ // The lone user is an invariant.end; it is erased first in the
+ // removal loop below, so the start is dead by the time we erase it.
+ Users.emplace_back(I);
+ continue;
+ }
+ // invariant.start has some other (unknown) use of its token result;
+ // the allocation is not removable.
+ return false;
}
}
if (isAllocSiteRemovable(&MI, Users, &TLI)) {
for (unsigned i = 0, e = Users.size(); i != e; ++i) {
- // Lowering all @llvm.objectsize calls first because they may
- // use a bitcast/GEP of the alloca we are removing.
if (!Users[i])
continue;
Instruction *I = cast<Instruction>(&*Users[i]);
-
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ // Lowering all @llvm.objectsize calls first because they may
+ // use a bitcast/GEP of the alloca we are removing.
if (II->getIntrinsicID() == Intrinsic::objectsize) {
Value *Result =
lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/true);
+ // NOTE(review): 'Result' appears unused in these lines — upstream
+ // LLVM calls replaceInstUsesWith(*I, Result) before the erase.
+ // Confirm uses of the objectsize call are rewritten first,
+ // otherwise erasing it below would leave dangling uses.
eraseInstFromFunction(*I);
Users[i] = nullptr; // Skip examining in the next loop.
}
+ // Erase llvm.invariant.end because we expect that it uses an
+ // llvm.invariant.start that we will remove below.
+ // NOTE(review): if the objectsize branch above fired, *I was just
+ // erased, so this getIntrinsicID() check would read a freed
+ // instruction; consider 'else if' (the two IDs are mutually
+ // exclusive anyway).
+ if (II->getIntrinsicID() == Intrinsic::invariant_end) {
+ eraseInstFromFunction(*I);
+ Users[i] = nullptr; // Skip examining in the next loop.
+ }
}
}
for (unsigned i = 0, e = Users.size(); i != e; ++i) {
ret i32 %conv
}
+; This used to crash while erasing instructions:
+; https://bugs.llvm.org/show_bug.cgi?id=43723
+
+; The alloca and all of its users (memset, invariant.start, invariant.end)
+; are removable, so the whole body folds to 'ret void'. The 'uselistorder'
+; directive pins the use-list order of %t0 that reproduced the original
+; use-after-erase crash.
+define void @PR43723() {
+; CHECK-LABEL: @PR43723(
+; CHECK-NEXT: ret void
+;
+ %tab = alloca [10 x i8], align 16
+ %t0 = bitcast [10 x i8]* %tab to i8*
+ call void @llvm.memset.p0i8.i64(i8* align 16 %t0, i8 9, i64 10, i1 false)
+ %t1 = call {}* @llvm.invariant.start.p0i8(i64 10, i8* align 16 %t0)
+ call void @llvm.invariant.end.p0i8({}* %t1, i64 10, i8* align 16 %t0)
+ ret void
+
+ uselistorder i8* %t0, { 1, 0, 2 }
+}
+
+; Negative test: the invariant.start token has a use other than an
+; invariant.end (it escapes via the store to %p), so the alloca is NOT
+; removable and the CHECK lines require everything to be left in place.
+define void @unknown_use_of_invariant_start({}** %p) {
+; CHECK-LABEL: @unknown_use_of_invariant_start(
+; CHECK-NEXT: [[TAB1:%.*]] = alloca [10 x i8], align 16
+; CHECK-NEXT: [[TAB1_SUB:%.*]] = getelementptr inbounds [10 x i8], [10 x i8]* [[TAB1]], i64 0, i64 0
+; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* nonnull align 16 dereferenceable(10) [[TAB1_SUB]], i8 9, i64 10, i1 false)
+; CHECK-NEXT: [[T1:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 10, i8* nonnull align 16 [[TAB1_SUB]])
+; CHECK-NEXT: call void @llvm.invariant.end.p0i8({}* [[T1]], i64 10, i8* nonnull align 16 [[TAB1_SUB]])
+; CHECK-NEXT: store {}* [[T1]], {}** [[P:%.*]], align 8
+; CHECK-NEXT: ret void
+;
+ %tab = alloca [10 x i8], align 16
+ %t0 = bitcast [10 x i8]* %tab to i8*
+ call void @llvm.memset.p0i8.i64(i8* align 16 %t0, i8 9, i64 10, i1 false)
+ %t1 = call {}* @llvm.invariant.start.p0i8(i64 10, i8* align 16 %t0)
+ call void @llvm.invariant.end.p0i8({}* %t1, i64 10, i8* align 16 %t0)
+ store {}* %t1, {}** %p
+ ret void
+}
+
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare i64 @llvm.objectsize.i64.p0i8(i8*, i1) #2
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #0
+declare {}* @llvm.invariant.start.p0i8(i64 immarg, i8* nocapture) #0
+declare void @llvm.invariant.end.p0i8({}*, i64 immarg, i8* nocapture) #0