return;
}
- if (!WI.WidestNativeType) {
+ if (!WI.WidestNativeType ||
+ Width > SE->getTypeSizeInBits(WI.WidestNativeType)) {
WI.WidestNativeType = SE->getEffectiveSCEVType(Ty);
WI.IsSigned = IsSigned;
return;
}
- // We extend the IV to satisfy the sign of its first user, arbitrarily.
- if (WI.IsSigned != IsSigned)
- return;
-
- if (Width > SE->getTypeSizeInBits(WI.WidestNativeType))
- WI.WidestNativeType = SE->getEffectiveSCEVType(Ty);
+ // We extend the IV to satisfy the sign of its user(s), picking 'signed'
+ // if there are users with both sign and zero extensions, so as not to
+ // introduce nondeterministic behaviour based on the unspecified order
+ // of a PHI node's users-iterator.
+ WI.IsSigned |= IsSigned;
}
//===----------------------------------------------------------------------===//
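To make the new merge rule concrete, here is a minimal standalone sketch
(hypothetical names; a plain bit-width stands in for WI.WidestNativeType, and
ScalarEvolution is elided): for users of the same width, the widened IV's
signedness is now the OR over all users, so the visit order no longer matters.

#include <cassert>
#include <cstdint>

struct WideIVInfoSketch {
  uint32_t WidthInBits = 0; // stand-in for WI.WidestNativeType
  bool IsSigned = false;    // stand-in for WI.IsSigned
};

// Mirrors the updated hunk above: a strictly wider user takes over both
// fields; otherwise signedness is OR-merged across users.
static void mergeUser(WideIVInfoSketch &WI, uint32_t Width, bool IsSigned) {
  if (WI.WidthInBits == 0 || Width > WI.WidthInBits) {
    WI.WidthInBits = Width;
    WI.IsSigned = IsSigned;
    return;
  }
  WI.IsSigned |= IsSigned;
}

int main() {
  // One sign-extending and one zero-extending i64 user, as in the test
  // below, merged in both possible orders.
  WideIVInfoSketch A, B;
  mergeUser(A, 64, /*IsSigned=*/true);  // sext seen first
  mergeUser(A, 64, /*IsSigned=*/false); // zext seen second
  mergeUser(B, 64, /*IsSigned=*/false); // zext seen first
  mergeUser(B, 64, /*IsSigned=*/true);  // sext seen second
  assert(A.IsSigned && B.IsSigned);     // both orders end up 'signed'
  return 0;
}

With the old rule ("the sign of its first user, arbitrarily"), the two orders
above would have produced different signedness for the widened IV.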
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt "-passes=loop-rotate,indvars" -S < %s | FileCheck %s
+; RUN: opt "-passes=loop-rotate" < %s | opt "-passes=indvars" -S - | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-unknown-linux-gnu"
+
+define dso_local float @foo(float* noalias %dst, float* %src, i32 %offset, i32 %N) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 1, [[N:%.*]]
+; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK: for.body.lr.ph:
+; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[OFFSET:%.*]] to i64
+; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.cond.for.cond.cleanup_crit_edge:
+; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: ret float undef
+; CHECK: for.body:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 1, [[FOR_BODY_LR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = add nsw i64 [[INDVARS_IV]], [[TMP0]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP1]], 1
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = load float, float* [[ARRAYIDX3]], align 4
+; CHECK-NEXT: [[ADD4:%.*]] = fadd fast float [[TMP2]], [[TMP4]]
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: store float [[ADD4]], float* [[ARRAYIDX6]], align 4
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_FOR_COND_CLEANUP_CRIT_EDGE:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
+;
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %for.body, %entry
+ %i.0 = phi i32 [ 1, %entry ], [ %inc, %for.body ]
+ %cmp = icmp slt i32 %i.0, %N
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond
+ ret float undef
+
+for.body: ; preds = %for.cond
+ %add = add nsw i32 %i.0, %offset
+ %idxprom = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds float, float* %src, i64 %idxprom
+ %0 = load float, float* %arrayidx, align 4
+ %add1 = add nsw i32 %add, 1
+ %idxprom2 = sext i32 %add1 to i64
+ %arrayidx3 = getelementptr inbounds float, float* %src, i64 %idxprom2
+ %1 = load float, float* %arrayidx3, align 4
+ %add4 = fadd fast float %0, %1
+ %idxprom5 = zext i32 %i.0 to i64
+ %arrayidx6 = getelementptr inbounds float, float* %dst, i64 %idxprom5
+ store float %add4, float* %arrayidx6, align 4
+ %inc = add nuw nsw i32 %i.0, 1
+ br label %for.cond, !llvm.loop !1
+}
+
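+; [[LOOP0]] in the CHECK lines above matches the loop metadata carried
+; over from !1.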
+!1 = distinct !{!1, !2}
+!2 = !{!"llvm.loop.mustprogress"}