//===----------------------------------------------------------------------===//
namespace {
-/// Represents zext(sext(V)).
+/// Represents zext(sext(trunc(V))).
struct CastedValue {
const Value *V;
unsigned ZExtBits = 0;
unsigned SExtBits = 0;
+ unsigned TruncBits = 0;
explicit CastedValue(const Value *V) : V(V) {}
- explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits)
- : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits) {}
+ explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
+ unsigned TruncBits)
+ : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}
unsigned getBitWidth() const {
- return V->getType()->getPrimitiveSizeInBits() + ZExtBits + SExtBits;
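+ // e.g. a 64-bit V with TruncBits = 32 and no extensions has an effective
+ // width of 64 - 32 = 32 bits.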
+ return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
+ SExtBits;
}
CastedValue withValue(const Value *NewV) const {
- return CastedValue(NewV, ZExtBits, SExtBits);
+ return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
}
/// Replace V with zext(NewV)
CastedValue withZExtOfValue(const Value *NewV) const {
unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
NewV->getType()->getPrimitiveSizeInBits();
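+ // If the pending truncation removes at least as many bits as this zext
+ // adds, the zext folds away and only the remaining bits of NewV are
+ // truncated.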
+ if (ExtendBy <= TruncBits)
+ return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
+
// zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
- return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0);
+ ExtendBy -= TruncBits;
+ return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
}
/// Replace V with sext(NewV)
CastedValue withSExtOfValue(const Value *NewV) const {
unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
NewV->getType()->getPrimitiveSizeInBits();
+ if (ExtendBy <= TruncBits)
+ return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
+
// zext(sext(sext(NewV)))
- return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy);
+ ExtendBy -= TruncBits;
+ return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
}
APInt evaluateWith(APInt N) const {
assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
"Incompatible bit width");
+ if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
return N;
}
KnownBits evaluateWith(KnownBits N) const {
assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
"Incompatible bit width");
+ if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
return N;
}
ConstantRange evaluateWith(ConstantRange N) const {
assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
"Incompatible bit width");
+ if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
return N;
}
bool canDistributeOver(bool NUW, bool NSW) const {
// zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
// sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
+ // trunc(x op y) == trunc(x) op trunc(y)
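+ // trunc needs no wrap flags: the low bits of x op y depend only on the
+ // low bits of x and y for the operations handled here.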
return (!ZExtBits || NUW) && (!SExtBits || NSW);
}
bool hasSameCastsAs(const CastedValue &Other) const {
- return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits;
+ return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
+ TruncBits == Other.TruncBits;
}
};
-/// Represents zext(sext(V)) * Scale + Offset.
+/// Represents zext(sext(trunc(V))) * Scale + Offset.
struct LinearExpression {
CastedValue Val;
APInt Scale;
if (!Val.canDistributeOver(NUW, NSW))
return Val;
+ // While we can distribute over trunc, we cannot preserve nowrap flags
+ // in that case.
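+ // e.g. (0x7fffffff + 1) is nsw as an i64 add, but signed-overflows once
+ // the operands are truncated to i32.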
+ if (Val.TruncBits)
+ NUW = NSW = false;
+
LinearExpression E(Val);
switch (BOp->getOpcode()) {
default:
namespace {
// A linear transformation of a Value; this class represents
-// ZExt(SExt(V, SExtBits), ZExtBits) * Scale.
+// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
CastedValue Val;
APInt Scale;
OS << "(V=" << Val.V->getName()
<< ", zextbits=" << Val.ZExtBits
<< ", sextbits=" << Val.SExtBits
+ << ", truncbits=" << Val.TruncBits
<< ", scale=" << Scale << ")";
}
};
- // sign extended to pointer size.
+ // sign extended to pointer size; a wider index is implicitly truncated.
unsigned Width = Index->getType()->getIntegerBitWidth();
unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
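+ // e.g. an i64 index with 32-bit pointers gets TruncBits = 64 - 32 = 32.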
+ unsigned TruncBits = PointerSize < Width ? Width - PointerSize : 0;
LinearExpression LE = GetLinearExpression(
- CastedValue(Index, 0, SExtBits), DL, 0, AC, DT);
+ CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);
// The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
// This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
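+ // e.g. with i32 elements Scale is 4, so an index of 2*V + 1 contributes
+ // 8*V + 4 bytes to the offset.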
APInt ScaledOffset = LE.Offset.sextOrTrunc(MaxPointerSize)
.smul_ov(Scale, Overflow);
if (Overflow) {
- LE = LinearExpression(CastedValue(Index, 0, SExtBits));
+ LE = LinearExpression(CastedValue(Index, 0, SExtBits, TruncBits));
} else {
Decomposed.Offset += ScaledOffset;
Scale *= LE.Scale.sextOrTrunc(MaxPointerSize);
if (AllNonNegative || AllNonPositive) {
KnownBits Known = Index.Val.evaluateWith(
computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT));
- // TODO: Account for implicit trunc.
bool SignKnownZero = Known.isNonNegative();
bool SignKnownOne = Known.isNegative();
AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) ||
if (DecompGEP1.VarIndices.size() == 1) {
// VarIndex = Scale*V.
const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
- if (isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
+ if (Var.Val.TruncBits == 0 &&
+ isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
// If V != 0 then abs(VarIndex) >= abs(Scale).
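+ // This only holds without truncation: a non-zero value such as 1 << 32
+ // still truncates to 0 in 32 bits.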
MinAbsVarIndex = Var.Scale.abs();
}
// inequality of values across loop iterations.
const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
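+ // Inequality proven at the wide width does not survive truncation, so
+ // require TruncBits == 0 here as well.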
- if (Var0.Scale == -Var1.Scale &&
+ if (Var0.Scale == -Var1.Scale && Var0.Val.TruncBits == 0 &&
Var0.Val.hasSameCastsAs(Var1.Val) && VisitedPhiBBs.empty() &&
isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
DT))
const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
- if (!Var0.Val.hasSameCastsAs(Var1.Val) || Var0.Scale != -Var1.Scale ||
+ if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
+ Var0.Scale != -Var1.Scale ||
Var0.Val.V->getType() != Var1.Val.V->getType())
return false;
ret void
}
-; FIXME: Currently we incorrectly determine NoAlias for %gep.1 and %gep.2. The
-; GEP indices get implicitly truncated to 32 bit, so multiples of 2^32
+; The GEP indices get implicitly truncated to 32 bits, so multiples of 2^32
; (=4294967296) will be 0.
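+; For example, an index of 4294967296 yields the same address as an index of 0.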
; See https://alive2.llvm.org/ce/z/HHjQgb.
define void @mustalias_overflow_in_32_bit_add_mul_gep(i8* %ptr, i64 %i) {
; CHECK-LABEL: Function: mustalias_overflow_in_32_bit_add_mul_gep: 3 pointers, 1 call sites
-; CHECK-NEXT: NoAlias: i8* %gep.1, i8* %ptr
-; CHECK-NEXT: NoAlias: i8* %gep.2, i8* %ptr
-; CHECK-NEXT: NoAlias: i8* %gep.1, i8* %gep.2
+; CHECK-NEXT: MayAlias: i8* %gep.1, i8* %ptr
+; CHECK-NEXT: MayAlias: i8* %gep.2, i8* %ptr
+; CHECK-NEXT: MayAlias: i8* %gep.1, i8* %gep.2
;
%s.1 = icmp sgt i64 %i, 0
call void @llvm.assume(i1 %s.1)
ret void
}
-; FIXME: While %n is non-zero, its low 32 bits may not be.
define void @mayalias_overflow_in_32_bit_non_zero(i8* %ptr, i64 %n) {
; CHECK-LABEL: Function: mayalias_overflow_in_32_bit_non_zero
-; CHECK: NoAlias: i8* %gep, i8* %ptr
+; CHECK: MayAlias: i8* %gep, i8* %ptr
;
%c = icmp ne i64 %n, 0
call void @llvm.assume(i1 %c)
ret void
}
-; FIXME: While %n is positive, its low 32 bits may not be.
define void @mayalias_overflow_in_32_bit_positive(i8* %ptr, i64 %n) {
; CHECK-LABEL: Function: mayalias_overflow_in_32_bit_positive
; CHECK: NoAlias: i8* %gep.1, i8* %ptr
-; CHECK: NoAlias: i8* %gep.2, i8* %ptr
-; CHECK: NoAlias: i8* %gep.1, i8* %gep.2
+; CHECK: MayAlias: i8* %gep.2, i8* %ptr
+; CHECK: MayAlias: i8* %gep.1, i8* %gep.2
;
%c = icmp sgt i64 %n, 0
call void @llvm.assume(i1 %c)