// For an array/pointer, add the element offset, explicitly scaled.
unsigned Scale = DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize();
- Value *Op0, *Op1;
- ConstantInt *CI;
- // If the index is zero-extended, it is guaranteed to be positive.
- if (match(Index, m_ZExt(m_Value(Op0)))) {
- if (match(Op0, m_NUWShl(m_Value(Op1), m_ConstantInt(CI))) &&
- canUseSExt(CI)) {
- Result.emplace_back(
- multiplyWithOverflow(
- Scale, int64_t(std::pow(int64_t(2), CI->getSExtValue()))),
- Op1);
- continue;
- }
-
- if (match(Op0, m_NSWAdd(m_Value(Op1), m_ConstantInt(CI))) &&
- canUseSExt(CI) && match(Op0, m_NUWAdd(m_Value(), m_Value()))) {
- Result[0].Coefficient +=
- multiplyWithOverflow(Scale, CI->getSExtValue());
- Result.emplace_back(Scale, Op1);
- continue;
- }
-
- Result.emplace_back(Scale, Op0, true);
- continue;
- }
-
- if (match(Index, m_ConstantInt(CI)) && !CI->isNegative() &&
- canUseSExt(CI)) {
- Result[0].Coefficient += multiplyWithOverflow(Scale, CI->getSExtValue());
- continue;
- }
-
- if (match(Index, m_NSWShl(m_Value(Op0), m_ConstantInt(CI))) &&
- canUseSExt(CI)) {
- Result.emplace_back(
- multiplyWithOverflow(
- Scale, int64_t(std::pow(int64_t(2), CI->getSExtValue()))),
- Op0);
- } else if (match(Index, m_NSWAdd(m_Value(Op0), m_ConstantInt(CI))) &&
- canUseSExt(CI)) {
- Result[0].Coefficient += multiplyWithOverflow(Scale, CI->getSExtValue());
- Result.emplace_back(Scale, Op0);
+ auto IdxResult = decompose(Index, Preconditions, IsSigned, DL);
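+ // An empty decomposition means the index has no structure the recursion
+ // understands; keep it as a single opaque term scaled by the element size.
+ // Otherwise fold the scaled sub-decomposition into the result.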
+ if (IdxResult.empty()) {
+ Result.emplace_back(Scale, Index);
} else {
- Op0 = Index;
- Result.emplace_back(Scale, Op0);
+ for (auto &KV : IdxResult)
+ KV.Coefficient *= Scale;
+ Result[0].Coefficient += IdxResult[0].Coefficient;
+ append_range(Result, ArrayRef<DecompEntry>(IdxResult).drop_front());
}
- // If Op0 is signed non-negative, the GEP is increasing monotonically and
- // can be de-composed.
+ // The GEP is increasing monotonically (and hence decomposable) only if the
+ // index is signed non-negative; record that as a precondition unless it is
+ // already known to hold.
- Preconditions.emplace_back(CmpInst::ICMP_SGE, Op0,
- ConstantInt::get(Op0->getType(), 0));
+ if (!isKnownNonNegative(Index, DL, /*Depth=*/MaxAnalysisRecursionDepth - 1))
+ Preconditions.emplace_back(CmpInst::ICMP_SGE, Index,
+ ConstantInt::get(Index->getType(), 0));
}
return Result;
}
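
For illustration, a minimal standalone sketch of the merge step in the new
else-branch, using a simplified DecompEntry and hypothetical names (the real
code operates on llvm::Value * and SmallVector; the multiply here is unchecked,
exactly as in the patch, whereas the removed constant paths went through
multiplyWithOverflow):

// Simplified stand-in for the patch's DecompEntry: entry 0 holds the
// constant offset, later entries are {Coefficient, Variable} terms of
// the linear combination  offset + sum(Coefficient_i * Var_i).
#include <cstdint>
#include <cstdio>
#include <vector>

struct DecompEntry {
  int64_t Coefficient;
  const char *Variable; // stand-in for llvm::Value *
};

// Mirrors the new else-branch: scale each term of the recursively
// decomposed index by the element size, fold its constant term into
// Result[0], and append the remaining variable terms.
static void mergeScaledIndex(std::vector<DecompEntry> &Result,
                             std::vector<DecompEntry> IdxResult,
                             int64_t Scale) {
  for (DecompEntry &KV : IdxResult)
    KV.Coefficient *= Scale;
  Result[0].Coefficient += IdxResult[0].Coefficient;
  Result.insert(Result.end(), IdxResult.begin() + 1, IdxResult.end());
}

int main() {
  // A GEP over i32, so Scale = 4, with an index that decomposed to
  // 2 + 2 * %idx (e.g. from an add nsw of a shl nuw by 1).
  std::vector<DecompEntry> Result = {{0, nullptr}, {1, "%base"}};
  std::vector<DecompEntry> IdxResult = {{2, nullptr}, {2, "%idx"}};
  mergeScaledIndex(Result, IdxResult, 4);
  // Prints: offset 8, 1 * %base, 8 * %idx
  std::printf("offset %lld, %lld * %s, %lld * %s\n",
              (long long)Result[0].Coefficient,
              (long long)Result[1].Coefficient, Result[1].Variable,
              (long long)Result[2].Coefficient, Result[2].Variable);
  return 0;
}

Each of the removed single-pattern cases folded its constant into
Result[0].Coefficient by hand; the recursion now composes those cases, so
nested index expressions decompose without a dedicated matcher per shape.
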
; CHECK-NEXT: [[C_1:%.*]] = icmp ult ptr [[ADD_I32_IDX_1]], [[UPPER]]
; CHECK-NEXT: [[ADD_I8_IDX_1:%.*]] = getelementptr inbounds i8, ptr [[A]], i8 [[IDX_1]]
; CHECK-NEXT: [[T_1:%.*]] = icmp ult ptr [[ADD_I8_IDX_1]], [[UPPER]]
-; CHECK-NEXT: [[RES_1:%.*]] = xor i1 [[C_1]], true
+; CHECK-NEXT: [[RES_1:%.*]] = xor i1 [[C_1]], [[T_1]]
; CHECK-NEXT: ret i1 [[RES_1]]
;
%idx.pos = icmp sge i8 %idx, 0
; CHECK-NEXT: [[C_1:%.*]] = icmp ult ptr [[ADD_I32_IDX_1]], [[UPPER]]
; CHECK-NEXT: [[ADD_I8_IDX_1:%.*]] = getelementptr inbounds i8, ptr [[A]], i16 [[IDX_1_EXT]]
; CHECK-NEXT: [[T_1:%.*]] = icmp ult ptr [[ADD_I8_IDX_1]], [[UPPER]]
-; CHECK-NEXT: [[RES_1:%.*]] = xor i1 [[C_1]], [[T_1]]
+; CHECK-NEXT: [[RES_1:%.*]] = xor i1 [[C_1]], true
; CHECK-NEXT: ret i1 [[RES_1]]
;
%idx.2 = add nuw i8 %idx, 2
; CHECK-NEXT: [[IDX_SHL_1:%.*]] = shl nuw nsw i8 [[IDX]], 1
; CHECK-NEXT: [[ADD_PTR_SHL_1:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i8 [[IDX_SHL_1]]
; CHECK-NEXT: [[C_MAX_0:%.*]] = icmp ult ptr [[ADD_PTR_SHL_1]], [[MAX]]
-; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 [[C_MAX_0]])
; CHECK-NEXT: [[IDX_SHL_2:%.*]] = shl nuw i8 [[IDX]], 2
; CHECK-NEXT: [[ADD_PTR_SHL_2:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i8 [[IDX_SHL_2]]
; CHECK-NEXT: [[C_MAX_1:%.*]] = icmp ult ptr [[ADD_PTR_SHL_2]], [[MAX]]