return &CI;
break;
}
+ case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow: {
if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
return I;
// Given 2 constant operands whose sum does not overflow:
+ // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
// saddo (X +nsw C0), C1 -> saddo X, C0 + C1
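+ // For example, uaddo (X +nuw 7), 13 -> uaddo X, 20 (see the tests below);
+ // an illustrative signed instance: saddo (X +nsw 100), 27 -> saddo X, 127 on i8.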
Value *X;
const APInt *C0, *C1;
Value *Arg0 = II->getArgOperand(0);
Value *Arg1 = II->getArgOperand(1);
- if (match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0))) &&
- match(Arg1, m_APInt(C1))) {
+ bool IsSigned = II->getIntrinsicID() == Intrinsic::sadd_with_overflow;
+ bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
+ : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
+ if (HasNWAdd && match(Arg1, m_APInt(C1))) {
bool Overflow;
- APInt NewC = C1->sadd_ov(*C0, Overflow);
+ APInt NewC =
+ IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
if (!Overflow)
return replaceInstUsesWith(
*II, Builder.CreateBinaryIntrinsic(
- Intrinsic::sadd_with_overflow, X,
+ II->getIntrinsicID(), X,
ConstantInt::get(Arg1->getType(), NewC)));
}
-
break;
}
- case Intrinsic::uadd_with_overflow:
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow:
if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
return I;
break;

define { i32, i1 } @simple_fold(i32 %x) {
; CHECK-LABEL: @simple_fold(
-; CHECK-NEXT: [[A:%.*]] = add nuw i32 [[X:%.*]], 7
-; CHECK-NEXT: [[B:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A]], i32 13)
-; CHECK-NEXT: ret { i32, i1 } [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 20)
+; CHECK-NEXT: ret { i32, i1 } [[TMP1]]
;
%a = add nuw i32 %x, 7
%b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 13)
ret { i32, i1 } %b
}

define { i8, i1 } @fold_on_constant_add_no_overflow(i8 %x) {
; CHECK-LABEL: @fold_on_constant_add_no_overflow(
-; CHECK-NEXT: [[A:%.*]] = add nuw i8 [[X:%.*]], -56
-; CHECK-NEXT: [[B:%.*]] = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[A]], i8 55)
-; CHECK-NEXT: ret { i8, i1 } [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[X:%.*]], i8 -1)
+; CHECK-NEXT: ret { i8, i1 } [[TMP1]]
;
%a = add nuw i8 %x, 200
%b = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 55)
ret { i8, i1 } %b
}

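; A sketch of the complementary negative case (illustrative; the test name and
; constants are assumptions, not shown in the patch above): when the constant
; sum wraps, uadd_ov reports overflow and the fold must not fire.
define { i8, i1 } @no_fold_on_constant_add_overflow(i8 %x) {
; 200 + 56 = 256 wraps i8, so the two constants cannot be combined.
%a = add nuw i8 %x, 200
%b = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 56)
ret { i8, i1 } %b
}
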
define { <2 x i32>, <2 x i1> } @fold_simple_splat_constant(<2 x i32> %x) {
; CHECK-LABEL: @fold_simple_splat_constant(
-; CHECK-NEXT: [[A:%.*]] = add nuw <2 x i32> [[X:%.*]], <i32 12, i32 12>
-; CHECK-NEXT: [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 30, i32 30>)
-; CHECK-NEXT: ret { <2 x i32>, <2 x i1> } [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[X:%.*]], <2 x i32> <i32 42, i32 42>)
+; CHECK-NEXT: ret { <2 x i32>, <2 x i1> } [[TMP1]]
;
%a = add nuw <2 x i32> %x, <i32 12, i32 12>
%b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
ret { <2 x i32>, <2 x i1> } %b
}

define { i32, i1 } @fold_nuwnsw(i32 %x) {
; CHECK-LABEL: @fold_nuwnsw(
-; CHECK-NEXT: [[A:%.*]] = add nuw nsw i32 [[X:%.*]], 12
-; CHECK-NEXT: [[B:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A]], i32 30)
-; CHECK-NEXT: ret { i32, i1 } [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
+; CHECK-NEXT: ret { i32, i1 } [[TMP1]]
;
%a = add nuw nsw i32 %x, 12
%b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 30)
ret { i32, i1 } %b
}
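
; A sketch of the signed counterpart, which shares the same code path
; (illustrative: this test name and its constants are assumptions, not part
; of the patch above).
define { i32, i1 } @fold_nsw_signed(i32 %x) {
; 12 + 30 = 42 does not overflow signed i32, so the two adds should be
; merged into @llvm.sadd.with.overflow.i32(i32 %x, i32 42).
%a = add nsw i32 %x, 12
%b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 30)
ret { i32, i1 } %b
}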