return nullptr;
}
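+/// Shared fold for the arithmetic-with-overflow intrinsics: when
+/// OptimizeOverflowCheck can simplify the operation, replace the call with
+/// the computed {result, overflow} tuple.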
+Instruction *InstCombiner::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
+ OverflowCheckFlavor OCF =
+ IntrinsicIDToOverflowCheckFlavor(II->getIntrinsicID());
+ assert(OCF != OCF_INVALID && "unexpected overflow intrinsic");
+
+ Value *OperationResult = nullptr;
+ Constant *OverflowResult = nullptr;
+ if (OptimizeOverflowCheck(OCF, II->getArgOperand(0), II->getArgOperand(1),
+ *II, OperationResult, OverflowResult))
+ return CreateOverflowTuple(II, OperationResult, OverflowResult);
+ return nullptr;
+}
+
/// CallInst simplification. This mostly only handles folding of intrinsic
/// instructions. For normal calls, it allows visitCallBase to do the heavy
/// lifting.
return &CI;
break;
}
+ case Intrinsic::sadd_with_overflow: {
+ if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
+ return I;
+ if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
+ return I;
+
+ // Given two constants C0 and C1 whose sum does not overflow:
+ // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
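+ // The nsw on the inner add guarantees X + C0 does not wrap, so the
+ // overflow bit of saddo(X + C0, C1) equals that of saddo(X, C0 + C1).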
+ Value *X;
+ const APInt *C0, *C1;
+ Value *Arg0 = II->getArgOperand(0);
+ Value *Arg1 = II->getArgOperand(1);
+ if (match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0))) &&
+ match(Arg1, m_APInt(C1))) {
+ bool Overflow;
+ APInt NewC = C1->sadd_ov(*C0, Overflow);
+ if (!Overflow)
+ return replaceInstUsesWith(
+ *II, Builder.CreateBinaryIntrinsic(
+ Intrinsic::sadd_with_overflow, X,
+ ConstantInt::get(Arg1->getType(), NewC)));
+ }
+
+ break;
+ }
case Intrinsic::uadd_with_overflow:
- case Intrinsic::sadd_with_overflow:
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow:
if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
  return I;
LLVM_FALLTHROUGH;

case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow: {
- OverflowCheckFlavor OCF =
- IntrinsicIDToOverflowCheckFlavor(II->getIntrinsicID());
- assert(OCF != OCF_INVALID && "unexpected!");
-
- Value *OperationResult = nullptr;
- Constant *OverflowResult = nullptr;
- if (OptimizeOverflowCheck(OCF, II->getArgOperand(0), II->getArgOperand(1),
- *II, OperationResult, OverflowResult))
- return CreateOverflowTuple(II, OperationResult, OverflowResult);
+ if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
+ return I;
break;
}
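+; 7 + 13 = 20 fits in i32, so the two constants combine into one saddo.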
define { i32, i1 } @simple_fold(i32) {
; CHECK-LABEL: @simple_fold(
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw i32 [[TMP0:%.*]], 7
-; CHECK-NEXT: [[TMP3:%.*]] = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[TMP2]], i32 13)
-; CHECK-NEXT: ret { i32, i1 } [[TMP3]]
+; CHECK-NEXT: [[TMP2:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[TMP0:%.*]], i32 20)
+; CHECK-NEXT: ret { i32, i1 } [[TMP2]]
;
%2 = add nsw i32 %0, 7
%3 = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %2, i32 13)
ret { i32, i1 } %3
}
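+; 13 + -7 = 6, so constants of mixed sign combine as well.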
define { i32, i1 } @fold_mixed_signs(i32) {
; CHECK-LABEL: @fold_mixed_signs(
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw i32 [[TMP0:%.*]], 13
-; CHECK-NEXT: [[TMP3:%.*]] = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[TMP2]], i32 -7)
-; CHECK-NEXT: ret { i32, i1 } [[TMP3]]
+; CHECK-NEXT: [[TMP2:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[TMP0:%.*]], i32 6)
+; CHECK-NEXT: ret { i32, i1 } [[TMP2]]
;
%2 = add nsw i32 %0, 13
%3 = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %2, i32 -7)
ret { i32, i1 } %3
}
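+; 100 + 27 = 127, the largest i8 value, so the sum still fits and the fold applies.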
+define { i8, i1 } @fold_on_constant_add_no_overflow(i8) {
+; CHECK-LABEL: @fold_on_constant_add_no_overflow(
+; CHECK-NEXT: [[TMP2:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[TMP0:%.*]], i8 127)
+; CHECK-NEXT: ret { i8, i1 } [[TMP2]]
+;
+ %2 = add nsw i8 %0, 100
+ %3 = tail call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %2, i8 27)
+ ret { i8, i1 } %3
+}
+
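+; 100 + 28 = 128 does not fit in i8, so the fold must not fire.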
define { i8, i1 } @no_fold_on_constant_add_overflow(i8) {
; CHECK-LABEL: @no_fold_on_constant_add_overflow(
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw i8 [[TMP0:%.*]], 127
-; CHECK-NEXT: [[TMP3:%.*]] = tail call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[TMP2]], i8 127)
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw i8 [[TMP0:%.*]], 100
+; CHECK-NEXT: [[TMP3:%.*]] = tail call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[TMP2]], i8 28)
; CHECK-NEXT: ret { i8, i1 } [[TMP3]]
;
- %2 = add nsw i8 %0, 127
- %3 = tail call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %2, i8 127)
+ %2 = add nsw i8 %0, 100
+ %3 = tail call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %2, i8 28)
ret { i8, i1 } %3
}
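+; Splat vector constants fold elementwise: <12, 12> + <30, 30> = <42, 42>.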
define { <2 x i32>, <2 x i1> } @fold_simple_splat_constant(<2 x i32>) {
; CHECK-LABEL: @fold_simple_splat_constant(
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw <2 x i32> [[TMP0:%.*]], <i32 12, i32 12>
-; CHECK-NEXT: [[TMP3:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32> [[TMP2]], <2 x i32> <i32 30, i32 30>)
-; CHECK-NEXT: ret { <2 x i32>, <2 x i1> } [[TMP3]]
+; CHECK-NEXT: [[TMP2:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32> [[TMP0:%.*]], <2 x i32> <i32 42, i32 42>)
+; CHECK-NEXT: ret { <2 x i32>, <2 x i1> } [[TMP2]]
;
%2 = add nsw <2 x i32> %0, <i32 12, i32 12>
%3 = tail call { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32> %2, <2 x i32> <i32 30, i32 30>)
ret { <2 x i32>, <2 x i1> } %3
}
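+; The extra nuw flag does not block the fold: 12 + 30 = 42.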
define { i32, i1 } @fold_nuwnsw(i32) {
; CHECK-LABEL: @fold_nuwnsw(
-; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i32 [[TMP0:%.*]], 12
-; CHECK-NEXT: [[TMP3:%.*]] = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[TMP2]], i32 30)
-; CHECK-NEXT: ret { i32, i1 } [[TMP3]]
+; CHECK-NEXT: [[TMP2:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[TMP0:%.*]], i32 42)
+; CHECK-NEXT: ret { i32, i1 } [[TMP2]]
;
%2 = add nuw nsw i32 %0, 12
%3 = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %2, i32 30)
ret { i32, i1 } %3
}