static bool refineInstruction(SCCPSolver &Solver,
const SmallPtrSetImpl<Value *> &InsertedValues,
Instruction &Inst) {
- if (!isa<OverflowingBinaryOperator>(Inst))
+ if (Inst.getOpcode() != Instruction::Add)
return false;
auto GetRange = [&Solver, &InsertedValues](Value *Op) {
bool Changed = false;
if (!Inst.hasNoUnsignedWrap()) {
auto NUWRange = ConstantRange::makeGuaranteedNoWrapRegion(
- Instruction::BinaryOps(Inst.getOpcode()), RangeB,
- OverflowingBinaryOperator::NoUnsignedWrap);
+ Instruction::Add, RangeB, OverflowingBinaryOperator::NoUnsignedWrap);
if (NUWRange.contains(RangeA)) {
Inst.setHasNoUnsignedWrap();
Changed = true;
}
if (!Inst.hasNoSignedWrap()) {
auto NSWRange = ConstantRange::makeGuaranteedNoWrapRegion(
- Instruction::BinaryOps(Inst.getOpcode()), RangeA,
- OverflowingBinaryOperator::NoSignedWrap);
+ Instruction::Add, RangeA, OverflowingBinaryOperator::NoSignedWrap);
if (NSWRange.contains(RangeB)) {
Inst.setHasNoSignedWrap();
Changed = true;
; CHECK-NEXT: [[SUB:%.*]] = sub nsw i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i8 [[A]], [[B]]
; CHECK-NEXT: [[M1:%.*]] = select i1 [[CMP1]], i8 0, i8 [[SUB]]
-; CHECK-NEXT: [[M2:%.*]] = tail call i8 @llvm.smax.i8(i8 [[SUB]], i8 0)
-; CHECK-NEXT: [[R:%.*]] = sub i8 [[M2]], [[M1]]
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i8 @llvm.smax.i8(i8 [[SUB]], i8 0)
+; CHECK-NEXT: [[R:%.*]] = sub i8 [[TMP1]], [[M1]]
; CHECK-NEXT: ret i8 [[R]]
;
%sub = sub nsw i8 %a, %b
define i8 @abs_swapped(i8 %a) {
; CHECK-LABEL: @abs_swapped(
-; CHECK-NEXT: [[M1:%.*]] = tail call i8 @llvm.abs.i8(i8 [[A:%.*]], i1 true)
-; CHECK-NEXT: ret i8 [[M1]]
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i8 @llvm.abs.i8(i8 [[A:%.*]], i1 false)
+; CHECK-NEXT: ret i8 [[TMP1]]
;
%neg = sub i8 0, %a
%cmp1 = icmp sgt i8 %a, 0
define i8 @nabs_different_constants(i8 %a) {
; CHECK-LABEL: @nabs_different_constants(
; CHECK-NEXT: [[TMP1:%.*]] = tail call i8 @llvm.abs.i8(i8 [[A:%.*]], i1 false)
-; CHECK-NEXT: [[M1:%.*]] = sub nsw i8 0, [[TMP1]]
+; CHECK-NEXT: [[M1:%.*]] = sub i8 0, [[TMP1]]
; CHECK-NEXT: ret i8 [[M1]]
;
%neg = sub i8 0, %a
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -O3 -S < %s | FileCheck %s
-; RUN: opt -passes='default<O3>' -S < %s | FileCheck %s
+; RUN: opt -passes='default<O3>' -S < %s | FileCheck %s
define void @foo(i1 %which, i32 %a, i32 %b, ptr %result) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = sub nsw i32 0, [[B:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = sub i32 0, [[B:%.*]]
; CHECK-NEXT: [[Z_V_P:%.*]] = select i1 [[WHICH:%.*]], i32 [[B]], i32 [[TMP0]]
; CHECK-NEXT: [[Z_V:%.*]] = add i32 [[Z_V_P]], [[A:%.*]]
; CHECK-NEXT: [[Z:%.*]] = zext i32 [[Z_V]] to i64
define void @bar(i1 %which, i32 %a, i32 %b, ptr %result) {
; CHECK-LABEL: @bar(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = sub nsw i32 0, [[B:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = sub i32 0, [[B:%.*]]
; CHECK-NEXT: [[SPEC_SELECT_P:%.*]] = select i1 [[WHICH:%.*]], i32 [[B]], i32 [[TMP0]]
; CHECK-NEXT: [[SPEC_SELECT:%.*]] = add i32 [[SPEC_SELECT_P]], [[A:%.*]]
; CHECK-NEXT: [[Z2:%.*]] = zext i32 [[SPEC_SELECT]] to i64
define void @foo_opt(i1 %which, i32 %a, i32 %b, ptr nocapture %result) {
; CHECK-LABEL: @foo_opt(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = sub nsw i32 0, [[B:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = sub i32 0, [[B:%.*]]
; CHECK-NEXT: [[Z_V_P:%.*]] = select i1 [[WHICH:%.*]], i32 [[B]], i32 [[TMP0]]
; CHECK-NEXT: [[Z_V:%.*]] = add i32 [[Z_V_P]], [[A:%.*]]
; CHECK-NEXT: [[Z:%.*]] = zext i32 [[Z_V]] to i64
; CHECK: if.then:
; CHECK-NEXT: br label [[RETURN:%.*]]
; CHECK: if.else:
-; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 [[I]], 1
+; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[I]], 1
; CHECK-NEXT: [[CALL:%.*]] = call i32 @recursive_f(i32 [[SUB]])
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[I]], [[CALL]]
; CHECK-NEXT: br label [[RETURN]]
; x - y = [-190, -79)
define internal i1 @f.sub(i32 %x, i32 %y) {
; CHECK-LABEL: @f.sub(
-; CHECK-NEXT: [[A_1:%.*]] = sub nsw i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[A_1:%.*]] = sub i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[C_2:%.*]] = icmp sgt i32 [[A_1]], -81
; CHECK-NEXT: [[C_4:%.*]] = icmp slt i32 [[A_1]], -189
; CHECK-NEXT: [[C_5:%.*]] = icmp eq i32 [[A_1]], -150
; x * y = [1000, 4001)
define internal i1 @f.mul(i32 %x, i32 %y) {
; CHECK-LABEL: @f.mul(
-; CHECK-NEXT: [[A_1:%.*]] = mul nuw nsw i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[A_1:%.*]] = mul i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[C_2:%.*]] = icmp sgt i32 [[A_1]], 3999
; CHECK-NEXT: [[C_4:%.*]] = icmp slt i32 [[A_1]], 1001
; CHECK-NEXT: [[C_5:%.*]] = icmp eq i32 [[A_1]], 1500
; CHECK-LABEL: @shift_undef_64(
; CHECK-NEXT: store i64 0, ptr [[P:%.*]], align 4
; CHECK-NEXT: store i64 -1, ptr [[P]], align 4
-; CHECK-NEXT: [[R3:%.*]] = shl nuw nsw i64 -1, 4294967298
+; CHECK-NEXT: [[R3:%.*]] = shl i64 -1, 4294967298
; CHECK-NEXT: store i64 [[R3]], ptr [[P]], align 4
; CHECK-NEXT: ret void
;
; CHECK-LABEL: @shift_undef_65(
; CHECK-NEXT: store i65 0, ptr [[P:%.*]], align 4
; CHECK-NEXT: store i65 0, ptr [[P]], align 4
-; CHECK-NEXT: [[R3:%.*]] = shl nuw i65 1, -18446744073709551615
+; CHECK-NEXT: [[R3:%.*]] = shl i65 1, -18446744073709551615
; CHECK-NEXT: store i65 [[R3]], ptr [[P]], align 4
; CHECK-NEXT: ret void
;
; CHECK-LABEL: @shift_undef_256(
; CHECK-NEXT: store i256 0, ptr [[P:%.*]], align 4
; CHECK-NEXT: store i256 0, ptr [[P]], align 4
-; CHECK-NEXT: [[R3:%.*]] = shl nuw nsw i256 1, 18446744073709551619
+; CHECK-NEXT: [[R3:%.*]] = shl i256 1, 18446744073709551619
; CHECK-NEXT: store i256 [[R3]], ptr [[P]], align 4
; CHECK-NEXT: ret void
;
; CHECK-LABEL: @shift_undef_511(
; CHECK-NEXT: store i511 0, ptr [[P:%.*]], align 4
; CHECK-NEXT: store i511 -1, ptr [[P]], align 4
-; CHECK-NEXT: [[R3:%.*]] = shl nuw nsw i511 -3, 1208925819614629174706180
+; CHECK-NEXT: [[R3:%.*]] = shl i511 -3, 1208925819614629174706180
; CHECK-NEXT: store i511 [[R3]], ptr [[P]], align 4
; CHECK-NEXT: ret void
;
; SCCP: bb4:
; SCCP-NEXT: [[TMP5:%.*]] = add i64 [[TMP2]], 3
; SCCP-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], 3
-; SCCP-NEXT: [[TMP7:%.*]] = sub nuw nsw i64 3, [[TMP6]]
-; SCCP-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 1
+; SCCP-NEXT: [[TMP7:%.*]] = sub i64 3, [[TMP6]]
+; SCCP-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 1
; SCCP-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i32
; SCCP-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
; SCCP-NEXT: br label [[BB11:%.*]]
; IPSCCP: bb4:
; IPSCCP-NEXT: [[TMP5:%.*]] = add i64 [[TMP2]], 3
; IPSCCP-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], 3
-; IPSCCP-NEXT: [[TMP7:%.*]] = sub nuw nsw i64 3, [[TMP6]]
-; IPSCCP-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 1
+; IPSCCP-NEXT: [[TMP7:%.*]] = sub i64 3, [[TMP6]]
+; IPSCCP-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 1
; IPSCCP-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i32
; IPSCCP-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
; IPSCCP-NEXT: br label [[BB11:%.*]]