}
; We have a constant range for the LHS, but only known bits for the RHS
-define i8 @test_scalar_uadd_urem_known_bits(i8 %a, i8 %b) {
-; CHECK-LABEL: @test_scalar_uadd_urem_known_bits(
+define i8 @test_scalar_uadd_udiv_known_bits(i8 %a, i8 %b) {
+; CHECK-LABEL: @test_scalar_uadd_udiv_known_bits(
; CHECK-NEXT: [[AA:%.*]] = udiv i8 -66, [[A:%.*]]
; CHECK-NEXT: [[BB:%.*]] = and i8 [[B:%.*]], 63
; CHECK-NEXT: [[R:%.*]] = add nuw i8 [[AA]], [[BB]]
ret i8 %r
}
+; srem i8 %a, 100 constrains %b to [-99, 99]; 99 + 28 = 127 = INT8_MAX,
+; so this saturating add can never overflow.
+define i8 @test_scalar_sadd_srem_no_ov(i8 %a) {
+; CHECK-LABEL: @test_scalar_sadd_srem_no_ov(
+; CHECK-NEXT: [[B:%.*]] = srem i8 [[A:%.*]], 100
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[B]], i8 28)
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %b = srem i8 %a, 100
+ %r = call i8 @llvm.sadd.sat.i8(i8 %b, i8 28)
+ ret i8 %r
+}
+
+; Negative test: %b is in [-99, 99] and 99 + 29 = 128 > INT8_MAX, so the
+; saturating add may overflow and must be kept as-is.
+define i8 @test_scalar_sadd_srem_may_ov(i8 %a) {
+; CHECK-LABEL: @test_scalar_sadd_srem_may_ov(
+; CHECK-NEXT: [[B:%.*]] = srem i8 [[A:%.*]], 100
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[B]], i8 29)
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %b = srem i8 %a, 100
+ %r = call i8 @llvm.sadd.sat.i8(i8 %b, i8 29)
+ ret i8 %r
+}
+
+; Constant range on the LHS ([-99, 99] via srem) combined with known bits on
+; the RHS ([0, 15] via the mask): worst case 99 + 15 = 114 <= INT8_MAX, so no
+; signed overflow is possible.
+define i8 @test_scalar_sadd_srem_and_no_ov(i8 %a, i8 %b) {
+; CHECK-LABEL: @test_scalar_sadd_srem_and_no_ov(
+; CHECK-NEXT: [[AA:%.*]] = srem i8 [[A:%.*]], 100
+; CHECK-NEXT: [[BB:%.*]] = and i8 [[B:%.*]], 15
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[AA]], i8 [[BB]])
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %aa = srem i8 %a, 100
+ %bb = and i8 %b, 15
+ %r = call i8 @llvm.sadd.sat.i8(i8 %aa, i8 %bb)
+ ret i8 %r
+}
+
;
; Saturating subtraction.
;
ret <2 x i8> %r
}
+; add nsw %a, 7 cannot wrap, so %aa is in [-121, 127]; %bb is in [0, 7].
+; Worst case -121 - 7 = -128 = INT8_MIN exactly, so the saturating
+; subtraction can never overflow.
+define i8 @test_scalar_ssub_add_nsw_no_ov(i8 %a, i8 %b) {
+; CHECK-LABEL: @test_scalar_ssub_add_nsw_no_ov(
+; CHECK-NEXT: [[AA:%.*]] = add nsw i8 [[A:%.*]], 7
+; CHECK-NEXT: [[BB:%.*]] = and i8 [[B:%.*]], 7
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[AA]], i8 [[BB]])
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %aa = add nsw i8 %a, 7
+ %bb = and i8 %b, 7
+ %r = call i8 @llvm.ssub.sat.i8(i8 %aa, i8 %bb)
+ ret i8 %r
+}
+
+; Negative test: add nsw %a, 6 only guarantees %aa >= -122, and
+; -122 - 7 = -129 < INT8_MIN, so the subtraction may saturate and must be
+; kept as-is.
+define i8 @test_scalar_ssub_add_nsw_may_ov(i8 %a, i8 %b) {
+; CHECK-LABEL: @test_scalar_ssub_add_nsw_may_ov(
+; CHECK-NEXT: [[AA:%.*]] = add nsw i8 [[A:%.*]], 6
+; CHECK-NEXT: [[BB:%.*]] = and i8 [[B:%.*]], 7
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[AA]], i8 [[BB]])
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %aa = add nsw i8 %a, 6
+ %bb = and i8 %b, 7
+ %r = call i8 @llvm.ssub.sat.i8(i8 %aa, i8 %bb)
+ ret i8 %r
+}
+
+; Splat-vector version of the scalar no-overflow case: each lane has
+; aa in [-121, 127] and bb in [0, 7], so -121 - 7 = -128 never underflows.
+define <2 x i8> @test_vector_ssub_add_nsw_no_ov_splat(<2 x i8> %a, <2 x i8> %b) {
+; CHECK-LABEL: @test_vector_ssub_add_nsw_no_ov_splat(
+; CHECK-NEXT: [[AA:%.*]] = add nsw <2 x i8> [[A:%.*]], <i8 7, i8 7>
+; CHECK-NEXT: [[BB:%.*]] = and <2 x i8> [[B:%.*]], <i8 7, i8 7>
+; CHECK-NEXT: [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[AA]], <2 x i8> [[BB]])
+; CHECK-NEXT: ret <2 x i8> [[R]]
+;
+ %aa = add nsw <2 x i8> %a, <i8 7, i8 7>
+ %bb = and <2 x i8> %b, <i8 7, i8 7>
+ %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %aa, <2 x i8> %bb)
+ ret <2 x i8> %r
+}
+
+; Non-splat mask on the RHS: lane 0 bottoms out at -121 - 7 = -128 and
+; lane 1 at -121 - 6 = -127, so neither lane can underflow.
+define <2 x i8> @test_vector_ssub_add_nsw_no_ov_nonsplat1(<2 x i8> %a, <2 x i8> %b) {
+; CHECK-LABEL: @test_vector_ssub_add_nsw_no_ov_nonsplat1(
+; CHECK-NEXT: [[AA:%.*]] = add nsw <2 x i8> [[A:%.*]], <i8 7, i8 7>
+; CHECK-NEXT: [[BB:%.*]] = and <2 x i8> [[B:%.*]], <i8 7, i8 6>
+; CHECK-NEXT: [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[AA]], <2 x i8> [[BB]])
+; CHECK-NEXT: ret <2 x i8> [[R]]
+;
+ %aa = add nsw <2 x i8> %a, <i8 7, i8 7>
+ %bb = and <2 x i8> %b, <i8 7, i8 6>
+ %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %aa, <2 x i8> %bb)
+ ret <2 x i8> %r
+}
+
+; Non-splat addend on the LHS: lane 0 bottoms out at -121 - 7 = -128 and
+; lane 1 (add nsw 8 gives aa >= -120) at -120 - 7 = -127, so no underflow.
+define <2 x i8> @test_vector_ssub_add_nsw_no_ov_nonsplat2(<2 x i8> %a, <2 x i8> %b) {
+; CHECK-LABEL: @test_vector_ssub_add_nsw_no_ov_nonsplat2(
+; CHECK-NEXT: [[AA:%.*]] = add nsw <2 x i8> [[A:%.*]], <i8 7, i8 8>
+; CHECK-NEXT: [[BB:%.*]] = and <2 x i8> [[B:%.*]], <i8 7, i8 7>
+; CHECK-NEXT: [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[AA]], <2 x i8> [[BB]])
+; CHECK-NEXT: ret <2 x i8> [[R]]
+;
+ %aa = add nsw <2 x i8> %a, <i8 7, i8 8>
+ %bb = and <2 x i8> %b, <i8 7, i8 7>
+ %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %aa, <2 x i8> %bb)
+ ret <2 x i8> %r
+}
+
+; Non-splat on both sides: lane 0 gives -121 - 7 = -128 and lane 1
+; (aa >= -122, bb <= 6) gives -122 - 6 = -128; both hit INT8_MIN exactly,
+; so the subtraction still cannot overflow.
+define <2 x i8> @test_vector_ssub_add_nsw_no_ov_nonsplat3(<2 x i8> %a, <2 x i8> %b) {
+; CHECK-LABEL: @test_vector_ssub_add_nsw_no_ov_nonsplat3(
+; CHECK-NEXT: [[AA:%.*]] = add nsw <2 x i8> [[A:%.*]], <i8 7, i8 6>
+; CHECK-NEXT: [[BB:%.*]] = and <2 x i8> [[B:%.*]], <i8 7, i8 6>
+; CHECK-NEXT: [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[AA]], <2 x i8> [[BB]])
+; CHECK-NEXT: ret <2 x i8> [[R]]
+;
+ %aa = add nsw <2 x i8> %a, <i8 7, i8 6>
+ %bb = and <2 x i8> %b, <i8 7, i8 6>
+ %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %aa, <2 x i8> %bb)
+ ret <2 x i8> %r
+}
+
; Raw IR tests
define i32 @uadd_sat(i32 %x, i32 %y) {