; Can combine uadds with constant operands.
define i8 @test_scalar_uadd_combine(i8 %a) {
; CHECK-LABEL: @test_scalar_uadd_combine(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 30)
-; CHECK-NEXT: ret i8 [[TMP1]]
+; CHECK-NEXT: [[X2:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 30)
+; CHECK-NEXT: ret i8 [[X2]]
;
%x1 = call i8 @llvm.uadd.sat.i8(i8 %a, i8 10)
%x2 = call i8 @llvm.uadd.sat.i8(i8 %x1, i8 20)
define <2 x i8> @test_vector_uadd_combine(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_uadd_combine(
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 30, i8 30>)
-; CHECK-NEXT: ret <2 x i8> [[TMP1]]
+; CHECK-NEXT: [[X2:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 30, i8 30>)
+; CHECK-NEXT: ret <2 x i8> [[X2]]
;
%x1 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
%x2 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %x1, <2 x i8> <i8 20, i8 20>)
; Can combine sadds if sign matches.
define i8 @test_scalar_sadd_both_positive(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_both_positive(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 30)
-; CHECK-NEXT: ret i8 [[TMP1]]
+; CHECK-NEXT: [[Z2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 30)
+; CHECK-NEXT: ret i8 [[Z2]]
;
%z1 = call i8 @llvm.sadd.sat.i8(i8 %a, i8 10)
%z2 = call i8 @llvm.sadd.sat.i8(i8 %z1, i8 20)
define <2 x i8> @test_vector_sadd_both_positive(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_sadd_both_positive(
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 30, i8 30>)
-; CHECK-NEXT: ret <2 x i8> [[TMP1]]
+; CHECK-NEXT: [[Z2:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 30, i8 30>)
+; CHECK-NEXT: ret <2 x i8> [[Z2]]
;
%z1 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
%z2 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %z1, <2 x i8> <i8 20, i8 20>)
define i8 @test_scalar_sadd_both_negative(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_both_negative(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -30)
-; CHECK-NEXT: ret i8 [[TMP1]]
+; CHECK-NEXT: [[U2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -30)
+; CHECK-NEXT: ret i8 [[U2]]
;
%u1 = call i8 @llvm.sadd.sat.i8(i8 %a, i8 -10)
%u2 = call i8 @llvm.sadd.sat.i8(i8 %u1, i8 -20)
define <2 x i8> @test_vector_sadd_both_negative(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_sadd_both_negative(
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -30, i8 -30>)
-; CHECK-NEXT: ret <2 x i8> [[TMP1]]
+; CHECK-NEXT: [[U2:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -30, i8 -30>)
+; CHECK-NEXT: ret <2 x i8> [[U2]]
;
%u1 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 -10, i8 -10>)
%u2 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %u1, <2 x i8> <i8 -20, i8 -20>)
; Canonicalize ssub to sadd.
define i8 @test_scalar_ssub_canonical(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_canonical(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -10)
-; CHECK-NEXT: ret i8 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -10)
+; CHECK-NEXT: ret i8 [[R]]
;
%r = call i8 @llvm.ssub.sat.i8(i8 %a, i8 10)
ret i8 %r
define <2 x i8> @test_vector_ssub_canonical(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_ssub_canonical(
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -10, i8 -10>)
-; CHECK-NEXT: ret <2 x i8> [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -10, i8 -10>)
+; CHECK-NEXT: ret <2 x i8> [[R]]
;
%r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
ret <2 x i8> %r
define <2 x i8> @test_vector_ssub_canonical_min_non_splat(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_ssub_canonical_min_non_splat(
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -10, i8 10>)
-; CHECK-NEXT: ret <2 x i8> [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -10, i8 10>)
+; CHECK-NEXT: ret <2 x i8> [[R]]
;
%r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 -10>)
ret <2 x i8> %r
; Can combine usubs with constant operands.
define i8 @test_scalar_usub_combine(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_combine(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 30)
-; CHECK-NEXT: ret i8 [[TMP1]]
+; CHECK-NEXT: [[X2:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 30)
+; CHECK-NEXT: ret i8 [[X2]]
;
; Two chained usub.sat calls with constants 10 and 20 fold into a single
; usub.sat by 30, as the CHECK-NEXT lines above expect.
%x1 = call i8 @llvm.usub.sat.i8(i8 %a, i8 10)
%x2 = call i8 @llvm.usub.sat.i8(i8 %x1, i8 20)
ret i8 %x2
}
+; Can simplify zero check followed by decrement
+define i8 @test_simplify_decrement(i8 %a) {
+; CHECK-LABEL: @test_simplify_decrement(
+; CHECK-NEXT: [[I:%.*]] = icmp eq i8 [[A:%.*]], 0
+; CHECK-NEXT: [[I1:%.*]] = add i8 [[A]], -1
+; CHECK-NEXT: [[I2:%.*]] = select i1 [[I]], i8 0, i8 [[I1]]
+; CHECK-NEXT: ret i8 [[I2]]
+;
+; "a == 0 ? 0 : a - 1". Per the CHECK lines the sub is canonicalized to
+; add -1, but the icmp/select structure is otherwise kept as-is.
+ %i = icmp eq i8 %a, 0
+ %i1 = sub i8 %a, 1
+ %i2 = select i1 %i, i8 0, i8 %i1
+ ret i8 %i2
+}
+
+declare void @use.i1(i1)
+
+define i8 @test_simplify_decrement_ne(i8 %a) {
+; CHECK-LABEL: @test_simplify_decrement_ne(
+; CHECK-NEXT: [[I:%.*]] = icmp ne i8 [[A:%.*]], 0
+; CHECK-NEXT: call void @use.i1(i1 [[I]])
+; CHECK-NEXT: [[I1:%.*]] = add i8 [[A]], -1
+; CHECK-NEXT: [[I2:%.*]] = select i1 [[I]], i8 [[I1]], i8 0
+; CHECK-NEXT: ret i8 [[I2]]
+;
+; Same decrement pattern expressed with icmp ne and swapped select arms;
+; the compare has an extra use (@use.i1). The CHECK lines expect the
+; icmp/add/select sequence to be preserved unchanged.
+ %i = icmp ne i8 %a, 0
+ call void @use.i1(i1 %i)
+ %i1 = add i8 %a, -1
+ %i2 = select i1 %i, i8 %i1, i8 0
+ ret i8 %i2
+}
+
+define <2 x i8> @test_simplify_decrement_vec(<2 x i8> %a) {
+; CHECK-LABEL: @test_simplify_decrement_vec(
+; CHECK-NEXT: [[I:%.*]] = icmp eq <2 x i8> [[A:%.*]], zeroinitializer
+; CHECK-NEXT: [[I1:%.*]] = add <2 x i8> [[A]], <i8 -1, i8 -1>
+; CHECK-NEXT: [[I2:%.*]] = select <2 x i1> [[I]], <2 x i8> zeroinitializer, <2 x i8> [[I1]]
+; CHECK-NEXT: ret <2 x i8> [[I2]]
+;
+; Vector form of "a == 0 ? 0 : a - 1". The CHECK lines expect sub to be
+; canonicalized to add <-1, -1> while the icmp/select pattern remains.
+ %i = icmp eq <2 x i8> %a, <i8 0, i8 0>
+ %i1 = sub <2 x i8> %a, <i8 1, i8 1>
+ %i2 = select <2 x i1> %i, <2 x i8> <i8 0, i8 0>, <2 x i8> %i1
+ ret <2 x i8> %i2
+}
+
+define <2 x i8> @test_simplify_decrement_vec_undef(<2 x i8> %a) {
+; CHECK-LABEL: @test_simplify_decrement_vec_undef(
+; CHECK-NEXT: [[I:%.*]] = icmp eq <2 x i8> [[A:%.*]], zeroinitializer
+; CHECK-NEXT: [[I1:%.*]] = add <2 x i8> [[A]], <i8 -1, i8 -1>
+; CHECK-NEXT: [[I2:%.*]] = select <2 x i1> [[I]], <2 x i8> <i8 0, i8 undef>, <2 x i8> [[I1]]
+; CHECK-NEXT: ret <2 x i8> [[I2]]
+;
+; Vector decrement variant whose zero arm carries an undef lane
+; (<i8 0, i8 undef>); the CHECK lines expect the select to be retained
+; with that constant intact.
+ %i = icmp eq <2 x i8> %a, <i8 0, i8 0>
+ %i1 = sub <2 x i8> %a, <i8 1, i8 1>
+ %i2 = select <2 x i1> %i, <2 x i8> <i8 0, i8 undef>, <2 x i8> %i1
+ ret <2 x i8> %i2
+}
+
+define i8 @test_simplify_decrement_invalid_ne(i8 %a) {
+; CHECK-LABEL: @test_simplify_decrement_invalid_ne(
+; CHECK-NEXT: [[I_NOT:%.*]] = icmp eq i8 [[A:%.*]], 0
+; CHECK-NEXT: [[I2:%.*]] = sext i1 [[I_NOT]] to i8
+; CHECK-NEXT: ret i8 [[I2]]
+;
+; Not the decrement pattern: with icmp ne, a != 0 selects 0 and a == 0
+; selects 0 - 1 = -1, i.e. sext(a == 0). The CHECK lines expect exactly
+; that fold (inverted compare + sext).
+ %i = icmp ne i8 %a, 0
+ %i1 = sub i8 %a, 1
+ %i2 = select i1 %i, i8 0, i8 %i1
+ ret i8 %i2
+}
+
+define i8 @test_invalid_simplify_sub2(i8 %a) {
+; CHECK-LABEL: @test_invalid_simplify_sub2(
+; CHECK-NEXT: [[I:%.*]] = icmp eq i8 [[A:%.*]], 0
+; CHECK-NEXT: [[I1:%.*]] = add i8 [[A]], -2
+; CHECK-NEXT: [[I2:%.*]] = select i1 [[I]], i8 0, i8 [[I1]]
+; CHECK-NEXT: ret i8 [[I2]]
+;
+; Negative test: subtracting 2 is not a decrement, so per the CHECK
+; lines only sub -> add -2 canonicalization happens; the select stays.
+ %i = icmp eq i8 %a, 0
+ %i1 = sub i8 %a, 2
+ %i2 = select i1 %i, i8 0, i8 %i1
+ ret i8 %i2
+}
+
+define i8 @test_invalid_simplify_eq2(i8 %a) {
+; CHECK-LABEL: @test_invalid_simplify_eq2(
+; CHECK-NEXT: [[I:%.*]] = icmp eq i8 [[A:%.*]], 2
+; CHECK-NEXT: [[I1:%.*]] = add i8 [[A]], -1
+; CHECK-NEXT: [[I2:%.*]] = select i1 [[I]], i8 0, i8 [[I1]]
+; CHECK-NEXT: ret i8 [[I2]]
+;
+; Negative test: the compare is against 2, not 0, so this is not the
+; guarded-decrement pattern. CHECK expects only sub -> add -1.
+ %i = icmp eq i8 %a, 2
+ %i1 = sub i8 %a, 1
+ %i2 = select i1 %i, i8 0, i8 %i1
+ ret i8 %i2
+}
+
+define i8 @test_invalid_simplify_select_1(i8 %a) {
+; CHECK-LABEL: @test_invalid_simplify_select_1(
+; CHECK-NEXT: [[I:%.*]] = icmp eq i8 [[A:%.*]], 0
+; CHECK-NEXT: [[I1:%.*]] = add i8 [[A]], -1
+; CHECK-NEXT: [[I2:%.*]] = select i1 [[I]], i8 1, i8 [[I1]]
+; CHECK-NEXT: ret i8 [[I2]]
+;
+; Negative test: the select's true arm is 1, not 0, so this does not
+; clamp the decrement at zero. CHECK expects only sub -> add -1.
+ %i = icmp eq i8 %a, 0
+ %i1 = sub i8 %a, 1
+ %i2 = select i1 %i, i8 1, i8 %i1
+ ret i8 %i2
+}
+
+define i8 @test_invalid_simplify_other(i8 %a, i8 %b) {
+; CHECK-LABEL: @test_invalid_simplify_other(
+; CHECK-NEXT: [[I:%.*]] = icmp eq i8 [[A:%.*]], 0
+; CHECK-NEXT: [[I1:%.*]] = add i8 [[B:%.*]], -1
+; CHECK-NEXT: [[I2:%.*]] = select i1 [[I]], i8 0, i8 [[I1]]
+; CHECK-NEXT: ret i8 [[I2]]
+;
+; Negative test: the compare tests %a but the decrement operates on %b,
+; so the pattern does not apply. CHECK expects both to remain.
+ %i = icmp eq i8 %a, 0
+ %i1 = sub i8 %b, 1
+ %i2 = select i1 %i, i8 0, i8 %i1
+ ret i8 %i2
+}
+
define <2 x i8> @test_vector_usub_combine(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_usub_combine(
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 30, i8 30>)
-; CHECK-NEXT: ret <2 x i8> [[TMP1]]
+; CHECK-NEXT: [[X2:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 30, i8 30>)
+; CHECK-NEXT: ret <2 x i8> [[X2]]
;
%x1 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
%x2 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %x1, <2 x i8> <i8 20, i8 20>)
; Can combine ssubs if sign matches.
define i8 @test_scalar_ssub_both_positive(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_both_positive(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -30)
-; CHECK-NEXT: ret i8 [[TMP1]]
+; CHECK-NEXT: [[Z2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -30)
+; CHECK-NEXT: ret i8 [[Z2]]
;
%z1 = call i8 @llvm.ssub.sat.i8(i8 %a, i8 10)
%z2 = call i8 @llvm.ssub.sat.i8(i8 %z1, i8 20)
define <2 x i8> @test_vector_ssub_both_positive(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_ssub_both_positive(
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -30, i8 -30>)
-; CHECK-NEXT: ret <2 x i8> [[TMP1]]
+; CHECK-NEXT: [[Z2:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -30, i8 -30>)
+; CHECK-NEXT: ret <2 x i8> [[Z2]]
;
%z1 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
%z2 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %z1, <2 x i8> <i8 20, i8 20>)
define i8 @test_scalar_ssub_both_negative(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_both_negative(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 30)
-; CHECK-NEXT: ret i8 [[TMP1]]
+; CHECK-NEXT: [[U2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 30)
+; CHECK-NEXT: ret i8 [[U2]]
;
%u1 = call i8 @llvm.ssub.sat.i8(i8 %a, i8 -10)
%u2 = call i8 @llvm.ssub.sat.i8(i8 %u1, i8 -20)
define <2 x i8> @test_vector_ssub_both_negative(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_ssub_both_negative(
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 30, i8 30>)
-; CHECK-NEXT: ret <2 x i8> [[TMP1]]
+; CHECK-NEXT: [[U2:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 30, i8 30>)
+; CHECK-NEXT: ret <2 x i8> [[U2]]
;
%u1 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 -10, i8 -10>)
%u2 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %u1, <2 x i8> <i8 -20, i8 -20>)
; Can't combine ssubs if constants have different sign.
define i8 @test_scalar_ssub_different_sign(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_different_sign(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -10)
-; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[TMP1]], i8 20)
-; CHECK-NEXT: ret i8 [[TMP2]]
+; CHECK-NEXT: [[V1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -10)
+; CHECK-NEXT: [[V2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[V1]], i8 20)
+; CHECK-NEXT: ret i8 [[V2]]
;
%v1 = call i8 @llvm.ssub.sat.i8(i8 %a, i8 10)
%v2 = call i8 @llvm.ssub.sat.i8(i8 %v1, i8 -20)
; Can combine sadd and ssub with appropriate signs.
define i8 @test_scalar_sadd_ssub(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_ssub(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 30)
-; CHECK-NEXT: ret i8 [[TMP1]]
+; CHECK-NEXT: [[V2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 30)
+; CHECK-NEXT: ret i8 [[V2]]
;
%v1 = call i8 @llvm.sadd.sat.i8(i8 10, i8 %a)
%v2 = call i8 @llvm.ssub.sat.i8(i8 %v1, i8 -20)
define <2 x i8> @test_vector_sadd_ssub(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_sadd_ssub(
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -30, i8 -30>)
-; CHECK-NEXT: ret <2 x i8> [[TMP1]]
+; CHECK-NEXT: [[V2:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -30, i8 -30>)
+; CHECK-NEXT: ret <2 x i8> [[V2]]
;
%v1 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 -10, i8 -10>, <2 x i8> %a)
%v2 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %v1, <2 x i8> <i8 20, i8 20>)
; Can't combine ssubs if they overflow.
define i8 @test_scalar_ssub_overflow(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_overflow(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -100)
-; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[TMP1]], i8 -100)
-; CHECK-NEXT: ret i8 [[TMP2]]
+; CHECK-NEXT: [[W1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -100)
+; CHECK-NEXT: [[W2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[W1]], i8 -100)
+; CHECK-NEXT: ret i8 [[W2]]
;
%w1 = call i8 @llvm.ssub.sat.i8(i8 %a, i8 100)
%w2 = call i8 @llvm.ssub.sat.i8(i8 %w1, i8 100)
define i8 @test_scalar_ssub_neg_nneg(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_neg_nneg(
; CHECK-NEXT: [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
-; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A_NEG]], i8 -10)
-; CHECK-NEXT: ret i8 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A_NEG]], i8 -10)
+; CHECK-NEXT: ret i8 [[R]]
;
%a_neg = or i8 %a, -128
%r = call i8 @llvm.ssub.sat.i8(i8 %a_neg, i8 10)
define <2 x i8> @test_vector_ssub_neg_nneg(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_ssub_neg_nneg(
; CHECK-NEXT: [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 -10, i8 -20>)
-; CHECK-NEXT: ret <2 x i8> [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 -10, i8 -20>)
+; CHECK-NEXT: ret <2 x i8> [[R]]
;
%a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
%r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 10, i8 20>)
define i8 @test_scalar_usub_add(i8 %a, i8 %b) {
; CHECK-LABEL: @test_scalar_usub_add(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.umax.i8(i8 [[A:%.*]], i8 [[B:%.*]])
-; CHECK-NEXT: ret i8 [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = call i8 @llvm.umax.i8(i8 [[A:%.*]], i8 [[B:%.*]])
+; CHECK-NEXT: ret i8 [[RES]]
;
%sat = call i8 @llvm.usub.sat.i8(i8 %a, i8 %b)
%res = add i8 %sat, %b
define i8 @test_scalar_usub_add_commuted(i8 %a, i8 %b) {
; CHECK-LABEL: @test_scalar_usub_add_commuted(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.umax.i8(i8 [[A:%.*]], i8 [[B:%.*]])
-; CHECK-NEXT: ret i8 [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = call i8 @llvm.umax.i8(i8 [[A:%.*]], i8 [[B:%.*]])
+; CHECK-NEXT: ret i8 [[RES]]
;
%sat = call i8 @llvm.usub.sat.i8(i8 %a, i8 %b)
%res = add i8 %b, %sat
define i8 @test_scalar_usub_add_const(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_add_const(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.umax.i8(i8 [[A:%.*]], i8 42)
-; CHECK-NEXT: ret i8 [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = call i8 @llvm.umax.i8(i8 [[A:%.*]], i8 42)
+; CHECK-NEXT: ret i8 [[RES]]
;
%sat = call i8 @llvm.usub.sat.i8(i8 %a, i8 42)
%res = add i8 %sat, 42
define i8 @test_scalar_usub_sub(i8 %a, i8 %b) {
; CHECK-LABEL: @test_scalar_usub_sub(
-; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.umin.i8(i8 [[A:%.*]], i8 [[B:%.*]])
-; CHECK-NEXT: ret i8 [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = call i8 @llvm.umin.i8(i8 [[A:%.*]], i8 [[B:%.*]])
+; CHECK-NEXT: ret i8 [[RES]]
;
%sat = call i8 @llvm.usub.sat.i8(i8 %a, i8 %b)
%res = sub i8 %a, %sat
define <2 x i8> @test_vector_usub_sub(<2 x i8> %a, <2 x i8> %b) {
; CHECK-LABEL: @test_vector_usub_sub(
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i8> @llvm.umin.v2i8(<2 x i8> [[A:%.*]], <2 x i8> [[B:%.*]])
-; CHECK-NEXT: ret <2 x i8> [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = call <2 x i8> @llvm.umin.v2i8(<2 x i8> [[A:%.*]], <2 x i8> [[B:%.*]])
+; CHECK-NEXT: ret <2 x i8> [[RES]]
;
%sat = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a, <2 x i8> %b)
%res = sub <2 x i8> %a, %sat
define i32 @uadd_sat(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
%a = add i32 %y, %x
}
define i32 @uadd_sat_nonstrict(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_nonstrict(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
%a = add i32 %y, %x
define i32 @uadd_sat_commute_add(i32 %xp, i32 %y) {
; CHECK-LABEL: @uadd_sat_commute_add(
; CHECK-NEXT: [[X:%.*]] = urem i32 42, [[XP:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X]], i32 [[Y:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%x = urem i32 42, %xp ; thwart complexity-based-canonicalization
%notx = xor i32 %x, -1
define i32 @uadd_sat_ugt(i32 %x, i32 %yp) {
; CHECK-LABEL: @uadd_sat_ugt(
; CHECK-NEXT: [[Y:%.*]] = sdiv i32 [[YP:%.*]], 2442
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
+; CHECK-NEXT: ret i32 [[R]]
;
%y = sdiv i32 %yp, 2442 ; thwart complexity-based-canonicalization
%notx = xor i32 %x, -1
define i32 @uadd_sat_uge(i32 %x, i32 %yp) {
; CHECK-LABEL: @uadd_sat_uge(
; CHECK-NEXT: [[Y:%.*]] = sdiv i32 [[YP:%.*]], 2442
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
+; CHECK-NEXT: ret i32 [[R]]
;
%y = sdiv i32 %yp, 2442 ; thwart complexity-based-canonicalization
%notx = xor i32 %x, -1
; CHECK-LABEL: @uadd_sat_ugt_commute_add(
; CHECK-NEXT: [[Y:%.*]] = sdiv <2 x i32> [[YP:%.*]], <i32 2442, i32 4242>
; CHECK-NEXT: [[X:%.*]] = srem <2 x i32> <i32 42, i32 43>, [[XP:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[X]], <2 x i32> [[Y]])
-; CHECK-NEXT: ret <2 x i32> [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[X]], <2 x i32> [[Y]])
+; CHECK-NEXT: ret <2 x i32> [[R]]
;
%y = sdiv <2 x i32> %yp, <i32 2442, i32 4242> ; thwart complexity-based-canonicalization
%x = srem <2 x i32> <i32 42, i32 43>, %xp ; thwart complexity-based-canonicalization
define i32 @uadd_sat_commute_select(i32 %x, i32 %yp) {
; CHECK-LABEL: @uadd_sat_commute_select(
; CHECK-NEXT: [[Y:%.*]] = sdiv i32 [[YP:%.*]], 2442
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
+; CHECK-NEXT: ret i32 [[R]]
;
%y = sdiv i32 %yp, 2442 ; thwart complexity-based-canonicalization
%notx = xor i32 %x, -1
define i32 @uadd_sat_commute_select_nonstrict(i32 %x, i32 %yp) {
; CHECK-LABEL: @uadd_sat_commute_select_nonstrict(
; CHECK-NEXT: [[Y:%.*]] = sdiv i32 [[YP:%.*]], 2442
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
+; CHECK-NEXT: ret i32 [[R]]
;
%y = sdiv i32 %yp, 2442 ; thwart complexity-based-canonicalization
%notx = xor i32 %x, -1
; CHECK-LABEL: @uadd_sat_commute_select_commute_add(
; CHECK-NEXT: [[X:%.*]] = urem i32 42, [[XP:%.*]]
; CHECK-NEXT: [[Y:%.*]] = sdiv i32 [[YP:%.*]], 2442
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X]], i32 [[Y]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT: ret i32 [[R]]
;
%x = urem i32 42, %xp ; thwart complexity-based-canonicalization
%y = sdiv i32 %yp, 2442 ; thwart complexity-based-canonicalization
define <2 x i32> @uadd_sat_commute_select_ugt(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @uadd_sat_commute_select_ugt(
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[X:%.*]], <2 x i32> [[Y:%.*]])
-; CHECK-NEXT: ret <2 x i32> [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[X:%.*]], <2 x i32> [[Y:%.*]])
+; CHECK-NEXT: ret <2 x i32> [[R]]
;
%notx = xor <2 x i32> %x, <i32 -1, i32 -1>
%a = add <2 x i32> %y, %x
define i32 @uadd_sat_commute_select_ugt_commute_add(i32 %xp, i32 %y) {
; CHECK-LABEL: @uadd_sat_commute_select_ugt_commute_add(
; CHECK-NEXT: [[X:%.*]] = srem i32 42, [[XP:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X]], i32 [[Y:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%x = srem i32 42, %xp ; thwart complexity-based-canonicalization
%notx = xor i32 %x, -1
define i32 @uadd_sat_not(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
%a = add i32 %notx, %y
define i32 @uadd_sat_not_nonstrict(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_nonstrict(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
%a = add i32 %notx, %y
; CHECK-NEXT: [[X:%.*]] = srem i32 42, [[XP:%.*]]
; CHECK-NEXT: [[Y:%.*]] = urem i32 42, [[YP:%.*]]
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y]], i32 [[NOTX]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y]], i32 [[NOTX]])
+; CHECK-NEXT: ret i32 [[R]]
;
%x = srem i32 42, %xp ; thwart complexity-based-canonicalization
%y = urem i32 42, %yp ; thwart complexity-based-canonicalization
define i32 @uadd_sat_not_ugt(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_ugt(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
%a = add i32 %notx, %y
define i32 @uadd_sat_not_uge(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_uge(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
%a = add i32 %notx, %y
; CHECK-LABEL: @uadd_sat_not_ugt_commute_add(
; CHECK-NEXT: [[Y:%.*]] = sdiv <2 x i32> [[YP:%.*]], <i32 2442, i32 4242>
; CHECK-NEXT: [[NOTX:%.*]] = xor <2 x i32> [[X:%.*]], <i32 -1, i32 -1>
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[Y]], <2 x i32> [[NOTX]])
-; CHECK-NEXT: ret <2 x i32> [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[Y]], <2 x i32> [[NOTX]])
+; CHECK-NEXT: ret <2 x i32> [[R]]
;
%y = sdiv <2 x i32> %yp, <i32 2442, i32 4242> ; thwart complexity-based-canonicalization
%notx = xor <2 x i32> %x, <i32 -1, i32 -1>
define i32 @uadd_sat_not_commute_select(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_commute_select(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
%a = add i32 %notx, %y
define i32 @uadd_sat_not_commute_select_nonstrict(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_commute_select_nonstrict(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
%a = add i32 %notx, %y
; CHECK-LABEL: @uadd_sat_not_commute_select_commute_add(
; CHECK-NEXT: [[Y:%.*]] = sdiv i32 42, [[YP:%.*]]
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y]], i32 [[NOTX]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y]], i32 [[NOTX]])
+; CHECK-NEXT: ret i32 [[R]]
;
%y = sdiv i32 42, %yp ; thwart complexity-based-canonicalization
%notx = xor i32 %x, -1
; CHECK-NEXT: [[X:%.*]] = urem <2 x i32> <i32 42, i32 -42>, [[XP:%.*]]
; CHECK-NEXT: [[Y:%.*]] = srem <2 x i32> <i32 12, i32 412>, [[YP:%.*]]
; CHECK-NEXT: [[NOTX:%.*]] = xor <2 x i32> [[X]], <i32 -1, i32 -1>
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[Y]], <2 x i32> [[NOTX]])
-; CHECK-NEXT: ret <2 x i32> [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[Y]], <2 x i32> [[NOTX]])
+; CHECK-NEXT: ret <2 x i32> [[R]]
;
%x = urem <2 x i32> <i32 42, i32 -42>, %xp ; thwart complexity-based-canonicalization
%y = srem <2 x i32> <i32 12, i32 412>, %yp ; thwart complexity-based-canonicalization
define i32 @uadd_sat_not_commute_select_ugt_commute_add(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_commute_select_ugt_commute_add(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
%a = add i32 %notx, %y
define i32 @uadd_sat_not_commute_select_uge_commute_add(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_commute_select_uge_commute_add(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
%a = add i32 %notx, %y
define i32 @uadd_sat_constant_commute(i32 %x) {
; CHECK-LABEL: @uadd_sat_constant_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 42)
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 42)
+; CHECK-NEXT: ret i32 [[R]]
;
%a = add i32 %x, 42
%c = icmp ult i32 %x, -43
define i32 @uadd_sat_canon(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_canon(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%a = add i32 %x, %y
%c = icmp ult i32 %a, %x
define i32 @uadd_sat_canon_y(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_canon_y(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[X:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[X:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%a = add i32 %x, %y
%c = icmp ult i32 %a, %y
define <4 x i32> @uadd_sat_constant_vec_commute(<4 x i32> %x) {
; CHECK-LABEL: @uadd_sat_constant_vec_commute(
-; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> [[X:%.*]], <4 x i32> <i32 42, i32 42, i32 42, i32 42>)
-; CHECK-NEXT: ret <4 x i32> [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> [[X:%.*]], <4 x i32> <i32 42, i32 42, i32 42, i32 42>)
+; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a = add <4 x i32> %x, <i32 42, i32 42, i32 42, i32 42>
%c = icmp ult <4 x i32> %x, <i32 -43, i32 -43, i32 -43, i32 -43>
; CHECK-LABEL: @unsigned_sat_variable_using_wrong_min(
; CHECK-NEXT: [[Y:%.*]] = call i32 @get_i32()
; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.smin.i32(i32 [[NOTY]], i32 [[X:%.*]])
-; CHECK-NEXT: [[R:%.*]] = add i32 [[Y]], [[TMP1]]
+; CHECK-NEXT: [[S:%.*]] = call i32 @llvm.smin.i32(i32 [[NOTY]], i32 [[X:%.*]])
+; CHECK-NEXT: [[R:%.*]] = add i32 [[Y]], [[S]]
; CHECK-NEXT: ret i32 [[R]]
;
%y = call i32 @get_i32() ; thwart complexity-based canonicalization
; CHECK-LABEL: @unsigned_sat_variable_using_wrong_value(
; CHECK-NEXT: [[Y:%.*]] = call i32 @get_i32()
; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.umin.i32(i32 [[NOTY]], i32 [[X:%.*]])
-; CHECK-NEXT: [[R:%.*]] = add i32 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[S:%.*]] = call i32 @llvm.umin.i32(i32 [[NOTY]], i32 [[X:%.*]])
+; CHECK-NEXT: [[R:%.*]] = add i32 [[S]], [[Z:%.*]]
; CHECK-NEXT: ret i32 [[R]]
;
%y = call i32 @get_i32() ; thwart complexity-based canonicalization
define i32 @unsigned_sat_constant_using_min_wrong_constant(i32 %x) {
; CHECK-LABEL: @unsigned_sat_constant_using_min_wrong_constant(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.umin.i32(i32 [[X:%.*]], i32 42)
-; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[TMP1]], -42
+; CHECK-NEXT: [[S:%.*]] = call i32 @llvm.umin.i32(i32 [[X:%.*]], i32 42)
+; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[S]], -42
; CHECK-NEXT: ret i32 [[R]]
;
%c = icmp ult i32 %x, 42
define i32 @uadd_sat_via_add(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_via_add(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[X:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[X:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%a = add i32 %x, %y
%c = icmp ult i32 %a, %y
define i32 @uadd_sat_via_add_swapped_select(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_via_add_swapped_select(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[X:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[X:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%a = add i32 %x, %y
%c = icmp uge i32 %a, %y
define i32 @uadd_sat_via_add_swapped_cmp(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_via_add_swapped_cmp(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[X:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[X:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%a = add i32 %x, %y
%c = icmp ugt i32 %y, %a
define i32 @uadd_sat_via_add_swapped_cmp_nonstric(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_via_add_swapped_cmp_nonstric(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[X:%.*]])
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[X:%.*]])
+; CHECK-NEXT: ret i32 [[R]]
;
%a = add i32 %x, %y
%c = icmp ule i32 %y, %a