; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128-ni:1"
; Test that a PHI in catchswitch BB are excluded from combining into a non-PHI
; instruction.
define void @test0(i1 %c1) personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
; CHECK-LABEL: @test0(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = alloca [[STRUCT_BLAM:%.*]], align 4
; CHECK-NEXT: br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_BLAM]], %struct.blam* [[TMP0]], i32 0, i32 0
; CHECK-NEXT: invoke void @foo()
; CHECK-NEXT: to label [[BB3:%.*]] unwind label [[BB4:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_BLAM]], %struct.blam* [[TMP0]], i32 0, i32 0
; CHECK-NEXT: invoke void @foo()
; CHECK-NEXT: to label [[BB3]] unwind label [[BB4]]
; CHECK: bb3:
; CHECK-NEXT: unreachable
; CHECK: bb4:
; CHECK-NEXT: [[TMP3:%.*]] = phi %struct.quux* [ [[TMP1]], [[BB1]] ], [ [[TMP2]], [[BB2]] ]
; CHECK-NEXT: [[TMP4:%.*]] = catchswitch within none [label %bb5] unwind label [[BB7:%.*]]
; CHECK: bb5:
; CHECK-NEXT: [[TMP5:%.*]] = catchpad within [[TMP4]] [i8* null]
; CHECK-NEXT: invoke void @foo() [ "funclet"(token [[TMP5]]) ]
; CHECK-NEXT: to label [[BB6:%.*]] unwind label [[BB7]]
; CHECK: bb6:
; CHECK-NEXT: unreachable
; CHECK: bb7:
; CHECK-NEXT: [[TMP6:%.*]] = cleanuppad within none []
; CHECK-NEXT: call void @bar(%struct.quux* [[TMP3]]) [ "funclet"(token [[TMP6]]) ]
; CHECK-NEXT: unreachable
;
bb:
  %tmp0 = alloca %struct.blam, align 4
  br i1 %c1, label %bb1, label %bb2

bb1:                                              ; preds = %bb
  %tmp1 = getelementptr inbounds %struct.blam, %struct.blam* %tmp0, i32 0, i32 0
  invoke void @foo()
          to label %bb3 unwind label %bb4

bb2:                                              ; preds = %bb
  %tmp2 = getelementptr inbounds %struct.blam, %struct.blam* %tmp0, i32 0, i32 0
  invoke void @foo()
          to label %bb3 unwind label %bb4

bb3:                                              ; preds = %bb2, %bb1
  unreachable

; This PHI should not be combined into a non-PHI instruction, because
; catchswitch BB cannot have any non-PHI instruction other than catchswitch
; itself.
bb4:                                              ; preds = %bb2, %bb1
  %tmp3 = phi %struct.quux* [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
  %tmp4 = catchswitch within none [label %bb5] unwind label %bb7

bb5:                                              ; preds = %bb4
  %tmp5 = catchpad within %tmp4 [i8* null]
  invoke void @foo() [ "funclet"(token %tmp5) ]
          to label %bb6 unwind label %bb7

bb6:                                              ; preds = %bb5
  unreachable

; NOTE(review): the "bb4:" label, the bb7 block, and the closing brace were
; missing from the mangled source; reconstructed from the autogenerated CHECK
; lines above (the test asserts this function is preserved intact) — confirm
; against the original test file.
bb7:                                              ; preds = %bb5, %bb4
  %tmp6 = cleanuppad within none []
  call void @bar(%struct.quux* %tmp3) [ "funclet"(token %tmp6) ]
  unreachable
}
; Test that slicing-up of illegal integer type PHI does not happen in catchswitch
; BBs, which can't have any non-PHI instruction before the catchswitch.
define void @test1() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: invoke void @foo()
; CHECK-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[CATCH_DISPATCH1:%.*]]
; CHECK: invoke.cont:
; CHECK-NEXT: [[CALL:%.*]] = invoke i32 @baz()
; CHECK-NEXT: to label [[INVOKE_CONT1:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
; CHECK: invoke.cont1:
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CALL]], 0
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: [[AP_0:%.*]] = phi i8 [ 1, [[IF_THEN]] ], [ 0, [[INVOKE_CONT1]] ]
; CHECK-NEXT: invoke void @foo()
; CHECK-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[CATCH_DISPATCH]]
; CHECK: invoke.cont2:
; CHECK-NEXT: br label [[TRY_CONT:%.*]]
; CHECK: catch.dispatch:
; CHECK-NEXT: [[AP_1:%.*]] = phi i8 [ [[AP_0]], [[IF_END]] ], [ 0, [[INVOKE_CONT]] ]
; CHECK-NEXT: [[TMP0:%.*]] = catchswitch within none [label %catch.start] unwind label [[CATCH_DISPATCH1]]
; CHECK: catch.start:
; CHECK-NEXT: [[TMP1:%.*]] = catchpad within [[TMP0]] [i8* null]
; CHECK-NEXT: br i1 false, label [[CATCH:%.*]], label [[RETHROW:%.*]]
; CHECK: catch:
; CHECK-NEXT: catchret from [[TMP1]] to label [[TRY_CONT]]
; CHECK: rethrow:
; CHECK-NEXT: invoke void @llvm.wasm.rethrow() #[[ATTR0:[0-9]+]] [ "funclet"(token [[TMP1]]) ]
; CHECK-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[CATCH_DISPATCH1]]
; CHECK: catch.dispatch1:
; CHECK-NEXT: [[AP_2:%.*]] = phi i8 [ [[AP_1]], [[CATCH_DISPATCH]] ], [ [[AP_1]], [[RETHROW]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = catchswitch within none [label %catch.start1] unwind to caller
; CHECK: catch.start1:
; CHECK-NEXT: [[TMP3:%.*]] = catchpad within [[TMP2]] [i8* null]
; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[AP_2]], 1
; CHECK-NEXT: [[TOBOOL1_NOT:%.*]] = icmp eq i8 [[TMP0]], 0
; CHECK-NEXT: br i1 [[TOBOOL1_NOT]], label [[IF_END1:%.*]], label [[IF_THEN1:%.*]]
; CHECK: if.then1:
; CHECK-NEXT: br label [[IF_END1]]
; CHECK: if.end1:
; CHECK-NEXT: catchret from [[TMP3]] to label [[TRY_CONT]]
; CHECK: try.cont:
; CHECK-NEXT: ret void
; CHECK: unreachable:
; CHECK-NEXT: unreachable
;
entry:
  invoke void @foo()
          to label %invoke.cont unwind label %catch.dispatch1

invoke.cont:                                      ; preds = %entry
  %call = invoke i32 @baz()
          to label %invoke.cont1 unwind label %catch.dispatch

invoke.cont1:                                     ; preds = %invoke.cont
  %tobool = icmp ne i32 %call, 0
; NOTE(review): large parts of this function's IR (the branch below, if.then,
; catch.start, catch, catch.start1 onward, and the closing brace) were
; truncated in the mangled source; reconstructed from the autogenerated CHECK
; lines above (the test asserts this IR is preserved intact) — confirm
; against the original test file.
  br i1 %tobool, label %if.then, label %if.end

if.then:                                          ; preds = %invoke.cont1
  br label %if.end

if.end:                                           ; preds = %if.then, %invoke.cont1
  %ap.0 = phi i8 [ 1, %if.then ], [ 0, %invoke.cont1 ]
  invoke void @foo()
          to label %invoke.cont2 unwind label %catch.dispatch

invoke.cont2:                                     ; preds = %if.end
  br label %try.cont

; So if one of sliced-up PHI's predecessor is a catchswitch block, we don't
; optimize that case and bail out. This BB should be preserved intact after
; InstCombine and the pass shouldn't produce invalid code.
catch.dispatch:                                   ; preds = %if.end, %invoke.cont
  %ap.1 = phi i8 [ %ap.0, %if.end ], [ 0, %invoke.cont ]
  %tmp0 = catchswitch within none [label %catch.start] unwind label %catch.dispatch1

catch.start:                                      ; preds = %catch.dispatch
  %tmp1 = catchpad within %tmp0 [i8* null]
  br i1 false, label %catch, label %rethrow

catch:                                            ; preds = %catch.start
  catchret from %tmp1 to label %try.cont

rethrow:                                          ; preds = %catch.start
  invoke void @llvm.wasm.rethrow() #0 [ "funclet"(token %tmp1) ]
          to label %unreachable unwind label %catch.dispatch1

catch.dispatch1:                                  ; preds = %rethrow, %catch.dispatch, %entry
  %ap.2 = phi i8 [ %ap.1, %catch.dispatch ], [ %ap.1, %rethrow ], [ 0, %entry ]
  %tmp2 = catchswitch within none [label %catch.start1] unwind to caller

catch.start1:                                     ; preds = %catch.dispatch1
  %tmp3 = catchpad within %tmp2 [i8* null]
  %and = and i8 %ap.2, 1
  %tobool1 = icmp ne i8 %and, 0
  br i1 %tobool1, label %if.then1, label %if.end1

if.then1:                                         ; preds = %catch.start1
  br label %if.end1

if.end1:                                          ; preds = %if.then1, %catch.start1
  catchret from %tmp3 to label %try.cont

try.cont:                                         ; preds = %if.end1, %catch, %invoke.cont2
  ret void

unreachable:                                      ; preds = %rethrow
  unreachable
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
;
; Verify that a constant with size in excess of 32-bit SIZE_MAX doesn't
%ptr = getelementptr <{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i32 0, i32 0
%chr = tail call i8* @memrchr(i8* %ptr, i32 0, i64 4294967296)
ret i8* %chr
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
@test.data = private unnamed_addr addrspace(2) constant [8 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7], align 4
define void @test_load(i32 addrspace(1)* %out, i64 %x) {
; CHECK-LABEL: @test_load(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr [8 x i32], [8 x i32] addrspace(2)* @test.data, i64 0, i64 [[X:%.*]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32 addrspace(2)* [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[OUT:%.*]], i64 [[X]]
; CHECK-NEXT: store i32 [[TMP0]], i32 addrspace(1)* [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
entry:
  %data = alloca [8 x i32], align 4
  %0 = bitcast [8 x i32]* %data to i8*
; NOTE(review): the memcpy/gep/load/store body implied by the CHECK lines
; above appears to have been truncated in this copy — restore it from the
; original test before running.
  ret void
}
define void @test_load_bitcast_chain(i32 addrspace(1)* %out, i64 %x) {
; CHECK-LABEL: @test_load_bitcast_chain(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr [8 x i32], [8 x i32] addrspace(2)* @test.data, i64 0, i64 [[X:%.*]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32 addrspace(2)* [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[OUT:%.*]], i64 [[X]]
; CHECK-NEXT: store i32 [[TMP0]], i32 addrspace(1)* [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
entry:
  %data = alloca [8 x i32], align 4
  %0 = bitcast [8 x i32]* %data to i8*
; NOTE(review): the bitcast-chain/memcpy/load/store body implied by the CHECK
; lines above appears to have been truncated in this copy — restore it from
; the original test before running.
  ret void
}
define void @test_call(i32 addrspace(1)* %out, i64 %x) {
; CHECK-LABEL: @test_call(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DATA:%.*]] = alloca [8 x i32], align 4
; CHECK-NEXT: [[TMP0:%.*]] = bitcast [8 x i32]* [[DATA]] to i8*
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p2i8.i64(i8* noundef nonnull align 4 dereferenceable(32) [[TMP0]], i8 addrspace(2)* noundef align 4 dereferenceable(32) bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i1 false)
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* [[DATA]], i64 0, i64 [[X:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @foo(i32* nonnull [[ARRAYIDX]])
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[OUT:%.*]], i64 [[X]]
; CHECK-NEXT: store i32 [[TMP1]], i32 addrspace(1)* [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
entry:
  %data = alloca [8 x i32], align 4
  %0 = bitcast [8 x i32]* %data to i8*
; NOTE(review): the memcpy/gep/call/store body implied by the CHECK lines
; above appears to have been truncated in this copy — restore it from the
; original test before running.
  ret void
}
define void @test_call_no_null_opt(i32 addrspace(1)* %out, i64 %x) #0 {
; CHECK-LABEL: @test_call_no_null_opt(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DATA:%.*]] = alloca [8 x i32], align 4
; CHECK-NEXT: [[TMP0:%.*]] = bitcast [8 x i32]* [[DATA]] to i8*
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p2i8.i64(i8* noundef nonnull align 4 dereferenceable(32) [[TMP0]], i8 addrspace(2)* noundef align 4 dereferenceable(32) bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i1 false)
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* [[DATA]], i64 0, i64 [[X:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @foo(i32* [[ARRAYIDX]])
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[OUT:%.*]], i64 [[X]]
; CHECK-NEXT: store i32 [[TMP1]], i32 addrspace(1)* [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
entry:
  %data = alloca [8 x i32], align 4
  %0 = bitcast [8 x i32]* %data to i8*
; NOTE(review): the memcpy/gep/call/store body implied by the CHECK lines
; above appears to have been truncated in this copy — restore it from the
; original test before running.
  ret void
}
define void @test_load_and_call(i32 addrspace(1)* %out, i64 %x, i64 %y) {
; CHECK-LABEL: @test_load_and_call(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DATA:%.*]] = alloca [8 x i32], align 4
; CHECK-NEXT: [[TMP0:%.*]] = bitcast [8 x i32]* [[DATA]] to i8*
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p2i8.i64(i8* noundef nonnull align 4 dereferenceable(32) [[TMP0]], i8 addrspace(2)* noundef align 4 dereferenceable(32) bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i1 false)
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* [[DATA]], i64 0, i64 [[X:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[OUT:%.*]], i64 [[X]]
; CHECK-NEXT: store i32 [[TMP1]], i32 addrspace(1)* [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @foo(i32* nonnull [[ARRAYIDX]])
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[OUT]], i64 [[Y:%.*]]
; CHECK-NEXT: store i32 [[TMP2]], i32 addrspace(1)* [[ARRAYIDX2]], align 4
; CHECK-NEXT: ret void
;
entry:
  %data = alloca [8 x i32], align 4
  %0 = bitcast [8 x i32]* %data to i8*
; NOTE(review): the memcpy/load/call/store body implied by the CHECK lines
; above appears to have been truncated in this copy — restore it from the
; original test before running.
  ret void
}
define void @test_load_and_call_no_null_opt(i32 addrspace(1)* %out, i64 %x, i64 %y) #0 {
; CHECK-LABEL: @test_load_and_call_no_null_opt(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DATA:%.*]] = alloca [8 x i32], align 4
; CHECK-NEXT: [[TMP0:%.*]] = bitcast [8 x i32]* [[DATA]] to i8*
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p2i8.i64(i8* noundef nonnull align 4 dereferenceable(32) [[TMP0]], i8 addrspace(2)* noundef align 4 dereferenceable(32) bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i1 false)
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* [[DATA]], i64 0, i64 [[X:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[OUT:%.*]], i64 [[X]]
; CHECK-NEXT: store i32 [[TMP1]], i32 addrspace(1)* [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @foo(i32* [[ARRAYIDX]])
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[OUT]], i64 [[Y:%.*]]
; CHECK-NEXT: store i32 [[TMP2]], i32 addrspace(1)* [[ARRAYIDX2]], align 4
; CHECK-NEXT: ret void
;
entry:
  %data = alloca [8 x i32], align 4
  %0 = bitcast [8 x i32]* %data to i8*
; NOTE(review): the rest of the body implied by the CHECK lines above, plus
; the terminator and closing brace, were truncated in this copy; "ret void"
; and "}" restored to keep the function well-formed — restore the full body
; from the original test before running.
  ret void
}
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; Test that the memset library call simplifier works correctly.
;
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
define i8 @test_no_simplify1(ptr %mem, i32 %val, i32 %size) {
; CHECK-LABEL: @test_no_simplify1(
; CHECK-NEXT: [[RET:%.*]] = call i8 @memset(ptr [[MEM:%.*]], i32 [[VAL:%.*]], i32 [[SIZE:%.*]])
; CHECK-NEXT: ret i8 [[RET]]
;
  %ret = call i8 @memset(ptr %mem, i32 %val, i32 %size)
  ret i8 %ret
}
define i64 @test21(i32 %x) {
; CHECK-LABEL: @test21(
; CHECK-NEXT:    [[X_LOBIT:%.*]] = ashr i32 [[X:%.*]], 31
; CHECK-NEXT: [[RETVAL:%.*]] = sext i32 [[X_LOBIT]] to i64
; CHECK-NEXT: ret i64 [[RETVAL]]
;
  %t = icmp slt i32 %x, 0
  %retval = select i1 %t, i64 -1, i64 0
; NOTE(review): terminator and closing brace were truncated in this copy;
; reconstructed from the "ret i64 [[RETVAL]]" CHECK line above.
  ret i64 %retval
}
define i16 @test22(i32 %x) {
; CHECK-LABEL: @test22(
; CHECK-NEXT:    [[X_LOBIT:%.*]] = ashr i32 [[X:%.*]], 31
; CHECK-NEXT: [[RETVAL:%.*]] = trunc i32 [[X_LOBIT]] to i16
; CHECK-NEXT: ret i16 [[RETVAL]]
;
  %t = icmp slt i32 %x, 0
  %retval = select i1 %t, i16 -1, i16 0
; NOTE(review): terminator and closing brace were truncated in this copy;
; reconstructed from the "ret i16 [[RETVAL]]" CHECK line above.
  ret i16 %retval
}
; SMAX(SMAX(x, y), x) -> SMAX(x, y)
define i32 @test30(i32 %x, i32 %y) {
; CHECK-LABEL: @test30(
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT: ret i32 [[COND]]
;
  %cmp = icmp sgt i32 %x, %y
  %cond = select i1 %cmp, i32 %x, i32 %y
; NOTE(review): the outer SMAX(cond, x) select and the return were truncated
; in this copy; reconstructed from the pattern comment and CHECK lines —
; confirm against the original test.
  %cmp2 = icmp sgt i32 %cond, %x
  %retval = select i1 %cmp2, i32 %cond, i32 %x
  ret i32 %retval
}
; UMAX(UMAX(x, y), x) -> UMAX(x, y)
define i32 @test31(i32 %x, i32 %y) {
; CHECK-LABEL: @test31(
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT: ret i32 [[COND]]
;
  %cmp = icmp ugt i32 %x, %y
  %cond = select i1 %cmp, i32 %x, i32 %y
; NOTE(review): the outer UMAX(cond, x) select and the return were truncated
; in this copy; reconstructed from the pattern comment and CHECK lines —
; confirm against the original test.
  %cmp2 = icmp ugt i32 %cond, %x
  %retval = select i1 %cmp2, i32 %cond, i32 %x
  ret i32 %retval
}
; SMIN(SMIN(x, y), x) -> SMIN(x, y)
define i32 @test32(i32 %x, i32 %y) {
; CHECK-LABEL: @test32(
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT: ret i32 [[COND]]
;
  %cmp = icmp sgt i32 %x, %y
  %cond = select i1 %cmp, i32 %y, i32 %x
; NOTE(review): the outer SMIN(cond, x) select and the return were truncated
; in this copy; reconstructed from the pattern comment and CHECK lines —
; confirm against the original test.
  %cmp2 = icmp sgt i32 %cond, %x
  %retval = select i1 %cmp2, i32 %x, i32 %cond
  ret i32 %retval
}
define i32 @PR27137(i32 %a) {
; CHECK-LABEL: @PR27137(
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.smin.i32(i32 [[A:%.*]], i32 0)
; CHECK-NEXT: [[S1:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: ret i32 [[S1]]
;
  %not_a = xor i32 %a, -1
  %c0 = icmp slt i32 %a, 0
; NOTE(review): the two selects and the return were truncated in this copy;
; reconstructed so the final value is named %s1 to match the [[S1]] CHECK
; binding above — confirm against the original test.
  %s0 = select i1 %c0, i32 %not_a, i32 -1
  %c1 = icmp sgt i32 %s0, 0
  %s1 = select i1 %c1, i32 %s0, i32 0
  ret i32 %s1
}
define i32 @select_icmp_slt0_xor(i32 %x) {
; CHECK-LABEL: @select_icmp_slt0_xor(
; CHECK-NEXT: [[X_XOR:%.*]] = or i32 [[X:%.*]], -2147483648
; CHECK-NEXT: ret i32 [[X_XOR]]
;
  %cmp = icmp slt i32 %x, zeroinitializer
  %xor = xor i32 %x, 2147483648
; NOTE(review): the select and return were truncated in this copy;
; reconstructed so the final value is named %x.xor to match the [[X_XOR]]
; CHECK binding above — confirm against the original test.
  %x.xor = select i1 %cmp, i32 %x, i32 %xor
  ret i32 %x.xor
}
define <2 x i32> @select_icmp_slt0_xor_vec(<2 x i32> %x) {
; CHECK-LABEL: @select_icmp_slt0_xor_vec(
; CHECK-NEXT: [[X_XOR:%.*]] = or <2 x i32> [[X:%.*]], <i32 -2147483648, i32 -2147483648>
; CHECK-NEXT: ret <2 x i32> [[X_XOR]]
;
%cmp = icmp slt <2 x i32> %x, zeroinitializer
%xor = xor <2 x i32> %x, <i32 2147483648, i32 2147483648>