From: Sanjay Patel
Date: Fri, 29 Mar 2019 17:47:51 +0000 (+0000)
Subject: [InstCombine] regenerate test checks; NFC
X-Git-Tag: llvmorg-10-init~8870
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=2bff8b42727730e8360835406218ef28bce80dcd;p=platform%2Fupstream%2Fllvm.git

[InstCombine] regenerate test checks; NFC

llvm-svn: 357288
---

diff --git a/llvm/test/Transforms/InstCombine/X86/x86-insertps.ll b/llvm/test/Transforms/InstCombine/X86/x86-insertps.ll
index 6bcc78b..4ec61d3 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-insertps.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-insertps.ll
@@ -11,144 +11,130 @@ define <4 x float> @insertps_0x0f(<4 x float> %v1, <4 x float> %v2) {
 ;
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v2, i8 15)
   ret <4 x float> %res
-
 }
+
 define <4 x float> @insertps_0xff(<4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: @insertps_0xff(
 ; CHECK-NEXT: ret <4 x float> zeroinitializer
 ;
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v2, i8 255)
   ret <4 x float> %res
-
 }

 ; If some zero mask bits are set that do not override the insertion, we do not change anything.
 define <4 x float> @insertps_0x0c(<4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: @insertps_0x0c(
-; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v2, i8 12)
+; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> [[V1:%.*]], <4 x float> [[V2:%.*]], i8 12)
 ; CHECK-NEXT: ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v2, i8 12)
   ret <4 x float> %res
-
 }

 ; ...unless both input vectors are the same operand.
 define <4 x float> @insertps_0x15_single_input(<4 x float> %v1) {
 ; CHECK-LABEL: @insertps_0x15_single_input(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> %v1, <4 x float> , <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[V1:%.*]], <4 x float> , <4 x i32>
 ; CHECK-NEXT: ret <4 x float> [[TMP1]]
 ;
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v1, i8 21)
   ret <4 x float> %res
-
 }

 ; The zero mask overrides the insertion lane.
 define <4 x float> @insertps_0x1a_single_input(<4 x float> %v1) {
 ; CHECK-LABEL: @insertps_0x1a_single_input(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> %v1, <4 x float> , <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[V1:%.*]], <4 x float> , <4 x i32>
 ; CHECK-NEXT: ret <4 x float> [[TMP1]]
 ;
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v1, i8 26)
   ret <4 x float> %res
-
 }

 ; The zero mask overrides the insertion lane, so the second input vector is not used.
 define <4 x float> @insertps_0xc1(<4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: @insertps_0xc1(
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> %v1, float 0.000000e+00, i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> [[V1:%.*]], float 0.000000e+00, i32 0
 ; CHECK-NEXT: ret <4 x float> [[TMP1]]
 ;
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v2, i8 193)
   ret <4 x float> %res
-
 }

 ; If no zero mask bits are set, convert to a shuffle.
 define <4 x float> @insertps_0x00(<4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: @insertps_0x00(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> %v1, <4 x float> %v2, <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[V1:%.*]], <4 x float> [[V2:%.*]], <4 x i32>
 ; CHECK-NEXT: ret <4 x float> [[TMP1]]
 ;
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v2, i8 0)
   ret <4 x float> %res
-
 }

 define <4 x float> @insertps_0x10(<4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: @insertps_0x10(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> %v1, <4 x float> %v2, <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[V1:%.*]], <4 x float> [[V2:%.*]], <4 x i32>
 ; CHECK-NEXT: ret <4 x float> [[TMP1]]
 ;
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v2, i8 16)
   ret <4 x float> %res
-
 }

 define <4 x float> @insertps_0x20(<4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: @insertps_0x20(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> %v1, <4 x float> %v2, <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[V1:%.*]], <4 x float> [[V2:%.*]], <4 x i32>
 ; CHECK-NEXT: ret <4 x float> [[TMP1]]
 ;
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v2, i8 32)
   ret <4 x float> %res
-
 }

 define <4 x float> @insertps_0x30(<4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: @insertps_0x30(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> %v1, <4 x float> %v2, <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[V1:%.*]], <4 x float> [[V2:%.*]], <4 x i32>
 ; CHECK-NEXT: ret <4 x float> [[TMP1]]
 ;
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v2, i8 48)
   ret <4 x float> %res
-
 }

 define <4 x float> @insertps_0xc0(<4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: @insertps_0xc0(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> %v1, <4 x float> %v2, <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[V1:%.*]], <4 x float> [[V2:%.*]], <4 x i32>
 ; CHECK-NEXT: ret <4 x float> [[TMP1]]
 ;
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v2, i8 192)
   ret <4 x float> %res
-
 }

 define <4 x float> @insertps_0xd0(<4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: @insertps_0xd0(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> %v1, <4 x float> %v2, <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[V1:%.*]], <4 x float> [[V2:%.*]], <4 x i32>
 ; CHECK-NEXT: ret <4 x float> [[TMP1]]
 ;
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v2, i8 208)
   ret <4 x float> %res
-
 }

 define <4 x float> @insertps_0xe0(<4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: @insertps_0xe0(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> %v1, <4 x float> %v2, <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[V1:%.*]], <4 x float> [[V2:%.*]], <4 x i32>
 ; CHECK-NEXT: ret <4 x float> [[TMP1]]
 ;
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v2, i8 224)
   ret <4 x float> %res
-
 }

 define <4 x float> @insertps_0xf0(<4 x float> %v1, <4 x float> %v2) {
 ; CHECK-LABEL: @insertps_0xf0(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> %v1, <4 x float> %v2, <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[V1:%.*]], <4 x float> [[V2:%.*]], <4 x i32>
 ; CHECK-NEXT: ret <4 x float> [[TMP1]]
 ;
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v1, <4 x float> %v2, i8 240)
   ret <4 x float> %res
-
 }
-
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-sse4a.ll b/llvm/test/Transforms/InstCombine/X86/x86-sse4a.ll
index e36a735..70ed3c6 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-sse4a.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-sse4a.ll
@@ -7,7 +7,7 @@

 define <2 x i64> @test_extrq_call(<2 x i64> %x, <16 x i8> %y) {
 ; CHECK-LABEL: @test_extrq_call(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> %y) #1
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> [[X:%.*]], <16 x i8> [[Y:%.*]]) #1
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
   %1 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> %y) nounwind
@@ -24,7 +24,7 @@ define <2 x i64> @test_extrq_zero_arg0(<2 x i64> %x, <16 x i8> %y) {

 define <2 x i64> @test_extrq_zero_arg1(<2 x i64> %x, <16 x i8> %y) {
 ; CHECK-LABEL: @test_extrq_zero_arg1(
-; CHECK-NEXT: ret <2 x i64> %x
+; CHECK-NEXT: ret <2 x i64> [[X:%.*]]
 ;
   %1 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> zeroinitializer) nounwind
   ret <2 x i64> %1
@@ -32,7 +32,7 @@ define <2 x i64> @test_extrq_zero_arg1(<2 x i64> %x, <16 x i8> %y) {

 define <2 x i64> @test_extrq_to_extqi(<2 x i64> %x, <16 x i8> %y) {
 ; CHECK-LABEL: @test_extrq_to_extqi(
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %x, i8 8, i8 15)
+; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> [[X:%.*]], i8 8, i8 15)
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
   %1 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> ) nounwind
@@ -57,7 +57,7 @@ define <2 x i64> @test_extrq_constant_undef(<2 x i64> %x, <16 x i8> %y) {

 define <2 x i64> @test_extrq_call_constexpr(<2 x i64> %x) {
 ; CHECK-LABEL: @test_extrq_call_constexpr(
-; CHECK-NEXT: ret <2 x i64> %x
+; CHECK-NEXT: ret <2 x i64> [[X:%.*]]
 ;
   %1 = call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> bitcast (<2 x i64> to <16 x i8>))
   ret <2 x i64> %1
@@ -69,7 +69,7 @@ define <2 x i64> @test_extrq_call_constexpr(<2 x i64> %x) {

 define <2 x i64> @test_extrqi_call(<2 x i64> %x) {
 ; CHECK-LABEL: @test_extrqi_call(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %x, i8 8, i8 23)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> [[X:%.*]], i8 8, i8 23)
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
   %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %x, i8 8, i8 23)
@@ -78,7 +78,7 @@ define <2 x i64> @test_extrqi_call(<2 x i64> %x) {

 define <2 x i64> @test_extrqi_shuffle_1zuu(<2 x i64> %x) {
 ; CHECK-LABEL: @test_extrqi_shuffle_1zuu(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> %x to <16 x i8>
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[X:%.*]] to <16 x i8>
 ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> , <16 x i32>
 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64>
 ; CHECK-NEXT: ret <2 x i64> [[TMP3]]
@@ -89,7 +89,7 @@ define <2 x i64> @test_extrqi_shuffle_1zuu(<2 x i64> %x) {

 define <2 x i64> @test_extrqi_shuffle_2zzzzzzzuuuuuuuu(<2 x i64> %x) {
 ; CHECK-LABEL: @test_extrqi_shuffle_2zzzzzzzuuuuuuuu(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> %x to <16 x i8>
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[X:%.*]] to <16 x i8>
 ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> , <16 x i32>
 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64>
 ; CHECK-NEXT: ret <2 x i64> [[TMP3]]
@@ -144,7 +144,7 @@ define <2 x i64> @test_extrqi_call_constexpr() {

 define <2 x i64> @test_insertq_call(<2 x i64> %x, <2 x i64> %y) {
 ; CHECK-LABEL: @test_insertq_call(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> %y) #1
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> [[X:%.*]], <2 x i64> [[Y:%.*]]) #1
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
   %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> %y) nounwind
@@ -153,7 +153,7 @@ define <2 x i64> @test_insertq_call(<2 x i64> %x, <2 x i64> %y) {

 define <2 x i64> @test_insertq_to_insertqi(<2 x i64> %x, <2 x i64> %y) {
 ; CHECK-LABEL: @test_insertq_to_insertqi(
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> , i8 18, i8 2)
+; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> [[X:%.*]], <2 x i64> , i8 18, i8 2)
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
   %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> ) nounwind
@@ -178,7 +178,7 @@ define <2 x i64> @test_insertq_constant_undef(<2 x i64> %x, <2 x i64> %y) {

 define <2 x i64> @test_insertq_call_constexpr(<2 x i64> %x) {
 ; CHECK-LABEL: @test_insertq_call_constexpr(
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> , i8 2, i8 0)
+; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> [[X:%.*]], <2 x i64> , i8 2, i8 0)
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
   %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> bitcast (<16 x i8> trunc (<16 x i16> bitcast (<4 x i64> to <16 x i16>) to <16 x i8>) to <2 x i64>))
@@ -191,7 +191,7 @@ define <2 x i64> @test_insertq_call_constexpr(<2 x i64> %x) {

 define <16 x i8> @test_insertqi_shuffle_04uu(<16 x i8> %v, <16 x i8> %i) {
 ; CHECK-LABEL: @test_insertqi_shuffle_04uu(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %v, <16 x i8> %i, <16 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[V:%.*]], <16 x i8> [[I:%.*]], <16 x i32>
 ; CHECK-NEXT: ret <16 x i8> [[TMP1]]
 ;
   %1 = bitcast <16 x i8> %v to <2 x i64>
@@ -203,7 +203,7 @@ define <16 x i8> @test_insertqi_shuffle_04uu(<16 x i8> %v, <16 x i8> %i) {

 define <16 x i8> @test_insertqi_shuffle_8123uuuu(<16 x i8> %v, <16 x i8> %i) {
 ; CHECK-LABEL: @test_insertqi_shuffle_8123uuuu(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %v, <16 x i8> %i, <16 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[V:%.*]], <16 x i8> [[I:%.*]], <16 x i32>
 ; CHECK-NEXT: ret <16 x i8> [[TMP1]]
 ;
   %1 = bitcast <16 x i8> %v to <2 x i64>
@@ -223,7 +223,7 @@ define <2 x i64> @test_insertqi_constant(<2 x i64> %v, <2 x i64> %i) {

 define <2 x i64> @test_insertqi_call_constexpr(<2 x i64> %x) {
 ; CHECK-LABEL: @test_insertqi_call_constexpr(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> , i8 48, i8 3)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> [[X:%.*]], <2 x i64> , i8 48, i8 3)
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
   %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> bitcast (<16 x i8> trunc (<16 x i16> bitcast (<4 x i64> to <16 x i16>) to <16 x i8>) to <2 x i64>), i8 48, i8 3)
@@ -235,7 +235,7 @@ define <2 x i64> @test_insertqi_call_constexpr(<2 x i64> %x) {

 ; second arg
 define <2 x i64> @testInsert64Bits(<2 x i64> %v, <2 x i64> %i) {
 ; CHECK-LABEL: @testInsert64Bits(
-; CHECK-NEXT: ret <2 x i64> %i
+; CHECK-NEXT: ret <2 x i64> [[I:%.*]]
 ;
   %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 64, i8 0)
   ret <2 x i64> %1
@@ -243,7 +243,7 @@ define <2 x i64> @testInsert64Bits(<2 x i64> %v, <2 x i64> %i) {

 define <2 x i64> @testZeroLength(<2 x i64> %v, <2 x i64> %i) {
 ; CHECK-LABEL: @testZeroLength(
-; CHECK-NEXT: ret <2 x i64> %i
+; CHECK-NEXT: ret <2 x i64> [[I:%.*]]
 ;
   %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 0, i8 0)
   ret <2 x i64> %1
@@ -279,7 +279,7 @@ define <2 x i64> @testUndefinedInsertq_3(<2 x i64> %v, <2 x i64> %i) {

 define <2 x i64> @test_extrq_arg0(<2 x i64> %x, <16 x i8> %y) {
 ; CHECK-LABEL: @test_extrq_arg0(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> %y) #1
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> [[X:%.*]], <16 x i8> [[Y:%.*]]) #1
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
   %1 = shufflevector <2 x i64> %x, <2 x i64> undef, <2 x i32>
@@ -289,7 +289,7 @@ define <2 x i64> @test_extrq_arg0(<2 x i64> %x, <16 x i8> %y) {

 define <2 x i64> @test_extrq_arg1(<2 x i64> %x, <16 x i8> %y) {
 ; CHECK-LABEL: @test_extrq_arg1(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> %y) #1
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> [[X:%.*]], <16 x i8> [[Y:%.*]]) #1
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
   %1 = shufflevector <16 x i8> %y, <16 x i8> undef, <16 x i32>
@@ -299,7 +299,7 @@ define <2 x i64> @test_extrq_arg1(<2 x i64> %x, <16 x i8> %y) {

 define <2 x i64> @test_extrq_args01(<2 x i64> %x, <16 x i8> %y) {
 ; CHECK-LABEL: @test_extrq_args01(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> %y) #1
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> [[X:%.*]], <16 x i8> [[Y:%.*]]) #1
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
   %1 = shufflevector <2 x i64> %x, <2 x i64> undef, <2 x i32>
@@ -319,7 +319,7 @@ define <2 x i64> @test_extrq_ret(<2 x i64> %x, <16 x i8> %y) {

 define <2 x i64> @test_extrqi_arg0(<2 x i64> %x) {
 ; CHECK-LABEL: @test_extrqi_arg0(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %x, i8 3, i8 2)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> [[X:%.*]], i8 3, i8 2)
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
   %1 = shufflevector <2 x i64> %x, <2 x i64> undef, <2 x i32>
@@ -338,7 +338,7 @@ define <2 x i64> @test_extrqi_ret(<2 x i64> %x) {

 define <2 x i64> @test_insertq_arg0(<2 x i64> %x, <2 x i64> %y) {
 ; CHECK-LABEL: @test_insertq_arg0(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> %y) #1
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> [[X:%.*]], <2 x i64> [[Y:%.*]]) #1
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
   %1 = shufflevector <2 x i64> %x, <2 x i64> undef, <2 x i32>
@@ -357,7 +357,7 @@ define <2 x i64> @test_insertq_ret(<2 x i64> %x, <2 x i64> %y) {

 define <2 x i64> @test_insertqi_arg0(<2 x i64> %x, <2 x i64> %y) {
 ; CHECK-LABEL: @test_insertqi_arg0(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> %y, i8 3, i8 2) #1
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> [[X:%.*]], <2 x i64> [[Y:%.*]], i8 3, i8 2) #1
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
   %1 = shufflevector <2 x i64> %x, <2 x i64> undef, <2 x i32>
@@ -367,7 +367,7 @@ define <2 x i64> @test_insertqi_arg0(<2 x i64> %x, <2 x i64> %y) {

 define <2 x i64> @test_insertqi_arg1(<2 x i64> %x, <2 x i64> %y) {
 ; CHECK-LABEL: @test_insertqi_arg1(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> %y, i8 3, i8 2) #1
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> [[X:%.*]], <2 x i64> [[Y:%.*]], i8 3, i8 2) #1
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
   %1 = shufflevector <2 x i64> %y, <2 x i64> undef, <2 x i32>
@@ -377,7 +377,7 @@ define <2 x i64> @test_insertqi_arg1(<2 x i64> %x, <2 x i64> %y) {

 define <2 x i64> @test_insertqi_args01(<2 x i64> %x, <2 x i64> %y) {
 ; CHECK-LABEL: @test_insertqi_args01(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> %y, i8 3, i8 2) #1
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> [[X:%.*]], <2 x i64> [[Y:%.*]], i8 3, i8 2) #1
 ; CHECK-NEXT: ret <2 x i64> [[TMP1]]
 ;
   %1 = shufflevector <2 x i64> %x, <2 x i64> undef, <2 x i32>
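
Editor's note (not part of the patch): the i8 immediate passed to @llvm.x86.sse41.insertps in the tests above follows the SSE4.1 insertps encoding, where bits 7:6 select the source lane of the second operand, bits 5:4 select the destination lane in the first operand, and bits 3:0 form a zero mask applied after the insertion. That is why, for example, imm8 0x0f folds to zeroinitializer and 0xc1 folds to inserting 0.0 into lane 0. The following is a minimal, hypothetical C sketch (using the real _mm_insert_ps intrinsic from <smmintrin.h>) that demonstrates the same immediates; the file and variable names are illustrative only.

// Build with an SSE4.1-capable compiler, e.g.:  cc -msse4.1 insertps_demo.c
#include <smmintrin.h>
#include <stdio.h>

static void print4(const char *name, __m128 v) {
  float f[4];
  _mm_storeu_ps(f, v);   // spill the vector so we can print each lane
  printf("%s = { %g, %g, %g, %g }\n", name, f[0], f[1], f[2], f[3]);
}

int main(void) {
  __m128 v1 = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
  __m128 v2 = _mm_setr_ps(5.0f, 6.0f, 7.0f, 8.0f);

  // imm8 = 0x0f: zero mask 0b1111 clears every lane, so the result is all
  // zeros regardless of the inputs (matches @insertps_0x0f -> zeroinitializer).
  print4("0x0f", _mm_insert_ps(v1, v2, 0x0f));

  // imm8 = 0xc1: lane 3 of v2 is inserted into lane 0 of v1, but the zero
  // mask 0b0001 then clears lane 0, so v2 is irrelevant
  // (matches @insertps_0xc1 -> insertelement of 0.0 into lane 0).
  print4("0xc1", _mm_insert_ps(v1, v2, 0xc1));

  // imm8 = 0x10: no zero mask; lane 0 of v2 replaces lane 1 of v1, which is
  // exactly a two-operand shuffle (matches @insertps_0x10).
  print4("0x10", _mm_insert_ps(v1, v2, 0x10));
  return 0;
}

The sse4a extrq/extrqi and insertq/insertqi tests in the second file exercise analogous bit-length/bit-index immediates (e.g. length 64 at index 0 simply returns the second operand, as @testInsert64Bits checks), but they are not modeled in this sketch.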