From 106307aa134fa18e305cb3486c3a013464028c24 Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Wed, 26 Jul 2017 10:54:51 +0000 Subject: [PATCH] [X86][AVX] Regenerated and cleaned up AVX1 intrinsic tests. Cleaned up triple settings, added 32-bit/64-bit targets where useful, added broadcast comments llvm-svn: 309100 --- llvm/test/CodeGen/X86/avx-basic.ll | 7 + .../test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll | 651 +++++++++++------ llvm/test/CodeGen/X86/avx-intrinsics-x86.ll | 770 ++++++++++----------- llvm/test/CodeGen/X86/avx-intrinsics-x86_64.ll | 20 +- llvm/test/CodeGen/X86/avx-select.ll | 65 +- llvm/test/CodeGen/X86/avx-vbroadcastf128.ll | 56 +- llvm/test/CodeGen/X86/avx-vperm2x128.ll | 138 ++-- 7 files changed, 989 insertions(+), 718 deletions(-) diff --git a/llvm/test/CodeGen/X86/avx-basic.ll b/llvm/test/CodeGen/X86/avx-basic.ll index 6869d08..2a956d08 100644 --- a/llvm/test/CodeGen/X86/avx-basic.ll +++ b/llvm/test/CodeGen/X86/avx-basic.ll @@ -12,6 +12,7 @@ define void @zero128() nounwind ssp { ; CHECK-NEXT: movq _z@{{.*}}(%rip), %rax ; CHECK-NEXT: vmovaps %xmm0, (%rax) ; CHECK-NEXT: retq +; CHECK-NEXT: ## -- End function store <4 x float> zeroinitializer, <4 x float>* @z, align 16 ret void } @@ -26,6 +27,7 @@ define void @zero256() nounwind ssp { ; CHECK-NEXT: vmovaps %ymm0, (%rax) ; CHECK-NEXT: vzeroupper ; CHECK-NEXT: retq +; CHECK-NEXT: ## -- End function store <8 x float> zeroinitializer, <8 x float>* @x, align 32 store <4 x double> zeroinitializer, <4 x double>* @y, align 32 ret void @@ -39,6 +41,7 @@ define void @ones([0 x float]* nocapture %RET, [0 x float]* nocapture %aFOO) nou ; CHECK-NEXT: vmovaps %ymm0, (%rdi) ; CHECK-NEXT: vzeroupper ; CHECK-NEXT: retq +; CHECK-NEXT: ## -- End function allocas: %ptr2vec615 = bitcast [0 x float]* %RET to <8 x float>* store <8 x float> * store <8 x i32> , <8 x i32>* %ptr2vec615, align 32 @@ -79,6 +83,7 @@ define <8 x i32> @VMOVZQI2PQI([0 x float]* nocapture %aFOO) nounwind { ; CHECK-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] ; CHECK-NEXT: retq +; CHECK-NEXT: ## -- End function %ptrcast.i33.i = bitcast [0 x float]* %aFOO to i32* %val.i34.i = load i32, i32* %ptrcast.i33.i, align 4 %ptroffset.i22.i992 = getelementptr [0 x float], [0 x float]* %aFOO, i64 0, i64 1 @@ -97,6 +102,7 @@ define <16 x float> @fneg(<16 x float> %a) nounwind { ; CHECK-NEXT: vxorps %ymm2, %ymm0, %ymm0 ; CHECK-NEXT: vxorps %ymm2, %ymm1, %ymm1 ; CHECK-NEXT: retq +; CHECK-NEXT: ## -- End function %1 = fsub <16 x float> , %a ret <16 x float> %1 } @@ -108,6 +114,7 @@ define <16 x i16> @build_vec_16x16(i16 %a) nounwind readonly { ; CHECK-NEXT: movzwl %di, %eax ; CHECK-NEXT: vmovd %eax, %xmm0 ; CHECK-NEXT: retq +; CHECK-NEXT: ## -- End function %res = insertelement <16 x i16> , i16 %a, i32 0 ret <16 x i16> %res } diff --git a/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll b/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll index 2bcb083..8d7b894 100644 --- a/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll +++ b/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll @@ -1,33 +1,49 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=avx | FileCheck %s +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X86 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64 ; We don't check any vinsertf128 variant with immediate 0 because that's just a blend. 
define <4 x double> @test_x86_avx_vinsertf128_pd_256_1(<4 x double> %a0, <2 x double> %a1) { -; CHECK-LABEL: test_x86_avx_vinsertf128_pd_256_1: -; CHECK: ## BB#0: -; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_vinsertf128_pd_256_1: +; X86: # BB#0: +; X86-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_vinsertf128_pd_256_1: +; X64: # BB#0: +; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; X64-NEXT: retq %res = call <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double> %a0, <2 x double> %a1, i8 1) ret <4 x double> %res } declare <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double>, <2 x double>, i8) nounwind readnone define <8 x float> @test_x86_avx_vinsertf128_ps_256_1(<8 x float> %a0, <4 x float> %a1) { -; CHECK-LABEL: test_x86_avx_vinsertf128_ps_256_1: -; CHECK: ## BB#0: -; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_vinsertf128_ps_256_1: +; X86: # BB#0: +; X86-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_vinsertf128_ps_256_1: +; X64: # BB#0: +; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; X64-NEXT: retq %res = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %a0, <4 x float> %a1, i8 1) ret <8 x float> %res } declare <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float>, <4 x float>, i8) nounwind readnone define <8 x i32> @test_x86_avx_vinsertf128_si_256_1(<8 x i32> %a0, <4 x i32> %a1) { -; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_1: -; CHECK: ## BB#0: -; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_vinsertf128_si_256_1: +; X86: # BB#0: +; X86-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_vinsertf128_si_256_1: +; X64: # BB#0: +; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; X64-NEXT: retq %res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 1) ret <8 x i32> %res } @@ -36,11 +52,17 @@ define <8 x i32> @test_x86_avx_vinsertf128_si_256_1(<8 x i32> %a0, <4 x i32> %a1 ; of a vinsertf128 $0 which should be optimized into a blend, so just check that it's ; not a vinsertf128 $1. define <8 x i32> @test_x86_avx_vinsertf128_si_256_2(<8 x i32> %a0, <4 x i32> %a1) { -; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_2: -; CHECK: ## BB#0: -; CHECK-NEXT: ## kill: %XMM1 %XMM1 %YMM1 -; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_vinsertf128_si_256_2: +; X86: # BB#0: +; X86-NEXT: # kill: %XMM1 %XMM1 %YMM1 +; X86-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_vinsertf128_si_256_2: +; X64: # BB#0: +; X64-NEXT: # kill: %XMM1 %XMM1 %YMM1 +; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3] +; X64-NEXT: retq %res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 2) ret <8 x i32> %res } @@ -49,33 +71,51 @@ declare <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32>, <4 x i32>, i8) nou ; We don't check any vextractf128 variant with immediate 0 because that's just a move. 
define <2 x double> @test_x86_avx_vextractf128_pd_256_1(<4 x double> %a0) { -; CHECK-LABEL: test_x86_avx_vextractf128_pd_256_1: -; CHECK: ## BB#0: -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 -; CHECK-NEXT: vzeroupper -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_vextractf128_pd_256_1: +; X86: # BB#0: +; X86-NEXT: vextractf128 $1, %ymm0, %xmm0 +; X86-NEXT: vzeroupper +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_vextractf128_pd_256_1: +; X64: # BB#0: +; X64-NEXT: vextractf128 $1, %ymm0, %xmm0 +; X64-NEXT: vzeroupper +; X64-NEXT: retq %res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 1) ret <2 x double> %res } declare <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double>, i8) nounwind readnone define <4 x float> @test_x86_avx_vextractf128_ps_256_1(<8 x float> %a0) { -; CHECK-LABEL: test_x86_avx_vextractf128_ps_256_1: -; CHECK: ## BB#0: -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 -; CHECK-NEXT: vzeroupper -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_vextractf128_ps_256_1: +; X86: # BB#0: +; X86-NEXT: vextractf128 $1, %ymm0, %xmm0 +; X86-NEXT: vzeroupper +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_vextractf128_ps_256_1: +; X64: # BB#0: +; X64-NEXT: vextractf128 $1, %ymm0, %xmm0 +; X64-NEXT: vzeroupper +; X64-NEXT: retq %res = call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a0, i8 1) ret <4 x float> %res } declare <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float>, i8) nounwind readnone define <4 x i32> @test_x86_avx_vextractf128_si_256_1(<8 x i32> %a0) { -; CHECK-LABEL: test_x86_avx_vextractf128_si_256_1: -; CHECK: ## BB#0: -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 -; CHECK-NEXT: vzeroupper -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_vextractf128_si_256_1: +; X86: # BB#0: +; X86-NEXT: vextractf128 $1, %ymm0, %xmm0 +; X86-NEXT: vzeroupper +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_vextractf128_si_256_1: +; X64: # BB#0: +; X64-NEXT: vextractf128 $1, %ymm0, %xmm0 +; X64-NEXT: vzeroupper +; X64-NEXT: retq %res = call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %a0, i8 1) ret <4 x i32> %res } @@ -85,22 +125,33 @@ declare <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32>, i8) nounwind read ; of a vextractf128 $0 which should be optimized away, so just check that it's ; not a vextractf128 of any kind. 
define <2 x double> @test_x86_avx_extractf128_pd_256_2(<4 x double> %a0) { -; CHECK-LABEL: test_x86_avx_extractf128_pd_256_2: -; CHECK: ## BB#0: -; CHECK-NEXT: ## kill: %XMM0 %XMM0 %YMM0 -; CHECK-NEXT: vzeroupper -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_extractf128_pd_256_2: +; X86: # BB#0: +; X86-NEXT: # kill: %XMM0 %XMM0 %YMM0 +; X86-NEXT: vzeroupper +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_extractf128_pd_256_2: +; X64: # BB#0: +; X64-NEXT: # kill: %XMM0 %XMM0 %YMM0 +; X64-NEXT: vzeroupper +; X64-NEXT: retq %res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 2) ret <2 x double> %res } define <4 x double> @test_x86_avx_vbroadcastf128_pd_256(i8* %a0) { -; CHECK-LABEL: test_x86_avx_vbroadcastf128_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax -; CHECK-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_vbroadcastf128_pd_256: +; X86: # BB#0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_vbroadcastf128_pd_256: +; X64: # BB#0: +; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] +; X64-NEXT: retq %res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1] ret <4 x double> %res } @@ -108,11 +159,16 @@ declare <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8*) nounwind readonly define <8 x float> @test_x86_avx_vbroadcastf128_ps_256(i8* %a0) { -; CHECK-LABEL: test_x86_avx_vbroadcastf128_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax -; CHECK-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_vbroadcastf128_ps_256: +; X86: # BB#0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_vbroadcastf128_ps_256: +; X64: # BB#0: +; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] +; X64-NEXT: retq %res = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %a0) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -120,10 +176,15 @@ declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly define <4 x double> @test_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) { -; CHECK-LABEL: test_x86_avx_blend_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3] -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_blend_pd_256: +; X86: # BB#0: +; X86-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3] +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_blend_pd_256: +; X64: # BB#0: +; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3] +; X64-NEXT: retq %res = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 7) ; <<4 x double>> [#uses=1] ret <4 x double> %res } @@ -131,10 +192,15 @@ declare <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double>, <4 x double>, i32) define <8 x float> @test_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1) { -; CHECK-LABEL: test_x86_avx_blend_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7] -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_blend_ps_256: +; X86: # BB#0: +; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7] +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_blend_ps_256: +; X64: # BB#0: +; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7] +; X64-NEXT: retq %res = call <8 x float> 
@llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 7) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -142,10 +208,15 @@ declare <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float>, <8 x float>, i32) no define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) { -; CHECK-LABEL: test_x86_avx_dp_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_dp_ps_256: +; X86: # BB#0: +; X86-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_dp_ps_256: +; X64: # BB#0: +; X64-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 +; X64-NEXT: retq %res = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i32 7) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -153,10 +224,15 @@ declare <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float>, <8 x float>, i32) nounw define <2 x i64> @test_x86_sse2_psll_dq(<2 x i64> %a0) { -; CHECK-LABEL: test_x86_sse2_psll_dq: -; CHECK: ## BB#0: -; CHECK-NEXT: vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14] -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse2_psll_dq: +; X86: # BB#0: +; X86-NEXT: vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14] +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse2_psll_dq: +; X64: # BB#0: +; X64-NEXT: vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14] +; X64-NEXT: retq %res = call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1] ret <2 x i64> %res } @@ -164,10 +240,15 @@ declare <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64>, i32) nounwind readnone define <2 x i64> @test_x86_sse2_psrl_dq(<2 x i64> %a0) { -; CHECK-LABEL: test_x86_sse2_psrl_dq: -; CHECK: ## BB#0: -; CHECK-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse2_psrl_dq: +; X86: # BB#0: +; X86-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse2_psrl_dq: +; X64: # BB#0: +; X64-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero +; X64-NEXT: retq %res = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1] ret <2 x i64> %res } @@ -175,10 +256,15 @@ declare <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64>, i32) nounwind readnone define <2 x double> @test_x86_sse41_blendpd(<2 x double> %a0, <2 x double> %a1) { -; CHECK-LABEL: test_x86_sse41_blendpd: -; CHECK: ## BB#0: -; CHECK-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse41_blendpd: +; X86: # BB#0: +; X86-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse41_blendpd: +; X64: # BB#0: +; X64-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] +; X64-NEXT: retq %res = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i8 2) ; <<2 x double>> [#uses=1] ret <2 x double> %res } @@ -186,10 +272,15 @@ declare <2 x double> @llvm.x86.sse41.blendpd(<2 x double>, <2 x double>, i8) nou define <4 x float> @test_x86_sse41_blendps(<4 x float> %a0, <4 x float> %a1) { -; CHECK-LABEL: test_x86_sse41_blendps: -; CHECK: ## BB#0: -; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3] -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse41_blendps: +; X86: # BB#0: +; X86-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3] +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse41_blendps: +; X64: # BB#0: +; X64-NEXT: vblendps {{.*#+}} xmm0 = 
xmm1[0,1,2],xmm0[3] +; X64-NEXT: retq %res = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1] ret <4 x float> %res } @@ -197,10 +288,15 @@ declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i8) nounwi define <8 x i16> @test_x86_sse41_pblendw(<8 x i16> %a0, <8 x i16> %a1) { -; CHECK-LABEL: test_x86_sse41_pblendw: -; CHECK: ## BB#0: -; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7] -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse41_pblendw: +; X86: # BB#0: +; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7] +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse41_pblendw: +; X64: # BB#0: +; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7] +; X64-NEXT: retq %res = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i8 7) ; <<8 x i16>> [#uses=1] ret <8 x i16> %res } @@ -208,10 +304,15 @@ declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i8) nounwind rea define <4 x i32> @test_x86_sse41_pmovsxbd(<16 x i8> %a0) { -; CHECK-LABEL: test_x86_sse41_pmovsxbd: -; CHECK: ## BB#0: -; CHECK-NEXT: vpmovsxbd %xmm0, %xmm0 -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse41_pmovsxbd: +; X86: # BB#0: +; X86-NEXT: vpmovsxbd %xmm0, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse41_pmovsxbd: +; X64: # BB#0: +; X64-NEXT: vpmovsxbd %xmm0, %xmm0 +; X64-NEXT: retq %res = call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1] ret <4 x i32> %res } @@ -219,10 +320,15 @@ declare <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8>) nounwind readnone define <2 x i64> @test_x86_sse41_pmovsxbq(<16 x i8> %a0) { -; CHECK-LABEL: test_x86_sse41_pmovsxbq: -; CHECK: ## BB#0: -; CHECK-NEXT: vpmovsxbq %xmm0, %xmm0 -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse41_pmovsxbq: +; X86: # BB#0: +; X86-NEXT: vpmovsxbq %xmm0, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse41_pmovsxbq: +; X64: # BB#0: +; X64-NEXT: vpmovsxbq %xmm0, %xmm0 +; X64-NEXT: retq %res = call <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1] ret <2 x i64> %res } @@ -230,10 +336,15 @@ declare <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8>) nounwind readnone define <8 x i16> @test_x86_sse41_pmovsxbw(<16 x i8> %a0) { -; CHECK-LABEL: test_x86_sse41_pmovsxbw: -; CHECK: ## BB#0: -; CHECK-NEXT: vpmovsxbw %xmm0, %xmm0 -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse41_pmovsxbw: +; X86: # BB#0: +; X86-NEXT: vpmovsxbw %xmm0, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse41_pmovsxbw: +; X64: # BB#0: +; X64-NEXT: vpmovsxbw %xmm0, %xmm0 +; X64-NEXT: retq %res = call <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1] ret <8 x i16> %res } @@ -241,10 +352,15 @@ declare <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8>) nounwind readnone define <2 x i64> @test_x86_sse41_pmovsxdq(<4 x i32> %a0) { -; CHECK-LABEL: test_x86_sse41_pmovsxdq: -; CHECK: ## BB#0: -; CHECK-NEXT: vpmovsxdq %xmm0, %xmm0 -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse41_pmovsxdq: +; X86: # BB#0: +; X86-NEXT: vpmovsxdq %xmm0, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse41_pmovsxdq: +; X64: # BB#0: +; X64-NEXT: vpmovsxdq %xmm0, %xmm0 +; X64-NEXT: retq %res = call <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1] ret <2 x i64> %res } @@ -252,10 +368,15 @@ declare <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32>) nounwind readnone define <4 x i32> @test_x86_sse41_pmovsxwd(<8 x i16> %a0) { -; CHECK-LABEL: test_x86_sse41_pmovsxwd: -; CHECK: ## BB#0: -; CHECK-NEXT: vpmovsxwd %xmm0, 
%xmm0 -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse41_pmovsxwd: +; X86: # BB#0: +; X86-NEXT: vpmovsxwd %xmm0, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse41_pmovsxwd: +; X64: # BB#0: +; X64-NEXT: vpmovsxwd %xmm0, %xmm0 +; X64-NEXT: retq %res = call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1] ret <4 x i32> %res } @@ -263,10 +384,15 @@ declare <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16>) nounwind readnone define <2 x i64> @test_x86_sse41_pmovsxwq(<8 x i16> %a0) { -; CHECK-LABEL: test_x86_sse41_pmovsxwq: -; CHECK: ## BB#0: -; CHECK-NEXT: vpmovsxwq %xmm0, %xmm0 -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse41_pmovsxwq: +; X86: # BB#0: +; X86-NEXT: vpmovsxwq %xmm0, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse41_pmovsxwq: +; X64: # BB#0: +; X64-NEXT: vpmovsxwq %xmm0, %xmm0 +; X64-NEXT: retq %res = call <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1] ret <2 x i64> %res } @@ -274,10 +400,15 @@ declare <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16>) nounwind readnone define <4 x i32> @test_x86_sse41_pmovzxbd(<16 x i8> %a0) { -; CHECK-LABEL: test_x86_sse41_pmovzxbd: -; CHECK: ## BB#0: -; CHECK-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse41_pmovzxbd: +; X86: # BB#0: +; X86-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse41_pmovzxbd: +; X64: # BB#0: +; X64-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; X64-NEXT: retq %res = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1] ret <4 x i32> %res } @@ -285,10 +416,15 @@ declare <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8>) nounwind readnone define <2 x i64> @test_x86_sse41_pmovzxbq(<16 x i8> %a0) { -; CHECK-LABEL: test_x86_sse41_pmovzxbq: -; CHECK: ## BB#0: -; CHECK-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse41_pmovzxbq: +; X86: # BB#0: +; X86-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse41_pmovzxbq: +; X64: # BB#0: +; X64-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero +; X64-NEXT: retq %res = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1] ret <2 x i64> %res } @@ -296,10 +432,15 @@ declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone define <8 x i16> @test_x86_sse41_pmovzxbw(<16 x i8> %a0) { -; CHECK-LABEL: test_x86_sse41_pmovzxbw: -; CHECK: ## BB#0: -; CHECK-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse41_pmovzxbw: +; X86: # BB#0: +; X86-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse41_pmovzxbw: +; X64: # BB#0: +; X64-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; X64-NEXT: retq %res = call <8 x i16> 
@llvm.x86.sse41.pmovzxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1] ret <8 x i16> %res } @@ -307,10 +448,15 @@ declare <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8>) nounwind readnone define <2 x i64> @test_x86_sse41_pmovzxdq(<4 x i32> %a0) { -; CHECK-LABEL: test_x86_sse41_pmovzxdq: -; CHECK: ## BB#0: -; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse41_pmovzxdq: +; X86: # BB#0: +; X86-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse41_pmovzxdq: +; X64: # BB#0: +; X64-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero +; X64-NEXT: retq %res = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1] ret <2 x i64> %res } @@ -318,10 +464,15 @@ declare <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32>) nounwind readnone define <4 x i32> @test_x86_sse41_pmovzxwd(<8 x i16> %a0) { -; CHECK-LABEL: test_x86_sse41_pmovzxwd: -; CHECK: ## BB#0: -; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse41_pmovzxwd: +; X86: # BB#0: +; X86-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse41_pmovzxwd: +; X64: # BB#0: +; X64-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; X64-NEXT: retq %res = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1] ret <4 x i32> %res } @@ -329,10 +480,15 @@ declare <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16>) nounwind readnone define <2 x i64> @test_x86_sse41_pmovzxwq(<8 x i16> %a0) { -; CHECK-LABEL: test_x86_sse41_pmovzxwq: -; CHECK: ## BB#0: -; CHECK-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse41_pmovzxwq: +; X86: # BB#0: +; X86-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse41_pmovzxwq: +; X64: # BB#0: +; X64-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero +; X64-NEXT: retq %res = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1] ret <2 x i64> %res } @@ -340,10 +496,15 @@ declare <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16>) nounwind readnone define <2 x double> @test_x86_sse2_cvtdq2pd(<4 x i32> %a0) { -; CHECK-LABEL: test_x86_sse2_cvtdq2pd: -; CHECK: ## BB#0: -; CHECK-NEXT: vcvtdq2pd %xmm0, %xmm0 -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse2_cvtdq2pd: +; X86: # BB#0: +; X86-NEXT: vcvtdq2pd %xmm0, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse2_cvtdq2pd: +; X64: # BB#0: +; X64-NEXT: vcvtdq2pd %xmm0, %xmm0 +; X64-NEXT: retq %res = call <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32> %a0) ; <<2 x double>> [#uses=1] ret <2 x double> %res } @@ -351,10 +512,15 @@ declare <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32>) nounwind readnone define <4 x double> @test_x86_avx_cvtdq2_pd_256(<4 x i32> %a0) { -; CHECK-LABEL: test_x86_avx_cvtdq2_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vcvtdq2pd %xmm0, %ymm0 -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_cvtdq2_pd_256: +; X86: # BB#0: +; X86-NEXT: vcvtdq2pd %xmm0, %ymm0 +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_cvtdq2_pd_256: +; X64: # BB#0: +; X64-NEXT: vcvtdq2pd %xmm0, %ymm0 +; X64-NEXT: retq %res = call <4 x double> @llvm.x86.avx.cvtdq2.pd.256(<4 x i32> %a0) ; <<4 x double>> [#uses=1] ret <4 x double> %res } @@ -362,10 +528,15 @@ declare <4 x 
double> @llvm.x86.avx.cvtdq2.pd.256(<4 x i32>) nounwind readnone define <2 x double> @test_x86_sse2_cvtps2pd(<4 x float> %a0) { -; CHECK-LABEL: test_x86_sse2_cvtps2pd: -; CHECK: ## BB#0: -; CHECK-NEXT: vcvtps2pd %xmm0, %xmm0 -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse2_cvtps2pd: +; X86: # BB#0: +; X86-NEXT: vcvtps2pd %xmm0, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse2_cvtps2pd: +; X64: # BB#0: +; X64-NEXT: vcvtps2pd %xmm0, %xmm0 +; X64-NEXT: retq %res = call <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float> %a0) ; <<2 x double>> [#uses=1] ret <2 x double> %res } @@ -373,10 +544,15 @@ declare <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float>) nounwind readnone define <4 x double> @test_x86_avx_cvt_ps2_pd_256(<4 x float> %a0) { -; CHECK-LABEL: test_x86_avx_cvt_ps2_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vcvtps2pd %xmm0, %ymm0 -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_cvt_ps2_pd_256: +; X86: # BB#0: +; X86-NEXT: vcvtps2pd %xmm0, %ymm0 +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_cvt_ps2_pd_256: +; X64: # BB#0: +; X64-NEXT: vcvtps2pd %xmm0, %ymm0 +; X64-NEXT: retq %res = call <4 x double> @llvm.x86.avx.cvt.ps2.pd.256(<4 x float> %a0) ; <<4 x double>> [#uses=1] ret <4 x double> %res } @@ -385,13 +561,20 @@ declare <4 x double> @llvm.x86.avx.cvt.ps2.pd.256(<4 x float>) nounwind readnone define void @test_x86_sse2_storeu_dq(i8* %a0, <16 x i8> %a1) { ; add operation forces the execution domain. -; CHECK-LABEL: test_x86_sse2_storeu_dq: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax -; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 -; CHECK-NEXT: vpsubb %xmm1, %xmm0, %xmm0 -; CHECK-NEXT: vmovdqu %xmm0, (%eax) -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse2_storeu_dq: +; X86: # BB#0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; X86-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; X86-NEXT: vmovdqu %xmm0, (%eax) +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse2_storeu_dq: +; X64: # BB#0: +; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; X64-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; X64-NEXT: vmovdqu %xmm0, (%rdi) +; X64-NEXT: retq %a2 = add <16 x i8> %a1, call void @llvm.x86.sse2.storeu.dq(i8* %a0, <16 x i8> %a2) ret void @@ -401,14 +584,22 @@ declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) { ; fadd operation forces the execution domain. 
-; CHECK-LABEL: test_x86_sse2_storeu_pd: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax -; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1 -; CHECK-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] -; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0 -; CHECK-NEXT: vmovupd %xmm0, (%eax) -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse2_storeu_pd: +; X86: # BB#0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1 +; X86-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; X86-NEXT: vaddpd %xmm1, %xmm0, %xmm0 +; X86-NEXT: vmovupd %xmm0, (%eax) +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse2_storeu_pd: +; X64: # BB#0: +; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1 +; X64-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; X64-NEXT: vaddpd %xmm1, %xmm0, %xmm0 +; X64-NEXT: vmovupd %xmm0, (%rdi) +; X64-NEXT: retq %a2 = fadd <2 x double> %a1, call void @llvm.x86.sse2.storeu.pd(i8* %a0, <2 x double> %a2) ret void @@ -417,11 +608,16 @@ declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind define void @test_x86_sse_storeu_ps(i8* %a0, <4 x float> %a1) { -; CHECK-LABEL: test_x86_sse_storeu_ps: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax -; CHECK-NEXT: vmovups %xmm0, (%eax) -; CHECK-NEXT: retl +; X86-LABEL: test_x86_sse_storeu_ps: +; X86: # BB#0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %xmm0, (%eax) +; X86-NEXT: retl +; +; X64-LABEL: test_x86_sse_storeu_ps: +; X64: # BB#0: +; X64-NEXT: vmovups %xmm0, (%rdi) +; X64-NEXT: retq call void @llvm.x86.sse.storeu.ps(i8* %a0, <4 x float> %a1) ret void } @@ -431,17 +627,28 @@ declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind define void @test_x86_avx_storeu_dq_256(i8* %a0, <32 x i8> %a1) { ; FIXME: unfortunately the execution domain fix pass changes this to vmovups and its hard to force with no 256-bit integer instructions ; add operation forces the execution domain. -; CHECK-LABEL: test_x86_avx_storeu_dq_256: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 -; CHECK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; CHECK-NEXT: vpsubb %xmm2, %xmm1, %xmm1 -; CHECK-NEXT: vpsubb %xmm2, %xmm0, %xmm0 -; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; CHECK-NEXT: vmovups %ymm0, (%eax) -; CHECK-NEXT: vzeroupper -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_storeu_dq_256: +; X86: # BB#0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vextractf128 $1, %ymm0, %xmm1 +; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; X86-NEXT: vpsubb %xmm2, %xmm1, %xmm1 +; X86-NEXT: vpsubb %xmm2, %xmm0, %xmm0 +; X86-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; X86-NEXT: vmovups %ymm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_storeu_dq_256: +; X64: # BB#0: +; X64-NEXT: vextractf128 $1, %ymm0, %xmm1 +; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; X64-NEXT: vpsubb %xmm2, %xmm1, %xmm1 +; X64-NEXT: vpsubb %xmm2, %xmm0, %xmm0 +; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; X64-NEXT: vmovups %ymm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq %a2 = add <32 x i8> %a1, call void @llvm.x86.avx.storeu.dq.256(i8* %a0, <32 x i8> %a2) ret void @@ -451,14 +658,22 @@ declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind define void @test_x86_avx_storeu_pd_256(i8* %a0, <4 x double> %a1) { ; add operation forces the execution domain. 
-; CHECK-LABEL: test_x86_avx_storeu_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax -; CHECK-NEXT: vxorpd %ymm1, %ymm1, %ymm1 -; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0 -; CHECK-NEXT: vmovupd %ymm0, (%eax) -; CHECK-NEXT: vzeroupper -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_storeu_pd_256: +; X86: # BB#0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vxorpd %ymm1, %ymm1, %ymm1 +; X86-NEXT: vaddpd %ymm1, %ymm0, %ymm0 +; X86-NEXT: vmovupd %ymm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_storeu_pd_256: +; X64: # BB#0: +; X64-NEXT: vxorpd %ymm1, %ymm1, %ymm1 +; X64-NEXT: vaddpd %ymm1, %ymm0, %ymm0 +; X64-NEXT: vmovupd %ymm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq %a2 = fadd <4 x double> %a1, call void @llvm.x86.avx.storeu.pd.256(i8* %a0, <4 x double> %a2) ret void @@ -467,12 +682,18 @@ declare void @llvm.x86.avx.storeu.pd.256(i8*, <4 x double>) nounwind define void @test_x86_avx_storeu_ps_256(i8* %a0, <8 x float> %a1) { -; CHECK-LABEL: test_x86_avx_storeu_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax -; CHECK-NEXT: vmovups %ymm0, (%eax) -; CHECK-NEXT: vzeroupper -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_storeu_ps_256: +; X86: # BB#0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: vmovups %ymm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_storeu_ps_256: +; X64: # BB#0: +; X64-NEXT: vmovups %ymm0, (%rdi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq call void @llvm.x86.avx.storeu.ps.256(i8* %a0, <8 x float> %a1) ret void } @@ -480,10 +701,15 @@ declare void @llvm.x86.avx.storeu.ps.256(i8*, <8 x float>) nounwind define <2 x double> @test_x86_avx_vpermil_pd(<2 x double> %a0) { -; CHECK-LABEL: test_x86_avx_vpermil_pd: -; CHECK: ## BB#0: -; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_vpermil_pd: +; X86: # BB#0: +; X86-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_vpermil_pd: +; X64: # BB#0: +; X64-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; X64-NEXT: retq %res = call <2 x double> @llvm.x86.avx.vpermil.pd(<2 x double> %a0, i8 1) ; <<2 x double>> [#uses=1] ret <2 x double> %res } @@ -491,10 +717,15 @@ declare <2 x double> @llvm.x86.avx.vpermil.pd(<2 x double>, i8) nounwind readnon define <4 x double> @test_x86_avx_vpermil_pd_256(<4 x double> %a0) { -; CHECK-LABEL: test_x86_avx_vpermil_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,2] -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_vpermil_pd_256: +; X86: # BB#0: +; X86-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,2] +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_vpermil_pd_256: +; X64: # BB#0: +; X64-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,2] +; X64-NEXT: retq %res = call <4 x double> @llvm.x86.avx.vpermil.pd.256(<4 x double> %a0, i8 7) ; <<4 x double>> [#uses=1] ret <4 x double> %res } @@ -502,10 +733,15 @@ declare <4 x double> @llvm.x86.avx.vpermil.pd.256(<4 x double>, i8) nounwind rea define <4 x float> @test_x86_avx_vpermil_ps(<4 x float> %a0) { -; CHECK-LABEL: test_x86_avx_vpermil_ps: -; CHECK: ## BB#0: -; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,0] -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_vpermil_ps: +; X86: # BB#0: +; X86-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,0] +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_vpermil_ps: +; X64: # BB#0: +; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,0] +; X64-NEXT: retq %res = call <4 x float> 
@llvm.x86.avx.vpermil.ps(<4 x float> %a0, i8 7) ; <<4 x float>> [#uses=1] ret <4 x float> %res } @@ -513,10 +749,15 @@ declare <4 x float> @llvm.x86.avx.vpermil.ps(<4 x float>, i8) nounwind readnone define <8 x float> @test_x86_avx_vpermil_ps_256(<8 x float> %a0) { -; CHECK-LABEL: test_x86_avx_vpermil_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,1,0,0,7,5,4,4] -; CHECK-NEXT: retl +; X86-LABEL: test_x86_avx_vpermil_ps_256: +; X86: # BB#0: +; X86-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,1,0,0,7,5,4,4] +; X86-NEXT: retl +; +; X64-LABEL: test_x86_avx_vpermil_ps_256: +; X64: # BB#0: +; X64-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,1,0,0,7,5,4,4] +; X64-NEXT: retq %res = call <8 x float> @llvm.x86.avx.vpermil.ps.256(<8 x float> %a0, i8 7) ; <<8 x float>> [#uses=1] ret <8 x float> %res } diff --git a/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll b/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll index c1fa2b1..5825b19 100644 --- a/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll +++ b/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll @@ -1,12 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=avx,pclmul -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX -; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx,pclmul -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX +; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL define <4 x double> @test_x86_avx_addsub_pd_256(<4 x double> %a0, <4 x double> %a1) { ; CHECK-LABEL: test_x86_avx_addsub_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd0,0xc1] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xd0,0xc1] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1] ret <4 x double> %res } @@ -15,9 +15,9 @@ declare <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double>, <4 x double>) nou define <8 x float> @test_x86_avx_addsub_ps_256(<8 x float> %a0, <8 x float> %a1) { ; CHECK-LABEL: test_x86_avx_addsub_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xff,0xd0,0xc1] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xff,0xd0,0xc1] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -26,9 +26,9 @@ declare <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float>, <8 x float>) nounwi define <4 x double> @test_x86_avx_blendv_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) { ; CHECK-LABEL: test_x86_avx_blendv_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x4b,0xc1,0x20] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x4b,0xc1,0x20] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) ; <<4 x double>> [#uses=1] 
ret <4 x double> %res } @@ -37,9 +37,9 @@ declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4 define <8 x float> @test_x86_avx_blendv_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) { ; CHECK-LABEL: test_x86_avx_blendv_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x4a,0xc1,0x20] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x4a,0xc1,0x20] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -48,9 +48,9 @@ declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x f define <4 x double> @test_x86_avx_cmp_pd_256(<4 x double> %a0, <4 x double> %a1) { ; CHECK-LABEL: test_x86_avx_cmp_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vcmpordpd %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xc2,0xc1,0x07] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vcmpordpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xc2,0xc1,0x07] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double> %a0, <4 x double> %a1, i8 7) ; <<4 x double>> [#uses=1] ret <4 x double> %res } @@ -59,49 +59,49 @@ declare <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double>, <4 x double>, i8) no define <8 x float> @test_x86_avx_cmp_ps_256(<8 x float> %a0, <8 x float> %a1) { ; CHECK-LABEL: test_x86_avx_cmp_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vcmpordps %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfc,0xc2,0xc1,0x07] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vcmpordps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfc,0xc2,0xc1,0x07] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7) ; <<8 x float>> [#uses=1] ret <8 x float> %res } define <8 x float> @test_x86_avx_cmp_ps_256_pseudo_op(<8 x float> %a0, <8 x float> %a1) { ; CHECK-LABEL: test_x86_avx_cmp_ps_256_pseudo_op: -; CHECK: ## BB#0: -; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x00] -; CHECK-NEXT: vcmpltps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x01] -; CHECK-NEXT: vcmpleps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x02] -; CHECK-NEXT: vcmpunordps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x03] -; CHECK-NEXT: vcmpneqps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x04] -; CHECK-NEXT: vcmpnltps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x05] -; CHECK-NEXT: vcmpnleps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x06] -; CHECK-NEXT: vcmpordps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x07] -; CHECK-NEXT: vcmpeq_uqps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x08] -; CHECK-NEXT: vcmpngeps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x09] -; CHECK-NEXT: vcmpngtps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x0a] -; CHECK-NEXT: vcmpfalseps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x0b] -; CHECK-NEXT: vcmpneq_oqps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x0c] -; CHECK-NEXT: vcmpgeps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x0d] -; CHECK-NEXT: vcmpgtps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x0e] -; CHECK-NEXT: vcmptrueps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x0f] -; 
CHECK-NEXT: vcmpeq_osps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x10] -; CHECK-NEXT: vcmplt_oqps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x11] -; CHECK-NEXT: vcmple_oqps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x12] -; CHECK-NEXT: vcmpunord_sps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x13] -; CHECK-NEXT: vcmpneq_usps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x14] -; CHECK-NEXT: vcmpnlt_uqps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x15] -; CHECK-NEXT: vcmpnle_uqps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x16] -; CHECK-NEXT: vcmpord_sps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x17] -; CHECK-NEXT: vcmpeq_usps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x18] -; CHECK-NEXT: vcmpnge_uqps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x19] -; CHECK-NEXT: vcmpngt_uqps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x1a] -; CHECK-NEXT: vcmpfalse_osps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x1b] -; CHECK-NEXT: vcmpneq_osps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x1c] -; CHECK-NEXT: vcmpge_oqps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x1d] -; CHECK-NEXT: vcmpgt_oqps %ymm1, %ymm0, %ymm1 ## encoding: [0xc5,0xfc,0xc2,0xc9,0x1e] -; CHECK-NEXT: vcmptrue_usps %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfc,0xc2,0xc1,0x1f] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x00] +; CHECK-NEXT: vcmpltps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x01] +; CHECK-NEXT: vcmpleps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x02] +; CHECK-NEXT: vcmpunordps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x03] +; CHECK-NEXT: vcmpneqps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x04] +; CHECK-NEXT: vcmpnltps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x05] +; CHECK-NEXT: vcmpnleps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x06] +; CHECK-NEXT: vcmpordps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x07] +; CHECK-NEXT: vcmpeq_uqps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x08] +; CHECK-NEXT: vcmpngeps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x09] +; CHECK-NEXT: vcmpngtps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x0a] +; CHECK-NEXT: vcmpfalseps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x0b] +; CHECK-NEXT: vcmpneq_oqps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x0c] +; CHECK-NEXT: vcmpgeps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x0d] +; CHECK-NEXT: vcmpgtps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x0e] +; CHECK-NEXT: vcmptrueps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x0f] +; CHECK-NEXT: vcmpeq_osps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x10] +; CHECK-NEXT: vcmplt_oqps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x11] +; CHECK-NEXT: vcmple_oqps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x12] +; CHECK-NEXT: vcmpunord_sps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x13] +; CHECK-NEXT: vcmpneq_usps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x14] +; CHECK-NEXT: vcmpnlt_uqps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x15] +; CHECK-NEXT: vcmpnle_uqps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x16] +; CHECK-NEXT: vcmpord_sps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x17] +; CHECK-NEXT: vcmpeq_usps %ymm1, %ymm0, %ymm1 # 
encoding: [0xc5,0xfc,0xc2,0xc9,0x18] +; CHECK-NEXT: vcmpnge_uqps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x19] +; CHECK-NEXT: vcmpngt_uqps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x1a] +; CHECK-NEXT: vcmpfalse_osps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x1b] +; CHECK-NEXT: vcmpneq_osps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x1c] +; CHECK-NEXT: vcmpge_oqps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x1d] +; CHECK-NEXT: vcmpgt_oqps %ymm1, %ymm0, %ymm1 # encoding: [0xc5,0xfc,0xc2,0xc9,0x1e] +; CHECK-NEXT: vcmptrue_usps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfc,0xc2,0xc1,0x1f] +; CHECK-NEXT: retl # encoding: [0xc3] %a2 = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 0) ; <<8 x float>> [#uses=1] %a3 = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %a0, <8 x float> %a2, i8 1) ; <<8 x float>> [#uses=1] %a4 = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %a0, <8 x float> %a3, i8 2) ; <<8 x float>> [#uses=1] @@ -141,16 +141,16 @@ declare <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float>, <8 x float>, i8) nounw define <4 x float> @test_x86_avx_cvt_pd2_ps_256(<4 x double> %a0) { ; AVX-LABEL: test_x86_avx_cvt_pd2_ps_256: -; AVX: ## BB#0: -; AVX-NEXT: vcvtpd2ps %ymm0, %xmm0 ## encoding: [0xc5,0xfd,0x5a,0xc0] -; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: vcvtpd2ps %ymm0, %xmm0 # encoding: [0xc5,0xfd,0x5a,0xc0] +; AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_cvt_pd2_ps_256: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vcvtpd2ps %ymm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5a,0xc0] -; AVX512VL-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vcvtpd2ps %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5a,0xc0] +; AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; AVX512VL-NEXT: retl # encoding: [0xc3] %res = call <4 x float> @llvm.x86.avx.cvt.pd2.ps.256(<4 x double> %a0) ; <<4 x float>> [#uses=1] ret <4 x float> %res } @@ -159,16 +159,16 @@ declare <4 x float> @llvm.x86.avx.cvt.pd2.ps.256(<4 x double>) nounwind readnone define <4 x i32> @test_x86_avx_cvt_pd2dq_256(<4 x double> %a0) { ; AVX-LABEL: test_x86_avx_cvt_pd2dq_256: -; AVX: ## BB#0: -; AVX-NEXT: vcvtpd2dq %ymm0, %xmm0 ## encoding: [0xc5,0xff,0xe6,0xc0] -; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: vcvtpd2dq %ymm0, %xmm0 # encoding: [0xc5,0xff,0xe6,0xc0] +; AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_cvt_pd2dq_256: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vcvtpd2dq %ymm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xff,0xe6,0xc0] -; AVX512VL-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vcvtpd2dq %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xff,0xe6,0xc0] +; AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; AVX512VL-NEXT: retl # encoding: [0xc3] %res = call <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double> %a0) ; <<4 x i32>> [#uses=1] ret <4 x i32> %res } @@ -177,9 +177,9 @@ declare <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double>) nounwind readnone define <8 x i32> @test_x86_avx_cvt_ps2dq_256(<8 x float> %a0) { ; 
CHECK-LABEL: test_x86_avx_cvt_ps2dq_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vcvtps2dq %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x5b,0xc0] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vcvtps2dq %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x5b,0xc0] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float> %a0) ; <<8 x i32>> [#uses=1] ret <8 x i32> %res } @@ -188,14 +188,14 @@ declare <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float>) nounwind readnone define <8 x float> @test_x86_avx_cvtdq2_ps_256(<8 x i32> %a0) { ; AVX-LABEL: test_x86_avx_cvtdq2_ps_256: -; AVX: ## BB#0: -; AVX-NEXT: vcvtdq2ps %ymm0, %ymm0 ## encoding: [0xc5,0xfc,0x5b,0xc0] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: vcvtdq2ps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x5b,0xc0] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_cvtdq2_ps_256: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vcvtdq2ps %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5b,0xc0] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vcvtdq2ps %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5b,0xc0] +; AVX512VL-NEXT: retl # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx.cvtdq2.ps.256(<8 x i32> %a0) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -204,16 +204,16 @@ declare <8 x float> @llvm.x86.avx.cvtdq2.ps.256(<8 x i32>) nounwind readnone define <4 x i32> @test_x86_avx_cvtt_pd2dq_256(<4 x double> %a0) { ; AVX-LABEL: test_x86_avx_cvtt_pd2dq_256: -; AVX: ## BB#0: -; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0 ## encoding: [0xc5,0xfd,0xe6,0xc0] -; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0 # encoding: [0xc5,0xfd,0xe6,0xc0] +; AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_cvtt_pd2dq_256: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vcvttpd2dq %ymm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe6,0xc0] -; AVX512VL-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vcvttpd2dq %ymm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe6,0xc0] +; AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; AVX512VL-NEXT: retl # encoding: [0xc3] %res = call <4 x i32> @llvm.x86.avx.cvtt.pd2dq.256(<4 x double> %a0) ; <<4 x i32>> [#uses=1] ret <4 x i32> %res } @@ -222,14 +222,14 @@ declare <4 x i32> @llvm.x86.avx.cvtt.pd2dq.256(<4 x double>) nounwind readnone define <8 x i32> @test_x86_avx_cvtt_ps2dq_256(<8 x float> %a0) { ; AVX-LABEL: test_x86_avx_cvtt_ps2dq_256: -; AVX: ## BB#0: -; AVX-NEXT: vcvttps2dq %ymm0, %ymm0 ## encoding: [0xc5,0xfe,0x5b,0xc0] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: vcvttps2dq %ymm0, %ymm0 # encoding: [0xc5,0xfe,0x5b,0xc0] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_cvtt_ps2dq_256: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vcvttps2dq %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0x5b,0xc0] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vcvttps2dq %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfe,0x5b,0xc0] +; AVX512VL-NEXT: retl # encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float> %a0) ; <<8 x i32>> [#uses=1] ret <8 x i32> %res } @@ -238,9 +238,9 @@ declare <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x 
float>) nounwind readnone define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) { ; CHECK-LABEL: test_x86_avx_dp_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x40,0xc1,0x07] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x40,0xc1,0x07] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -249,9 +249,9 @@ declare <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float>, <8 x float>, i8) nounwi define <4 x double> @test_x86_avx_hadd_pd_256(<4 x double> %a0, <4 x double> %a1) { ; CHECK-LABEL: test_x86_avx_hadd_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x7c,0xc1] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x7c,0xc1] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1] ret <4 x double> %res } @@ -260,9 +260,9 @@ declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>) nounw define <8 x float> @test_x86_avx_hadd_ps_256(<8 x float> %a0, <8 x float> %a1) { ; CHECK-LABEL: test_x86_avx_hadd_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vhaddps %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xff,0x7c,0xc1] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xff,0x7c,0xc1] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -271,9 +271,9 @@ declare <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float>, <8 x float>) nounwind define <4 x double> @test_x86_avx_hsub_pd_256(<4 x double> %a0, <4 x double> %a1) { ; CHECK-LABEL: test_x86_avx_hsub_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x7d,0xc1] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x7d,0xc1] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1] ret <4 x double> %res } @@ -282,9 +282,9 @@ declare <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double>, <4 x double>) nounw define <8 x float> @test_x86_avx_hsub_ps_256(<8 x float> %a0, <8 x float> %a1) { ; CHECK-LABEL: test_x86_avx_hsub_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vhsubps %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xff,0x7d,0xc1] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xff,0x7d,0xc1] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -293,10 +293,10 @@ declare <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float>, <8 x float>) nounwind define <32 x i8> @test_x86_avx_ldu_dq_256(i8* %a0) { ; CHECK-LABEL: test_x86_avx_ldu_dq_256: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; CHECK-NEXT: vlddqu (%eax), %ymm0 ## encoding: [0xc5,0xff,0xf0,0x00] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: movl 
{{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; CHECK-NEXT: vlddqu (%eax), %ymm0 # encoding: [0xc5,0xff,0xf0,0x00] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <32 x i8> @llvm.x86.avx.ldu.dq.256(i8* %a0) ; <<32 x i8>> [#uses=1] ret <32 x i8> %res } @@ -305,10 +305,10 @@ declare <32 x i8> @llvm.x86.avx.ldu.dq.256(i8*) nounwind readonly define <2 x double> @test_x86_avx_maskload_pd(i8* %a0, <2 x i64> %mask) { ; CHECK-LABEL: test_x86_avx_maskload_pd: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; CHECK-NEXT: vmaskmovpd (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2d,0x00] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; CHECK-NEXT: vmaskmovpd (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x2d,0x00] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64> %mask) ; <<2 x double>> [#uses=1] ret <2 x double> %res } @@ -317,10 +317,10 @@ declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x i64>) nounwind readonly define <4 x double> @test_x86_avx_maskload_pd_256(i8* %a0, <4 x i64> %mask) { ; CHECK-LABEL: test_x86_avx_maskload_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; CHECK-NEXT: vmaskmovpd (%eax), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x2d,0x00] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; CHECK-NEXT: vmaskmovpd (%eax), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2d,0x00] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x i64> %mask) ; <<4 x double>> [#uses=1] ret <4 x double> %res } @@ -329,10 +329,10 @@ declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8*, <4 x i64>) nounwind read define <4 x float> @test_x86_avx_maskload_ps(i8* %a0, <4 x i32> %mask) { ; CHECK-LABEL: test_x86_avx_maskload_ps: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; CHECK-NEXT: vmaskmovps (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2c,0x00] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; CHECK-NEXT: vmaskmovps (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x2c,0x00] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32> %mask) ; <<4 x float>> [#uses=1] ret <4 x float> %res } @@ -341,10 +341,10 @@ declare <4 x float> @llvm.x86.avx.maskload.ps(i8*, <4 x i32>) nounwind readonly define <8 x float> @test_x86_avx_maskload_ps_256(i8* %a0, <8 x i32> %mask) { ; CHECK-LABEL: test_x86_avx_maskload_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; CHECK-NEXT: vmaskmovps (%eax), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x2c,0x00] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; CHECK-NEXT: vmaskmovps (%eax), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x2c,0x00] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x i32> %mask) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -353,10 +353,10 @@ declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x i32>) nounwind reado define void 
@test_x86_avx_maskstore_pd(i8* %a0, <2 x i64> %mask, <2 x double> %a2) { ; CHECK-LABEL: test_x86_avx_maskstore_pd: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; CHECK-NEXT: vmaskmovpd %xmm1, %xmm0, (%eax) ## encoding: [0xc4,0xe2,0x79,0x2f,0x08] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; CHECK-NEXT: vmaskmovpd %xmm1, %xmm0, (%eax) # encoding: [0xc4,0xe2,0x79,0x2f,0x08] +; CHECK-NEXT: retl # encoding: [0xc3] call void @llvm.x86.avx.maskstore.pd(i8* %a0, <2 x i64> %mask, <2 x double> %a2) ret void } @@ -365,11 +365,11 @@ declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>) nounwind define void @test_x86_avx_maskstore_pd_256(i8* %a0, <4 x i64> %mask, <4 x double> %a2) { ; CHECK-LABEL: test_x86_avx_maskstore_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; CHECK-NEXT: vmaskmovpd %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0x7d,0x2f,0x08] -; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; CHECK-NEXT: vmaskmovpd %ymm1, %ymm0, (%eax) # encoding: [0xc4,0xe2,0x7d,0x2f,0x08] +; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; CHECK-NEXT: retl # encoding: [0xc3] call void @llvm.x86.avx.maskstore.pd.256(i8* %a0, <4 x i64> %mask, <4 x double> %a2) ret void } @@ -378,10 +378,10 @@ declare void @llvm.x86.avx.maskstore.pd.256(i8*, <4 x i64>, <4 x double>) nounwi define void @test_x86_avx_maskstore_ps(i8* %a0, <4 x i32> %mask, <4 x float> %a2) { ; CHECK-LABEL: test_x86_avx_maskstore_ps: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; CHECK-NEXT: vmaskmovps %xmm1, %xmm0, (%eax) ## encoding: [0xc4,0xe2,0x79,0x2e,0x08] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; CHECK-NEXT: vmaskmovps %xmm1, %xmm0, (%eax) # encoding: [0xc4,0xe2,0x79,0x2e,0x08] +; CHECK-NEXT: retl # encoding: [0xc3] call void @llvm.x86.avx.maskstore.ps(i8* %a0, <4 x i32> %mask, <4 x float> %a2) ret void } @@ -390,11 +390,11 @@ declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>) nounwind define void @test_x86_avx_maskstore_ps_256(i8* %a0, <8 x i32> %mask, <8 x float> %a2) { ; CHECK-LABEL: test_x86_avx_maskstore_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; CHECK-NEXT: vmaskmovps %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0x7d,0x2e,0x08] -; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; CHECK-NEXT: vmaskmovps %ymm1, %ymm0, (%eax) # encoding: [0xc4,0xe2,0x7d,0x2e,0x08] +; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; CHECK-NEXT: retl # encoding: [0xc3] call void @llvm.x86.avx.maskstore.ps.256(i8* %a0, <8 x i32> %mask, <8 x float> %a2) ret void } @@ -403,14 +403,14 @@ declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>) nounwin define <4 x double> @test_x86_avx_max_pd_256(<4 x double> %a0, <4 x double> %a1) { ; AVX-LABEL: test_x86_avx_max_pd_256: -; AVX: ## BB#0: -; AVX-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x5f,0xc1] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # 
BB#0: +; AVX-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x5f,0xc1] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_max_pd_256: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5f,0xc1] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5f,0xc1] +; AVX512VL-NEXT: retl # encoding: [0xc3] %res = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1] ret <4 x double> %res } @@ -419,14 +419,14 @@ declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwi define <8 x float> @test_x86_avx_max_ps_256(<8 x float> %a0, <8 x float> %a1) { ; AVX-LABEL: test_x86_avx_max_ps_256: -; AVX: ## BB#0: -; AVX-NEXT: vmaxps %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfc,0x5f,0xc1] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x5f,0xc1] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_max_ps_256: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vmaxps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5f,0xc1] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5f,0xc1] +; AVX512VL-NEXT: retl # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -435,14 +435,14 @@ declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind define <4 x double> @test_x86_avx_min_pd_256(<4 x double> %a0, <4 x double> %a1) { ; AVX-LABEL: test_x86_avx_min_pd_256: -; AVX: ## BB#0: -; AVX-NEXT: vminpd %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x5d,0xc1] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: vminpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x5d,0xc1] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_min_pd_256: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vminpd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5d,0xc1] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vminpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5d,0xc1] +; AVX512VL-NEXT: retl # encoding: [0xc3] %res = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1] ret <4 x double> %res } @@ -451,14 +451,14 @@ declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwi define <8 x float> @test_x86_avx_min_ps_256(<8 x float> %a0, <8 x float> %a1) { ; AVX-LABEL: test_x86_avx_min_ps_256: -; AVX: ## BB#0: -; AVX-NEXT: vminps %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfc,0x5d,0xc1] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: vminps %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x5d,0xc1] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_min_ps_256: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vminps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5d,0xc1] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vminps %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5d,0xc1] +; AVX512VL-NEXT: retl # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x 
float> %a1) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -467,10 +467,10 @@ declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>) nounwind define i32 @test_x86_avx_movmsk_pd_256(<4 x double> %a0) { ; CHECK-LABEL: test_x86_avx_movmsk_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vmovmskpd %ymm0, %eax ## encoding: [0xc5,0xfd,0x50,0xc0] -; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vmovmskpd %ymm0, %eax # encoding: [0xc5,0xfd,0x50,0xc0] +; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0) ; [#uses=1] ret i32 %res } @@ -479,10 +479,10 @@ declare i32 @llvm.x86.avx.movmsk.pd.256(<4 x double>) nounwind readnone define i32 @test_x86_avx_movmsk_ps_256(<8 x float> %a0) { ; CHECK-LABEL: test_x86_avx_movmsk_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vmovmskps %ymm0, %eax ## encoding: [0xc5,0xfc,0x50,0xc0] -; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vmovmskps %ymm0, %eax # encoding: [0xc5,0xfc,0x50,0xc0] +; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0) ; [#uses=1] ret i32 %res } @@ -496,12 +496,12 @@ declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone define i32 @test_x86_avx_ptestc_256(<4 x i64> %a0, <4 x i64> %a1) { ; CHECK-LABEL: test_x86_avx_ptestc_256: -; CHECK: ## BB#0: -; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0] -; CHECK-NEXT: vptest %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x17,0xc1] -; CHECK-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0] -; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] +; CHECK-NEXT: vptest %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x17,0xc1] +; CHECK-NEXT: setb %al # encoding: [0x0f,0x92,0xc0] +; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call i32 @llvm.x86.avx.ptestc.256(<4 x i64> %a0, <4 x i64> %a1) ; [#uses=1] ret i32 %res } @@ -510,12 +510,12 @@ declare i32 @llvm.x86.avx.ptestc.256(<4 x i64>, <4 x i64>) nounwind readnone define i32 @test_x86_avx_ptestnzc_256(<4 x i64> %a0, <4 x i64> %a1) { ; CHECK-LABEL: test_x86_avx_ptestnzc_256: -; CHECK: ## BB#0: -; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0] -; CHECK-NEXT: vptest %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x17,0xc1] -; CHECK-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0] -; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] +; CHECK-NEXT: vptest %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x17,0xc1] +; CHECK-NEXT: seta %al # encoding: [0x0f,0x97,0xc0] +; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call i32 @llvm.x86.avx.ptestnzc.256(<4 x i64> %a0, <4 x i64> %a1) ; [#uses=1] ret i32 %res } @@ -524,12 +524,12 @@ declare i32 @llvm.x86.avx.ptestnzc.256(<4 x i64>, <4 x i64>) nounwind readnone define i32 @test_x86_avx_ptestz_256(<4 x i64> %a0, <4 x i64> %a1) { ; CHECK-LABEL: test_x86_avx_ptestz_256: -; CHECK: ## BB#0: -; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0] -; CHECK-NEXT: vptest %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x17,0xc1] -; CHECK-NEXT: sete 
%al ## encoding: [0x0f,0x94,0xc0] -; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] +; CHECK-NEXT: vptest %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x17,0xc1] +; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0] +; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %a0, <4 x i64> %a1) ; [#uses=1] ret i32 %res } @@ -538,14 +538,14 @@ declare i32 @llvm.x86.avx.ptestz.256(<4 x i64>, <4 x i64>) nounwind readnone define <8 x float> @test_x86_avx_rcp_ps_256(<8 x float> %a0) { ; AVX-LABEL: test_x86_avx_rcp_ps_256: -; AVX: ## BB#0: -; AVX-NEXT: vrcpps %ymm0, %ymm0 ## encoding: [0xc5,0xfc,0x53,0xc0] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: vrcpps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x53,0xc0] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_rcp_ps_256: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vrcp14ps %ymm0, %ymm0 ## encoding: [0x62,0xf2,0x7d,0x28,0x4c,0xc0] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vrcp14ps %ymm0, %ymm0 # encoding: [0x62,0xf2,0x7d,0x28,0x4c,0xc0] +; AVX512VL-NEXT: retl # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -554,9 +554,9 @@ declare <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float>) nounwind readnone define <4 x double> @test_x86_avx_round_pd_256(<4 x double> %a0) { ; CHECK-LABEL: test_x86_avx_round_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vroundpd $7, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x09,0xc0,0x07] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vroundpd $7, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x09,0xc0,0x07] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 7) ; <<4 x double>> [#uses=1] ret <4 x double> %res } @@ -565,9 +565,9 @@ declare <4 x double> @llvm.x86.avx.round.pd.256(<4 x double>, i32) nounwind read define <8 x float> @test_x86_avx_round_ps_256(<8 x float> %a0) { ; CHECK-LABEL: test_x86_avx_round_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vroundps $7, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x08,0xc0,0x07] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vroundps $7, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x08,0xc0,0x07] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 7) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -576,14 +576,14 @@ declare <8 x float> @llvm.x86.avx.round.ps.256(<8 x float>, i32) nounwind readno define <8 x float> @test_x86_avx_rsqrt_ps_256(<8 x float> %a0) { ; AVX-LABEL: test_x86_avx_rsqrt_ps_256: -; AVX: ## BB#0: -; AVX-NEXT: vrsqrtps %ymm0, %ymm0 ## encoding: [0xc5,0xfc,0x52,0xc0] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: vrsqrtps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x52,0xc0] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_rsqrt_ps_256: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vrsqrt14ps %ymm0, %ymm0 ## encoding: [0x62,0xf2,0x7d,0x28,0x4e,0xc0] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vrsqrt14ps %ymm0, %ymm0 # encoding: [0x62,0xf2,0x7d,0x28,0x4e,0xc0] +; AVX512VL-NEXT: retl # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0) ; <<8 x float>> 
[#uses=1] ret <8 x float> %res } @@ -592,9 +592,9 @@ declare <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float>) nounwind readnone define <4 x double> @test_x86_avx_sqrt_pd_256(<4 x double> %a0) { ; CHECK-LABEL: test_x86_avx_sqrt_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vsqrtpd %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x51,0xc0] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vsqrtpd %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x51,0xc0] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %a0) ; <<4 x double>> [#uses=1] ret <4 x double> %res } @@ -603,9 +603,9 @@ declare <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double>) nounwind readnone define <8 x float> @test_x86_avx_sqrt_ps_256(<8 x float> %a0) { ; CHECK-LABEL: test_x86_avx_sqrt_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vsqrtps %ymm0, %ymm0 ## encoding: [0xc5,0xfc,0x51,0xc0] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vsqrtps %ymm0, %ymm0 # encoding: [0xc5,0xfc,0x51,0xc0] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %a0) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -614,10 +614,10 @@ declare <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float>) nounwind readnone define <4 x double> @test_x86_avx_vperm2f128_pd_256(<4 x double> %a0, <4 x double> %a1) { ; CHECK-LABEL: test_x86_avx_vperm2f128_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vperm2f128 $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x06,0xc1,0x07] -; CHECK-NEXT: ## ymm0 = ymm1[2,3],ymm0[0,1] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 $7, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x06,0xc1,0x07] +; CHECK-NEXT: # ymm0 = ymm1[2,3],ymm0[0,1] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1, i8 7) ; <<4 x double>> [#uses=1] ret <4 x double> %res } @@ -626,10 +626,10 @@ declare <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double>, <4 x double>, define <8 x float> @test_x86_avx_vperm2f128_ps_256(<8 x float> %a0, <8 x float> %a1) { ; CHECK-LABEL: test_x86_avx_vperm2f128_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vperm2f128 $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x06,0xc1,0x07] -; CHECK-NEXT: ## ymm0 = ymm1[2,3],ymm0[0,1] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 $7, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x06,0xc1,0x07] +; CHECK-NEXT: # ymm0 = ymm1[2,3],ymm0[0,1] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -638,10 +638,10 @@ declare <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float>, <8 x float>, i8 define <8 x i32> @test_x86_avx_vperm2f128_si_256(<8 x i32> %a0, <8 x i32> %a1) { ; CHECK-LABEL: test_x86_avx_vperm2f128_si_256: -; CHECK: ## BB#0: -; CHECK-NEXT: vperm2f128 $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x06,0xc1,0x07] -; CHECK-NEXT: ## ymm0 = ymm1[2,3],ymm0[0,1] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 $7, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x06,0xc1,0x07] +; CHECK-NEXT: # ymm0 = ymm1[2,3],ymm0[0,1] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx.vperm2f128.si.256(<8 x i32> %a0, <8 x i32> %a1, i8 7) ; <<8 x i32>> [#uses=1] ret <8 x i32> %res } @@ -650,14 +650,14 @@ declare <8 x i32> 
@llvm.x86.avx.vperm2f128.si.256(<8 x i32>, <8 x i32>, i8) noun define <2 x double> @test_x86_avx_vpermilvar_pd(<2 x double> %a0, <2 x i64> %a1) { ; AVX-LABEL: test_x86_avx_vpermilvar_pd: -; AVX: ## BB#0: -; AVX-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0d,0xc1] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0d,0xc1] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_vpermilvar_pd: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0d,0xc1] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0d,0xc1] +; AVX512VL-NEXT: retl # encoding: [0xc3] %res = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %a1) ; <<2 x double>> [#uses=1] ret <2 x double> %res } @@ -666,14 +666,14 @@ declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>) nounwi define <4 x double> @test_x86_avx_vpermilvar_pd_256(<4 x double> %a0, <4 x i64> %a1) { ; AVX-LABEL: test_x86_avx_vpermilvar_pd_256: -; AVX: ## BB#0: -; AVX-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0d,0xc1] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0d,0xc1] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_vpermilvar_pd_256: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0d,0xc1] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0d,0xc1] +; AVX512VL-NEXT: retl # encoding: [0xc3] %res = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1) ; <<4 x double>> [#uses=1] ret <4 x double> %res } @@ -681,45 +681,45 @@ declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>) no define <4 x double> @test_x86_avx_vpermilvar_pd_256_2(<4 x double> %a0) { ; AVX-LABEL: test_x86_avx_vpermilvar_pd_256_2: -; AVX: ## BB#0: -; AVX-NEXT: vpermilpd $9, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x05,0xc0,0x09] -; AVX-NEXT: ## ymm0 = ymm0[1,0,2,3] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: vpermilpd $9, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0x7d,0x05,0xc0,0x09] +; AVX-NEXT: # ymm0 = ymm0[1,0,2,3] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_vpermilvar_pd_256_2: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpermilpd $9, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x05,0xc0,0x09] -; AVX512VL-NEXT: ## ymm0 = ymm0[1,0,2,3] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vpermilpd $9, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x05,0xc0,0x09] +; AVX512VL-NEXT: # ymm0 = ymm0[1,0,2,3] +; AVX512VL-NEXT: retl # encoding: [0xc3] %res = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> ) ; <<4 x double>> [#uses=1] ret <4 x double> %res } define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) { ; AVX-LABEL: test_x86_avx_vpermilvar_ps: -; AVX: ## BB#0: -; AVX-NEXT: vpermilps %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0c,0xc1] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: vpermilps 
%xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0c,0xc1] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_vpermilvar_ps: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpermilps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0c,0xc1] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0c,0xc1] +; AVX512VL-NEXT: retl # encoding: [0xc3] %res = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a1) ; <<4 x float>> [#uses=1] ret <4 x float> %res } define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, <4 x i32>* %a1) { ; AVX-LABEL: test_x86_avx_vpermilvar_ps_load: -; AVX: ## BB#0: -; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; AVX-NEXT: vpermilps (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0c,0x00] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; AVX-NEXT: vpermilps (%eax), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0c,0x00] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_vpermilvar_ps_load: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; AVX512VL-NEXT: vpermilps (%eax), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0c,0x00] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; AVX512VL-NEXT: vpermilps (%eax), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0c,0x00] +; AVX512VL-NEXT: retl # encoding: [0xc3] %a2 = load <4 x i32>, <4 x i32>* %a1 %res = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a2) ; <<4 x float>> [#uses=1] ret <4 x float> %res @@ -729,14 +729,14 @@ declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>) nounwind define <8 x float> @test_x86_avx_vpermilvar_ps_256(<8 x float> %a0, <8 x i32> %a1) { ; AVX-LABEL: test_x86_avx_vpermilvar_ps_256: -; AVX: ## BB#0: -; AVX-NEXT: vpermilps %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0c,0xc1] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0c,0xc1] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: test_x86_avx_vpermilvar_ps_256: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: vpermilps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0c,0xc1] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0c,0xc1] +; AVX512VL-NEXT: retl # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1) ; <<8 x float>> [#uses=1] ret <8 x float> %res } @@ -745,11 +745,11 @@ declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>) noun define i32 @test_x86_avx_vtestc_pd(<2 x double> %a0, <2 x double> %a1) { ; CHECK-LABEL: test_x86_avx_vtestc_pd: -; CHECK: ## BB#0: -; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0] -; CHECK-NEXT: vtestpd %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0f,0xc1] -; CHECK-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] +; CHECK-NEXT: vtestpd %xmm1, %xmm0 # encoding: 
[0xc4,0xe2,0x79,0x0f,0xc1] +; CHECK-NEXT: setb %al # encoding: [0x0f,0x92,0xc0] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %a1) ; [#uses=1] ret i32 %res } @@ -758,12 +758,12 @@ declare i32 @llvm.x86.avx.vtestc.pd(<2 x double>, <2 x double>) nounwind readnon define i32 @test_x86_avx_vtestc_pd_256(<4 x double> %a0, <4 x double> %a1) { ; CHECK-LABEL: test_x86_avx_vtestc_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0] -; CHECK-NEXT: vtestpd %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0f,0xc1] -; CHECK-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0] -; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] +; CHECK-NEXT: vtestpd %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0f,0xc1] +; CHECK-NEXT: setb %al # encoding: [0x0f,0x92,0xc0] +; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %a1) ; [#uses=1] ret i32 %res } @@ -772,11 +772,11 @@ declare i32 @llvm.x86.avx.vtestc.pd.256(<4 x double>, <4 x double>) nounwind rea define i32 @test_x86_avx_vtestc_ps(<4 x float> %a0, <4 x float> %a1) { ; CHECK-LABEL: test_x86_avx_vtestc_ps: -; CHECK: ## BB#0: -; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0] -; CHECK-NEXT: vtestps %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0e,0xc1] -; CHECK-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] +; CHECK-NEXT: vtestps %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0e,0xc1] +; CHECK-NEXT: setb %al # encoding: [0x0f,0x92,0xc0] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %a1) ; [#uses=1] ret i32 %res } @@ -785,12 +785,12 @@ declare i32 @llvm.x86.avx.vtestc.ps(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_avx_vtestc_ps_256(<8 x float> %a0, <8 x float> %a1) { ; CHECK-LABEL: test_x86_avx_vtestc_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0] -; CHECK-NEXT: vtestps %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0e,0xc1] -; CHECK-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0] -; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] +; CHECK-NEXT: vtestps %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0e,0xc1] +; CHECK-NEXT: setb %al # encoding: [0x0f,0x92,0xc0] +; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %a1) ; [#uses=1] ret i32 %res } @@ -799,11 +799,11 @@ declare i32 @llvm.x86.avx.vtestc.ps.256(<8 x float>, <8 x float>) nounwind readn define i32 @test_x86_avx_vtestnzc_pd(<2 x double> %a0, <2 x double> %a1) { ; CHECK-LABEL: test_x86_avx_vtestnzc_pd: -; CHECK: ## BB#0: -; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0] -; CHECK-NEXT: vtestpd %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0f,0xc1] -; CHECK-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] +; CHECK-NEXT: vtestpd %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0f,0xc1] +; CHECK-NEXT: seta %al # encoding: [0x0f,0x97,0xc0] +; CHECK-NEXT: retl # 
encoding: [0xc3] %res = call i32 @llvm.x86.avx.vtestnzc.pd(<2 x double> %a0, <2 x double> %a1) ; [#uses=1] ret i32 %res } @@ -812,12 +812,12 @@ declare i32 @llvm.x86.avx.vtestnzc.pd(<2 x double>, <2 x double>) nounwind readn define i32 @test_x86_avx_vtestnzc_pd_256(<4 x double> %a0, <4 x double> %a1) { ; CHECK-LABEL: test_x86_avx_vtestnzc_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0] -; CHECK-NEXT: vtestpd %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0f,0xc1] -; CHECK-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0] -; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] +; CHECK-NEXT: vtestpd %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0f,0xc1] +; CHECK-NEXT: seta %al # encoding: [0x0f,0x97,0xc0] +; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double> %a0, <4 x double> %a1) ; [#uses=1] ret i32 %res } @@ -826,11 +826,11 @@ declare i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double>, <4 x double>) nounwind r define i32 @test_x86_avx_vtestnzc_ps(<4 x float> %a0, <4 x float> %a1) { ; CHECK-LABEL: test_x86_avx_vtestnzc_ps: -; CHECK: ## BB#0: -; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0] -; CHECK-NEXT: vtestps %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0e,0xc1] -; CHECK-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] +; CHECK-NEXT: vtestps %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0e,0xc1] +; CHECK-NEXT: seta %al # encoding: [0x0f,0x97,0xc0] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call i32 @llvm.x86.avx.vtestnzc.ps(<4 x float> %a0, <4 x float> %a1) ; [#uses=1] ret i32 %res } @@ -839,12 +839,12 @@ declare i32 @llvm.x86.avx.vtestnzc.ps(<4 x float>, <4 x float>) nounwind readnon define i32 @test_x86_avx_vtestnzc_ps_256(<8 x float> %a0, <8 x float> %a1) { ; CHECK-LABEL: test_x86_avx_vtestnzc_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0] -; CHECK-NEXT: vtestps %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0e,0xc1] -; CHECK-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0] -; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] +; CHECK-NEXT: vtestps %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0e,0xc1] +; CHECK-NEXT: seta %al # encoding: [0x0f,0x97,0xc0] +; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float> %a0, <8 x float> %a1) ; [#uses=1] ret i32 %res } @@ -853,11 +853,11 @@ declare i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float>, <8 x float>) nounwind rea define i32 @test_x86_avx_vtestz_pd(<2 x double> %a0, <2 x double> %a1) { ; CHECK-LABEL: test_x86_avx_vtestz_pd: -; CHECK: ## BB#0: -; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0] -; CHECK-NEXT: vtestpd %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0f,0xc1] -; CHECK-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] +; CHECK-NEXT: vtestpd %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0f,0xc1] +; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call i32 @llvm.x86.avx.vtestz.pd(<2 x double> %a0, <2 x 
double> %a1) ; [#uses=1] ret i32 %res } @@ -866,12 +866,12 @@ declare i32 @llvm.x86.avx.vtestz.pd(<2 x double>, <2 x double>) nounwind readnon define i32 @test_x86_avx_vtestz_pd_256(<4 x double> %a0, <4 x double> %a1) { ; CHECK-LABEL: test_x86_avx_vtestz_pd_256: -; CHECK: ## BB#0: -; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0] -; CHECK-NEXT: vtestpd %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0f,0xc1] -; CHECK-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0] -; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] +; CHECK-NEXT: vtestpd %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0f,0xc1] +; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0] +; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> %a0, <4 x double> %a1) ; [#uses=1] ret i32 %res } @@ -880,11 +880,11 @@ declare i32 @llvm.x86.avx.vtestz.pd.256(<4 x double>, <4 x double>) nounwind rea define i32 @test_x86_avx_vtestz_ps(<4 x float> %a0, <4 x float> %a1) { ; CHECK-LABEL: test_x86_avx_vtestz_ps: -; CHECK: ## BB#0: -; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0] -; CHECK-NEXT: vtestps %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0e,0xc1] -; CHECK-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] +; CHECK-NEXT: vtestps %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x79,0x0e,0xc1] +; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call i32 @llvm.x86.avx.vtestz.ps(<4 x float> %a0, <4 x float> %a1) ; [#uses=1] ret i32 %res } @@ -893,12 +893,12 @@ declare i32 @llvm.x86.avx.vtestz.ps(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_avx_vtestz_ps_256(<8 x float> %a0, <8 x float> %a1) { ; CHECK-LABEL: test_x86_avx_vtestz_ps_256: -; CHECK: ## BB#0: -; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0] -; CHECK-NEXT: vtestps %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0e,0xc1] -; CHECK-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0] -; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] +; CHECK-NEXT: vtestps %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x0e,0xc1] +; CHECK-NEXT: sete %al # encoding: [0x0f,0x94,0xc0] +; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call i32 @llvm.x86.avx.vtestz.ps.256(<8 x float> %a0, <8 x float> %a1) ; [#uses=1] ret i32 %res } @@ -907,9 +907,9 @@ declare i32 @llvm.x86.avx.vtestz.ps.256(<8 x float>, <8 x float>) nounwind readn define void @test_x86_avx_vzeroall() { ; CHECK-LABEL: test_x86_avx_vzeroall: -; CHECK: ## BB#0: -; CHECK-NEXT: vzeroall ## encoding: [0xc5,0xfc,0x77] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vzeroall # encoding: [0xc5,0xfc,0x77] +; CHECK-NEXT: retl # encoding: [0xc3] call void @llvm.x86.avx.vzeroall() ret void } @@ -918,9 +918,9 @@ declare void @llvm.x86.avx.vzeroall() nounwind define void @test_x86_avx_vzeroupper() { ; CHECK-LABEL: test_x86_avx_vzeroupper: -; CHECK: ## BB#0: -; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; CHECK-NEXT: retl # encoding: [0xc3] call void @llvm.x86.avx.vzeroupper() ret void 
} @@ -928,22 +928,22 @@ declare void @llvm.x86.avx.vzeroupper() nounwind define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind { ; AVX-LABEL: movnt_dq: -; AVX: ## BB#0: -; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x76,0xc9] -; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfb,0xc1] -; AVX-NEXT: vmovntdq %ymm0, (%eax) ## encoding: [0xc5,0xfd,0xe7,0x00] -; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9] +; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfb,0xc1] +; AVX-NEXT: vmovntdq %ymm0, (%eax) # encoding: [0xc5,0xfd,0xe7,0x00] +; AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: movnt_dq: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x76,0xc9] -; AVX512VL-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1] -; AVX512VL-NEXT: vmovntdq %ymm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe7,0x00] -; AVX512VL-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x76,0xc9] +; AVX512VL-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1] +; AVX512VL-NEXT: vmovntdq %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe7,0x00] +; AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; AVX512VL-NEXT: retl # encoding: [0xc3] %a2 = add <2 x i64> %a1, %a3 = shufflevector <2 x i64> %a2, <2 x i64> undef, <4 x i32> tail call void @llvm.x86.avx.movnt.dq.256(i8* %p, <4 x i64> %a3) nounwind @@ -953,18 +953,18 @@ declare void @llvm.x86.avx.movnt.dq.256(i8*, <4 x i64>) nounwind define void @movnt_ps(i8* %p, <8 x float> %a) nounwind { ; AVX-LABEL: movnt_ps: -; AVX: ## BB#0: -; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; AVX-NEXT: vmovntps %ymm0, (%eax) ## encoding: [0xc5,0xfc,0x2b,0x00] -; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; AVX-NEXT: vmovntps %ymm0, (%eax) # encoding: [0xc5,0xfc,0x2b,0x00] +; AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: movnt_ps: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; AVX512VL-NEXT: vmovntps %ymm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x2b,0x00] -; AVX512VL-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; AVX512VL-NEXT: vmovntps %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x2b,0x00] +; AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; AVX512VL-NEXT: retl # encoding: [0xc3] tail call void @llvm.x86.avx.movnt.ps.256(i8* %p, <8 x float> %a) nounwind ret void } @@ -973,22 +973,22 @@ declare 
void @llvm.x86.avx.movnt.ps.256(i8*, <8 x float>) nounwind define void @movnt_pd(i8* %p, <4 x double> %a1) nounwind { ; add operation forces the execution domain. ; AVX-LABEL: movnt_pd: -; AVX: ## BB#0: -; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; AVX-NEXT: vxorpd %ymm1, %ymm1, %ymm1 ## encoding: [0xc5,0xf5,0x57,0xc9] -; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x58,0xc1] -; AVX-NEXT: vmovntpd %ymm0, (%eax) ## encoding: [0xc5,0xfd,0x2b,0x00] -; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; AVX-NEXT: retl ## encoding: [0xc3] +; AVX: # BB#0: +; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; AVX-NEXT: vxorpd %ymm1, %ymm1, %ymm1 # encoding: [0xc5,0xf5,0x57,0xc9] +; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0x58,0xc1] +; AVX-NEXT: vmovntpd %ymm0, (%eax) # encoding: [0xc5,0xfd,0x2b,0x00] +; AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; AVX-NEXT: retl # encoding: [0xc3] ; ; AVX512VL-LABEL: movnt_pd: -; AVX512VL: ## BB#0: -; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; AVX512VL-NEXT: vxorpd %ymm1, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x57,0xc9] -; AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1] -; AVX512VL-NEXT: vmovntpd %ymm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x2b,0x00] -; AVX512VL-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] -; AVX512VL-NEXT: retl ## encoding: [0xc3] +; AVX512VL: # BB#0: +; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; AVX512VL-NEXT: vxorpd %ymm1, %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0x57,0xc9] +; AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1] +; AVX512VL-NEXT: vmovntpd %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x2b,0x00] +; AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; AVX512VL-NEXT: retl # encoding: [0xc3] %a2 = fadd <4 x double> %a1, tail call void @llvm.x86.avx.movnt.pd.256(i8* %p, <4 x double> %a2) nounwind ret void @@ -999,9 +999,9 @@ declare void @llvm.x86.avx.movnt.pd.256(i8*, <4 x double>) nounwind ; Check for pclmulqdq define <2 x i64> @test_x86_pclmulqdq(<2 x i64> %a0, <2 x i64> %a1) { ; CHECK-LABEL: test_x86_pclmulqdq: -; CHECK: ## BB#0: -; CHECK-NEXT: vpclmulqdq $0, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x44,0xc1,0x00] -; CHECK-NEXT: retl ## encoding: [0xc3] +; CHECK: # BB#0: +; CHECK-NEXT: vpclmulqdq $0, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x44,0xc1,0x00] +; CHECK-NEXT: retl # encoding: [0xc3] %res = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %a0, <2 x i64> %a1, i8 0) ; <<2 x i64>> [#uses=1] ret <2 x i64> %res } diff --git a/llvm/test/CodeGen/X86/avx-intrinsics-x86_64.ll b/llvm/test/CodeGen/X86/avx-intrinsics-x86_64.ll index 909c69c..8d4f442 100644 --- a/llvm/test/CodeGen/X86/avx-intrinsics-x86_64.ll +++ b/llvm/test/CodeGen/X86/avx-intrinsics-x86_64.ll @@ -1,18 +1,18 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86-64 -mcpu=corei7 -mattr=avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX -; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86-64 -mcpu=corei7 -mattr=avx512vl | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7 -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX 
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7 -mattr=+avx512vl | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL define <4 x double> @test_x86_avx_vzeroall(<4 x double> %a, <4 x double> %b) { ; AVX-LABEL: test_x86_avx_vzeroall: -; AVX: ## BB#0: +; AVX: # BB#0: ; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 -; AVX-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) ## 32-byte Spill +; AVX-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) # 32-byte Spill ; AVX-NEXT: vzeroall -; AVX-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 ## 32-byte Reload +; AVX-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload ; AVX-NEXT: retq ; ; AVX512VL-LABEL: test_x86_avx_vzeroall: -; AVX512VL: ## BB#0: +; AVX512VL: # BB#0: ; AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm16 ; AVX512VL-NEXT: vzeroall ; AVX512VL-NEXT: vmovapd %ymm16, %ymm0 @@ -25,15 +25,15 @@ declare void @llvm.x86.avx.vzeroall() nounwind define <4 x double> @test_x86_avx_vzeroupper(<4 x double> %a, <4 x double> %b) { ; AVX-LABEL: test_x86_avx_vzeroupper: -; AVX: ## BB#0: +; AVX: # BB#0: ; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 -; AVX-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) ## 32-byte Spill +; AVX-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) # 32-byte Spill ; AVX-NEXT: vzeroupper -; AVX-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 ## 32-byte Reload +; AVX-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload ; AVX-NEXT: retq ; ; AVX512VL-LABEL: test_x86_avx_vzeroupper: -; AVX512VL: ## BB#0: +; AVX512VL: # BB#0: ; AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm16 ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: vmovapd %ymm16, %ymm0 diff --git a/llvm/test/CodeGen/X86/avx-select.ll b/llvm/test/CodeGen/X86/avx-select.ll index cdd3180..8abc488 100644 --- a/llvm/test/CodeGen/X86/avx-select.ll +++ b/llvm/test/CodeGen/X86/avx-select.ll @@ -1,17 +1,29 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X86 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64 define <8 x i32> @select00(i32 %a, <8 x i32> %b) nounwind { -; CHECK-LABEL: select00: -; CHECK: ## BB#0: -; CHECK-NEXT: vxorps %ymm1, %ymm1, %ymm1 -; CHECK-NEXT: cmpl $255, %edi -; CHECK-NEXT: je LBB0_2 -; CHECK-NEXT: ## BB#1: -; CHECK-NEXT: vmovaps %ymm0, %ymm1 -; CHECK-NEXT: LBB0_2: -; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0 -; CHECK-NEXT: retq +; X86-LABEL: select00: +; X86: # BB#0: +; X86-NEXT: cmpl $255, {{[0-9]+}}(%esp) +; X86-NEXT: vxorps %ymm1, %ymm1, %ymm1 +; X86-NEXT: je .LBB0_2 +; X86-NEXT: # BB#1: +; X86-NEXT: vmovaps %ymm0, %ymm1 +; X86-NEXT: .LBB0_2: +; X86-NEXT: vxorps %ymm1, %ymm0, %ymm0 +; X86-NEXT: retl +; +; X64-LABEL: select00: +; X64: # BB#0: +; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1 +; X64-NEXT: cmpl $255, %edi +; X64-NEXT: je .LBB0_2 +; X64-NEXT: # BB#1: +; X64-NEXT: vmovaps %ymm0, %ymm1 +; X64-NEXT: .LBB0_2: +; X64-NEXT: vxorps %ymm1, %ymm0, %ymm0 +; X64-NEXT: retq %cmpres = icmp eq i32 %a, 255 %selres = select i1 %cmpres, <8 x i32> zeroinitializer, <8 x i32> %b %res = xor <8 x i32> %b, %selres @@ -19,16 +31,27 @@ define <8 x i32> @select00(i32 %a, <8 x i32> %b) nounwind { } define <4 x i64> @select01(i32 %a, <4 x i64> %b) nounwind { -; CHECK-LABEL: select01: -; CHECK: ## BB#0: -; CHECK-NEXT: vxorps %ymm1, %ymm1, %ymm1 -; CHECK-NEXT: cmpl $255, %edi -; CHECK-NEXT: je LBB1_2 -; CHECK-NEXT: ## BB#1: -; CHECK-NEXT: vmovaps %ymm0, %ymm1 -; CHECK-NEXT: LBB1_2: -; CHECK-NEXT: vxorps %ymm1, %ymm0, 
%ymm0 -; CHECK-NEXT: retq +; X86-LABEL: select01: +; X86: # BB#0: +; X86-NEXT: cmpl $255, {{[0-9]+}}(%esp) +; X86-NEXT: vxorps %ymm1, %ymm1, %ymm1 +; X86-NEXT: je .LBB1_2 +; X86-NEXT: # BB#1: +; X86-NEXT: vmovaps %ymm0, %ymm1 +; X86-NEXT: .LBB1_2: +; X86-NEXT: vxorps %ymm1, %ymm0, %ymm0 +; X86-NEXT: retl +; +; X64-LABEL: select01: +; X64: # BB#0: +; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1 +; X64-NEXT: cmpl $255, %edi +; X64-NEXT: je .LBB1_2 +; X64-NEXT: # BB#1: +; X64-NEXT: vmovaps %ymm0, %ymm1 +; X64-NEXT: .LBB1_2: +; X64-NEXT: vxorps %ymm1, %ymm0, %ymm0 +; X64-NEXT: retq %cmpres = icmp eq i32 %a, 255 %selres = select i1 %cmpres, <4 x i64> zeroinitializer, <4 x i64> %b %res = xor <4 x i64> %b, %selres diff --git a/llvm/test/CodeGen/X86/avx-vbroadcastf128.ll b/llvm/test/CodeGen/X86/avx-vbroadcastf128.ll index 426ff81..d7a5f2d 100644 --- a/llvm/test/CodeGen/X86/avx-vbroadcastf128.ll +++ b/llvm/test/CodeGen/X86/avx-vbroadcastf128.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=X32 -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=X64 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64 define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind { ; X32-LABEL: test_broadcast_2f64_4f64: -; X32: ## BB#0: +; X32: # BB#0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_2f64_4f64: -; X64: ## BB#0: +; X64: # BB#0: ; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X64-NEXT: retq %1 = load <2 x double>, <2 x double> *%p @@ -20,13 +20,13 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind { define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind { ; X32-LABEL: test_broadcast_2i64_4i64: -; X32: ## BB#0: +; X32: # BB#0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_2i64_4i64: -; X64: ## BB#0: +; X64: # BB#0: ; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X64-NEXT: retq %1 = load <2 x i64>, <2 x i64> *%p @@ -36,13 +36,13 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind { define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind { ; X32-LABEL: test_broadcast_4f32_8f32: -; X32: ## BB#0: +; X32: # BB#0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_4f32_8f32: -; X64: ## BB#0: +; X64: # BB#0: ; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X64-NEXT: retq %1 = load <4 x float>, <4 x float> *%p @@ -52,13 +52,13 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind { define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind { ; X32-LABEL: test_broadcast_4i32_8i32: -; X32: ## BB#0: +; X32: # BB#0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_4i32_8i32: -; X64: ## BB#0: +; X64: # BB#0: ; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X64-NEXT: retq %1 = load <4 x i32>, <4 x i32> *%p @@ -68,13 +68,13 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind { define <16 x 
i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind { ; X32-LABEL: test_broadcast_8i16_16i16: -; X32: ## BB#0: +; X32: # BB#0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_8i16_16i16: -; X64: ## BB#0: +; X64: # BB#0: ; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X64-NEXT: retq %1 = load <8 x i16>, <8 x i16> *%p @@ -84,13 +84,13 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind { define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind { ; X32-LABEL: test_broadcast_16i8_32i8: -; X32: ## BB#0: +; X32: # BB#0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_16i8_32i8: -; X64: ## BB#0: +; X64: # BB#0: ; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X64-NEXT: retq %1 = load <16 x i8>, <16 x i8> *%p @@ -100,7 +100,7 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind { define <4 x double> @test_broadcast_2f64_4f64_reuse(<2 x double>* %p0, <2 x double>* %p1) { ; X32-LABEL: test_broadcast_2f64_4f64_reuse: -; X32: ## BB#0: +; X32: # BB#0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: vmovaps (%ecx), %xmm1 @@ -109,7 +109,7 @@ define <4 x double> @test_broadcast_2f64_4f64_reuse(<2 x double>* %p0, <2 x doub ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_2f64_4f64_reuse: -; X64: ## BB#0: +; X64: # BB#0: ; X64-NEXT: vmovaps (%rdi), %xmm1 ; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm0 ; X64-NEXT: vmovaps %xmm1, (%rsi) @@ -122,7 +122,7 @@ define <4 x double> @test_broadcast_2f64_4f64_reuse(<2 x double>* %p0, <2 x doub define <4 x i64> @test_broadcast_2i64_4i64_reuse(<2 x i64>* %p0, <2 x i64>* %p1) { ; X32-LABEL: test_broadcast_2i64_4i64_reuse: -; X32: ## BB#0: +; X32: # BB#0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: vmovaps (%ecx), %xmm1 @@ -131,7 +131,7 @@ define <4 x i64> @test_broadcast_2i64_4i64_reuse(<2 x i64>* %p0, <2 x i64>* %p1) ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_2i64_4i64_reuse: -; X64: ## BB#0: +; X64: # BB#0: ; X64-NEXT: vmovaps (%rdi), %xmm1 ; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm0 ; X64-NEXT: vmovaps %xmm1, (%rsi) @@ -144,7 +144,7 @@ define <4 x i64> @test_broadcast_2i64_4i64_reuse(<2 x i64>* %p0, <2 x i64>* %p1) define <8 x float> @test_broadcast_4f32_8f32_reuse(<4 x float>* %p0, <4 x float>* %p1) { ; X32-LABEL: test_broadcast_4f32_8f32_reuse: -; X32: ## BB#0: +; X32: # BB#0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: vmovaps (%ecx), %xmm1 @@ -153,7 +153,7 @@ define <8 x float> @test_broadcast_4f32_8f32_reuse(<4 x float>* %p0, <4 x float> ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_4f32_8f32_reuse: -; X64: ## BB#0: +; X64: # BB#0: ; X64-NEXT: vmovaps (%rdi), %xmm1 ; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm0 ; X64-NEXT: vmovaps %xmm1, (%rsi) @@ -166,7 +166,7 @@ define <8 x float> @test_broadcast_4f32_8f32_reuse(<4 x float>* %p0, <4 x float> define <8 x i32> @test_broadcast_4i32_8i32_reuse(<4 x i32>* %p0, <4 x i32>* %p1) { ; X32-LABEL: test_broadcast_4i32_8i32_reuse: -; X32: ## BB#0: +; X32: # BB#0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: vmovaps (%ecx), %xmm1 @@ -175,7 +175,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_reuse(<4 x i32>* %p0, <4 x i32>* %p1) ; X32-NEXT: retl ; ; X64-LABEL: 
test_broadcast_4i32_8i32_reuse: -; X64: ## BB#0: +; X64: # BB#0: ; X64-NEXT: vmovaps (%rdi), %xmm1 ; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm0 ; X64-NEXT: vmovaps %xmm1, (%rsi) @@ -188,7 +188,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_reuse(<4 x i32>* %p0, <4 x i32>* %p1) define <16 x i16> @test_broadcast_8i16_16i16_reuse(<8 x i16> *%p0, <8 x i16> *%p1) nounwind { ; X32-LABEL: test_broadcast_8i16_16i16_reuse: -; X32: ## BB#0: +; X32: # BB#0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: vmovaps (%ecx), %xmm1 @@ -197,7 +197,7 @@ define <16 x i16> @test_broadcast_8i16_16i16_reuse(<8 x i16> *%p0, <8 x i16> *%p ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_8i16_16i16_reuse: -; X64: ## BB#0: +; X64: # BB#0: ; X64-NEXT: vmovaps (%rdi), %xmm1 ; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm0 ; X64-NEXT: vmovaps %xmm1, (%rsi) @@ -210,7 +210,7 @@ define <16 x i16> @test_broadcast_8i16_16i16_reuse(<8 x i16> *%p0, <8 x i16> *%p define <32 x i8> @test_broadcast_16i8_32i8_reuse(<16 x i8> *%p0, <16 x i8> *%p1) nounwind { ; X32-LABEL: test_broadcast_16i8_32i8_reuse: -; X32: ## BB#0: +; X32: # BB#0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: vmovaps (%ecx), %xmm1 @@ -219,7 +219,7 @@ define <32 x i8> @test_broadcast_16i8_32i8_reuse(<16 x i8> *%p0, <16 x i8> *%p1) ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_16i8_32i8_reuse: -; X64: ## BB#0: +; X64: # BB#0: ; X64-NEXT: vmovaps (%rdi), %xmm1 ; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm0 ; X64-NEXT: vmovaps %xmm1, (%rsi) @@ -232,7 +232,7 @@ define <32 x i8> @test_broadcast_16i8_32i8_reuse(<16 x i8> *%p0, <16 x i8> *%p1) define <8 x i32> @PR29088(<4 x i32>* %p0, <8 x float>* %p1) { ; X32-LABEL: PR29088: -; X32: ## BB#0: +; X32: # BB#0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: vmovaps (%ecx), %xmm0 @@ -242,7 +242,7 @@ define <8 x i32> @PR29088(<4 x i32>* %p0, <8 x float>* %p1) { ; X32-NEXT: retl ; ; X64-LABEL: PR29088: -; X64: ## BB#0: +; X64: # BB#0: ; X64-NEXT: vmovaps (%rdi), %xmm0 ; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1 ; X64-NEXT: vmovaps %ymm1, (%rsi) diff --git a/llvm/test/CodeGen/X86/avx-vperm2x128.ll b/llvm/test/CodeGen/X86/avx-vperm2x128.ll index 29b9dec..b372f73 100644 --- a/llvm/test/CodeGen/X86/avx-vperm2x128.ll +++ b/llvm/test/CodeGen/X86/avx-vperm2x128.ll @@ -1,10 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX1 -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX2 define <8 x float> @shuffle_v8f32_45670123(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp { ; ALL-LABEL: shuffle_v8f32_45670123: -; ALL: ## BB#0: ## %entry +; ALL: # BB#0: # %entry ; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1] ; ALL-NEXT: retq entry: @@ -14,7 +14,7 @@ entry: define <8 x float> @shuffle_v8f32_45670123_mem(<8 x float>* %pa, <8 x float>* %pb) nounwind uwtable readnone ssp { ; ALL-LABEL: shuffle_v8f32_45670123_mem: -; ALL: ## BB#0: ## %entry +; ALL: # BB#0: # %entry ; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3,0,1] ; ALL-NEXT: retq entry: @@ -26,7 +26,7 @@ entry: 

define <8 x float> @shuffle_v8f32_0123cdef(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_0123cdef:
-; ALL: ## BB#0: ## %entry
+; ALL: # BB#0: # %entry
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT: retq
entry:
@@ -36,12 +36,12 @@ entry:

define <8 x float> @shuffle_v8f32_01230123(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v8f32_01230123:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_01230123:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2-NEXT: retq
entry:
@@ -51,7 +51,7 @@ entry:

define <8 x float> @shuffle_v8f32_01230123_mem(<8 x float>* %pa, <8 x float>* %pb) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_01230123_mem:
-; ALL: ## BB#0: ## %entry
+; ALL: # BB#0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[0,1,0,1]
; ALL-NEXT: retq
entry:
@@ -63,7 +63,7 @@ entry:

define <8 x float> @shuffle_v8f32_45674567(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_45674567:
-; ALL: ## BB#0: ## %entry
+; ALL: # BB#0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; ALL-NEXT: retq
entry:
@@ -73,7 +73,7 @@ entry:

define <8 x float> @shuffle_v8f32_45674567_mem(<8 x float>* %pa, <8 x float>* %pb) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_45674567_mem:
-; ALL: ## BB#0: ## %entry
+; ALL: # BB#0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3,2,3]
; ALL-NEXT: retq
entry:
@@ -85,7 +85,7 @@ entry:

define <32 x i8> @shuffle_v32i8_2323(<32 x i8> %a, <32 x i8> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v32i8_2323:
-; ALL: ## BB#0: ## %entry
+; ALL: # BB#0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; ALL-NEXT: retq
entry:
@@ -95,7 +95,7 @@ entry:

define <32 x i8> @shuffle_v32i8_2323_domain(<32 x i8> %a, <32 x i8> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v32i8_2323_domain:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
@@ -104,7 +104,7 @@ define <32 x i8> @shuffle_v32i8_2323_domain(<32 x i8> %a, <32 x i8> %b) nounwind
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_2323_domain:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
@@ -118,7 +118,7 @@ entry:

define <4 x i64> @shuffle_v4i64_6701(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v4i64_6701:
-; ALL: ## BB#0: ## %entry
+; ALL: # BB#0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
; ALL-NEXT: retq
entry:
@@ -128,14 +128,14 @@ entry:

define <4 x i64> @shuffle_v4i64_6701_domain(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v4i64_6701_domain:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_6701_domain:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
@@ -149,7 +149,7 @@ entry:

define <8 x i32> @shuffle_v8i32_u5u7cdef(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v8i32_u5u7cdef:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubd %xmm2, %xmm0, %xmm0
@@ -158,7 +158,7 @@ define <8 x i32> @shuffle_v8i32_u5u7cdef(<8 x i32> %a, <8 x i32> %b) nounwind uw
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_u5u7cdef:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpsubd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
@@ -172,14 +172,14 @@ entry:

define <16 x i16> @shuffle_v16i16_4501(<16 x i16> %a, <16 x i16> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v16i16_4501:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_4501:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpsubw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
@@ -193,7 +193,7 @@ entry:

define <16 x i16> @shuffle_v16i16_4501_mem(<16 x i16>* %a, <16 x i16>* %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v16i16_4501_mem:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0
@@ -201,7 +201,7 @@ define <16 x i16> @shuffle_v16i16_4501_mem(<16 x i16>* %a, <16 x i16>* %b) nounw
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_4501_mem:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0
@@ -219,7 +219,7 @@ entry:

define <8 x float> @shuffle_v8f32_uu67u9ub(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu67u9ub:
-; ALL: ## BB#0: ## %entry
+; ALL: # BB#0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; ALL-NEXT: retq
entry:
@@ -229,7 +229,7 @@ entry:

define <8 x float> @shuffle_v8f32_uu67uu67(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu67uu67:
-; ALL: ## BB#0: ## %entry
+; ALL: # BB#0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; ALL-NEXT: retq
entry:
@@ -239,7 +239,7 @@ entry:

define <8 x float> @shuffle_v8f32_uu67uuab(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu67uuab:
-; ALL: ## BB#0: ## %entry
+; ALL: # BB#0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; ALL-NEXT: retq
entry:
@@ -249,7 +249,7 @@ entry:

define <8 x float> @shuffle_v8f32_uu67uuef(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu67uuef:
-; ALL: ## BB#0: ## %entry
+; ALL: # BB#0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; ALL-NEXT: retq
entry:
@@ -259,7 +259,7 @@ entry:

define <8 x float> @shuffle_v8f32_uu674567(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu674567:
-; ALL: ## BB#0: ## %entry
+; ALL: # BB#0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; ALL-NEXT: retq
entry:
@@ -269,7 +269,7 @@ entry:

define <8 x float> @shuffle_v8f32_uu6789ab(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu6789ab:
-; ALL: ## BB#0: ## %entry
+; ALL: # BB#0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; ALL-NEXT: retq
entry:
@@ -279,7 +279,7 @@ entry:

define <8 x float> @shuffle_v8f32_4567uu67(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_4567uu67:
-; ALL: ## BB#0: ## %entry
+; ALL: # BB#0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; ALL-NEXT: retq
entry:
@@ -289,7 +289,7 @@ entry:

define <8 x float> @shuffle_v8f32_4567uuef(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_4567uuef:
-; ALL: ## BB#0: ## %entry
+; ALL: # BB#0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; ALL-NEXT: retq
entry:
@@ -301,7 +301,7 @@ entry:

define <8 x float> @shuffle_v8f32_uu67ucuf(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
; ALL-LABEL: shuffle_v8f32_uu67ucuf:
-; ALL: ## BB#0: ## %entry
+; ALL: # BB#0: # %entry
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,3,4,4,6,7]
; ALL-NEXT: retq
@@ -317,7 +317,7 @@ entry:

define <4 x double> @shuffle_v4f64_zz01(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_zz01:
-; ALL: ## BB#0:
+; ALL: # BB#0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; ALL-NEXT: retq
%s = shufflevector <4 x double> %a, <4 x double> , <4 x i32>
@@ -325,7 +325,7 @@ define <4 x double> @shuffle_v4f64_zz01(<4 x double> %a) {
}
define <4 x double> @shuffle_v4f64_zz01_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_zz01_optsize:
-; ALL: ## BB#0:
+; ALL: # BB#0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; ALL-NEXT: retq
%s = shufflevector <4 x double> %a, <4 x double> , <4 x i32>
@@ -334,7 +334,7 @@ define <4 x double> @shuffle_v4f64_zz01_optsize(<4 x double> %a) optsize {

define <4 x double> @shuffle_v4f64_zz23(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_zz23:
-; ALL: ## BB#0:
+; ALL: # BB#0:
; ALL-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; ALL-NEXT: retq
@@ -343,7 +343,7 @@ define <4 x double> @shuffle_v4f64_zz23(<4 x double> %a) {
}
define <4 x double> @shuffle_v4f64_zz23_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_zz23_optsize:
-; ALL: ## BB#0:
+; ALL: # BB#0:
; ALL-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; ALL-NEXT: retq
@@ -353,7 +353,7 @@ define <4 x double> @shuffle_v4f64_zz23_optsize(<4 x double> %a) optsize {

define <4 x double> @shuffle_v4f64_zz45(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_zz45:
-; ALL: ## BB#0:
+; ALL: # BB#0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; ALL-NEXT: retq
%s = shufflevector <4 x double> , <4 x double> %a, <4 x i32>
@@ -361,7 +361,7 @@ define <4 x double> @shuffle_v4f64_zz45(<4 x double> %a) {
}
define <4 x double> @shuffle_v4f64_zz45_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_zz45_optsize:
-; ALL: ## BB#0:
+; ALL: # BB#0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; ALL-NEXT: retq
%s = shufflevector <4 x double> , <4 x double> %a, <4 x i32>
@@ -370,7 +370,7 @@ define <4 x double> @shuffle_v4f64_zz45_optsize(<4 x double> %a) optsize {

define <4 x double> @shuffle_v4f64_zz67(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_zz67:
-; ALL: ## BB#0:
+; ALL: # BB#0:
; ALL-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; ALL-NEXT: retq
@@ -379,7 +379,7 @@ define <4 x double> @shuffle_v4f64_zz67(<4 x double> %a) {
}
define <4 x double> @shuffle_v4f64_zz67_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_zz67_optsize:
-; ALL: ## BB#0:
+; ALL: # BB#0:
; ALL-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; ALL-NEXT: retq
@@ -389,7 +389,7 @@ define <4 x double> @shuffle_v4f64_zz67_optsize(<4 x double> %a) optsize {

define <4 x double> @shuffle_v4f64_01zz(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_01zz:
-; ALL: ## BB#0:
+; ALL: # BB#0:
; ALL-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT: retq
@@ -398,7 +398,7 @@ define <4 x double> @shuffle_v4f64_01zz(<4 x double> %a) {
}
define <4 x double> @shuffle_v4f64_01zz_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_01zz_optsize:
-; ALL: ## BB#0:
+; ALL: # BB#0:
; ALL-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT: retq
@@ -408,7 +408,7 @@ define <4 x double> @shuffle_v4f64_01zz_optsize(<4 x double> %a) optsize {

define <4 x double> @shuffle_v4f64_23zz(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_23zz:
-; ALL: ## BB#0:
+; ALL: # BB#0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; ALL-NEXT: retq
%s = shufflevector <4 x double> %a, <4 x double> , <4 x i32>
@@ -416,7 +416,7 @@ define <4 x double> @shuffle_v4f64_23zz(<4 x double> %a) {
}
define <4 x double> @shuffle_v4f64_23zz_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_23zz_optsize:
-; ALL: ## BB#0:
+; ALL: # BB#0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; ALL-NEXT: retq
%s = shufflevector <4 x double> %a, <4 x double> , <4 x i32>
@@ -425,7 +425,7 @@ define <4 x double> @shuffle_v4f64_23zz_optsize(<4 x double> %a) optsize {

define <4 x double> @shuffle_v4f64_45zz(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_45zz:
-; ALL: ## BB#0:
+; ALL: # BB#0:
; ALL-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT: retq
@@ -434,7 +434,7 @@ define <4 x double> @shuffle_v4f64_45zz(<4 x double> %a) {
}
define <4 x double> @shuffle_v4f64_45zz_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_45zz_optsize:
-; ALL: ## BB#0:
+; ALL: # BB#0:
; ALL-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT: retq
@@ -444,7 +444,7 @@ define <4 x double> @shuffle_v4f64_45zz_optsize(<4 x double> %a) optsize {

define <4 x double> @shuffle_v4f64_67zz(<4 x double> %a) {
; ALL-LABEL: shuffle_v4f64_67zz:
-; ALL: ## BB#0:
+; ALL: # BB#0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; ALL-NEXT: retq
%s = shufflevector <4 x double> , <4 x double> %a, <4 x i32>
@@ -452,7 +452,7 @@ define <4 x double> @shuffle_v4f64_67zz(<4 x double> %a) {
}
define <4 x double> @shuffle_v4f64_67zz_optsize(<4 x double> %a) optsize {
; ALL-LABEL: shuffle_v4f64_67zz_optsize:
-; ALL: ## BB#0:
+; ALL: # BB#0:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; ALL-NEXT: retq
%s = shufflevector <4 x double> , <4 x double> %a, <4 x i32>
@@ -463,14 +463,14 @@ define <4 x double> @shuffle_v4f64_67zz_optsize(<4 x double> %a) optsize {

define <4 x i64> @shuffle_v4i64_67zz(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_67zz:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; AVX1-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_67zz:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; AVX2-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
@@ -483,15 +483,15 @@ define <4 x i64> @shuffle_v4i64_67zz(<4 x i64> %a, <4 x i64> %b) {

define <4 x double> @ld0_hi0_lo1_4f64(<4 x double> * %pa, <4 x double> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld0_hi0_lo1_4f64:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX1-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ld0_hi0_lo1_4f64:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
-; AVX2-NEXT: vbroadcastsd {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
entry:
@@ -503,15 +503,15 @@ entry:

define <4 x double> @ld1_hi0_hi1_4f64(<4 x double> %a, <4 x double> * %pb) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld1_hi0_hi1_4f64:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX1-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ld1_hi0_hi1_4f64:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
-; AVX2-NEXT: vbroadcastsd {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
entry:
@@ -523,15 +523,15 @@ entry:

define <8 x float> @ld0_hi0_lo1_8f32(<8 x float> * %pa, <8 x float> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld0_hi0_lo1_8f32:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX1-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ld0_hi0_lo1_8f32:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
-; AVX2-NEXT: vbroadcastss {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
entry:
@@ -543,15 +543,15 @@ entry:

define <8 x float> @ld1_hi0_hi1_8f32(<8 x float> %a, <8 x float> * %pb) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld1_hi0_hi1_8f32:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX1-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ld1_hi0_hi1_8f32:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
-; AVX2-NEXT: vbroadcastss {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
entry:
@@ -563,7 +563,7 @@ entry:

define <4 x i64> @ld0_hi0_lo1_4i64(<4 x i64> * %pa, <4 x i64> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld0_hi0_lo1_4i64:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX1-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -572,7 +572,7 @@ define <4 x i64> @ld0_hi0_lo1_4i64(<4 x i64> * %pa, <4 x i64> %b) nounwind uwtab
; AVX1-NEXT: retq
;
; AVX2-LABEL: ld0_hi0_lo1_4i64:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX2-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -585,7 +585,7 @@ entry:

define <4 x i64> @ld1_hi0_hi1_4i64(<4 x i64> %a, <4 x i64> * %pb) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld1_hi0_hi1_4i64:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX1-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -594,7 +594,7 @@ define <4 x i64> @ld1_hi0_hi1_4i64(<4 x i64> %a, <4 x i64> * %pb) nounwind uwtab
; AVX1-NEXT: retq
;
; AVX2-LABEL: ld1_hi0_hi1_4i64:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -607,7 +607,7 @@ entry:

define <8 x i32> @ld0_hi0_lo1_8i32(<8 x i32> * %pa, <8 x i32> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld0_hi0_lo1_8i32:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,2,3,4]
@@ -617,7 +617,7 @@ define <8 x i32> @ld0_hi0_lo1_8i32(<8 x i32> * %pa, <8 x i32> %b) nounwind uwtab
; AVX1-NEXT: retq
;
; AVX2-LABEL: ld0_hi0_lo1_8i32:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX2-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -630,7 +630,7 @@ entry:

define <8 x i32> @ld1_hi0_hi1_8i32(<8 x i32> %a, <8 x i32> * %pb) nounwind uwtable readnone ssp {
; AVX1-LABEL: ld1_hi0_hi1_8i32:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,2,3,4]
@@ -640,7 +640,7 @@ define <8 x i32> @ld1_hi0_hi1_8i32(<8 x i32> %a, <8 x i32> * %pb) nounwind uwtab
; AVX1-NEXT: retq
;
; AVX2-LABEL: ld1_hi0_hi1_8i32:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
-- 
2.7.4