From 428a4e6374e34dd1a810823e6a59bd5ea6cafd0a Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Mon, 6 Nov 2017 22:49:04 +0000
Subject: [PATCH] [X86] Make FeatureAVX512 imply FeatureF16C.

The EVEX to VEX pass is already assuming this is true under AVX512VL. We
had special patterns to use zmm instructions if VLX and F16C weren't
available.

Instead just make AVX512 imply F16C to make the EVEX to VEX behavior
explicitly legal and remove the extra patterns.

All known CPUs with AVX512 have F16C, so this should be safe for now.

llvm-svn: 317521
---
 llvm/lib/Target/X86/X86.td                       |    8 +-
 llvm/lib/Target/X86/X86ISelLowering.cpp          |    3 +-
 llvm/lib/Target/X86/X86InstrAVX512.td            |   29 -
 llvm/lib/Target/X86/X86InstrInfo.td              |    1 -
 llvm/test/CodeGen/X86/vec_fp_to_int.ll           |   74 +-
 llvm/test/CodeGen/X86/vector-half-conversions.ll | 2718 ++++++----------------
 6 files changed, 781 insertions(+), 2052 deletions(-)

diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td
index 4979037..34f2956 100644
--- a/llvm/lib/Target/X86/X86.td
+++ b/llvm/lib/Target/X86/X86.td
@@ -119,9 +119,12 @@ def FeatureAVX2 : SubtargetFeature<"avx2", "X86SSELevel", "AVX2",
 def FeatureFMA : SubtargetFeature<"fma", "HasFMA", "true",
                                   "Enable three-operand fused multiple-add",
                                   [FeatureAVX]>;
+def FeatureF16C : SubtargetFeature<"f16c", "HasF16C", "true",
+                   "Support 16-bit floating point conversion instructions",
+                   [FeatureAVX]>;
 def FeatureAVX512 : SubtargetFeature<"avx512f", "X86SSELevel", "AVX512F",
                                      "Enable AVX-512 instructions",
-                                     [FeatureAVX2, FeatureFMA]>;
+                                     [FeatureAVX2, FeatureFMA, FeatureF16C]>;
 def FeatureERI : SubtargetFeature<"avx512er", "HasERI", "true",
                    "Enable AVX-512 Exponential and Reciprocal Instructions",
                    [FeatureAVX512]>;
@@ -177,9 +180,6 @@ def FeatureMOVBE : SubtargetFeature<"movbe", "HasMOVBE", "true",
                                     "Support MOVBE instruction">;
 def FeatureRDRAND : SubtargetFeature<"rdrnd", "HasRDRAND", "true",
                                      "Support RDRAND instruction">;
-def FeatureF16C : SubtargetFeature<"f16c", "HasF16C", "true",
-                   "Support 16-bit floating point conversion instructions",
-                   [FeatureAVX]>;
 def FeatureFSGSBase : SubtargetFeature<"fsgsbase", "HasFSGSBase", "true",
                                        "Support FS/GS Base instructions">;
 def FeatureLZCNT : SubtargetFeature<"lzcnt", "HasLZCNT", "true",
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 6a67767..3aa0878 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -380,8 +380,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   // Special handling for half-precision floating point conversions.
   // If we don't have F16C support, then lower half float conversions
   // into library calls.
-  if (Subtarget.useSoftFloat() ||
-      (!Subtarget.hasF16C() && !Subtarget.hasAVX512())) {
+  if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
     setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
     setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
   }
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index d916441..c2693ea 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -7267,35 +7267,6 @@ let Predicates = [HasVLX] in {
              (VCVTPS2PHZ128rr (COPY_TO_REGCLASS FR32X:$src, VR128X), 4)),
              FR32X)) >;
 }
-// Patterns for matching float to half-float conversion when AVX512 is supported
-// but F16C isn't. In that case we have to use 512-bit vectors.
-let Predicates = [HasAVX512, NoVLX, NoF16C] in {
-  def : Pat<(fp_to_f16 FR32X:$src),
-            (i16 (EXTRACT_SUBREG
-                  (VMOVPDI2DIZrr
-                   (v8i16 (EXTRACT_SUBREG
-                           (VCVTPS2PHZrr
-                            (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
-                             (v4f32 (COPY_TO_REGCLASS FR32X:$src, VR128X)),
-                             sub_xmm), 4), sub_xmm))), sub_16bit))>;
-
-  def : Pat<(f16_to_fp GR16:$src),
-            (f32 (COPY_TO_REGCLASS
-                  (v4f32 (EXTRACT_SUBREG
-                          (VCVTPH2PSZrr
-                           (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)),
-                            (v8i16 (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128X)),
-                            sub_xmm)), sub_xmm)), FR32X))>;
-
-  def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32X:$src))),
-            (f32 (COPY_TO_REGCLASS
-                  (v4f32 (EXTRACT_SUBREG
-                          (VCVTPH2PSZrr
-                           (VCVTPS2PHZrr (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
-                                          (v4f32 (COPY_TO_REGCLASS FR32X:$src, VR128X)),
-                                          sub_xmm), 4)), sub_xmm)), FR32X))>;
-}
-
 // Unordered/Ordered scalar fp compare with Sea and set EFLAGS
 multiclass avx512_ord_cmp_sae<bits<8> opc, X86VectorVTInfo _, string OpcodeStr> {
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index 559a8fc..f00caa1 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -850,7 +850,6 @@ def HasLWP       : Predicate<"Subtarget->hasLWP()">;
 def HasMOVBE     : Predicate<"Subtarget->hasMOVBE()">;
 def HasRDRAND    : Predicate<"Subtarget->hasRDRAND()">;
 def HasF16C      : Predicate<"Subtarget->hasF16C()">;
-def NoF16C       : Predicate<"!Subtarget->hasF16C()">;
 def HasFSGSBase  : Predicate<"Subtarget->hasFSGSBase()">;
 def HasLZCNT     : Predicate<"Subtarget->hasLZCNT()">;
 def HasBMI       : Predicate<"Subtarget->hasBMI()">;
diff --git a/llvm/test/CodeGen/X86/vec_fp_to_int.ll b/llvm/test/CodeGen/X86/vec_fp_to_int.ll
index c6335d7..2f52bab 100644
--- a/llvm/test/CodeGen/X86/vec_fp_to_int.ll
+++ b/llvm/test/CodeGen/X86/vec_fp_to_int.ll
@@ -2288,67 +2288,19 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2 x half> %a) nounwind {
 ; VEX-NEXT:    popq %rax
 ; VEX-NEXT:    retq
 ;
-; AVX512F-LABEL: fptosi_2f16_to_4i32:
-; AVX512F:       # BB#0:
-; AVX512F-NEXT:    # kill: %XMM1 %XMM1 %ZMM1
-; AVX512F-NEXT:    # kill: %XMM0 %XMM0 %ZMM0
-; AVX512F-NEXT:    vcvtps2ph $4, %zmm0, %ymm0
-; AVX512F-NEXT:    vcvtph2ps %ymm0, %zmm0
-; AVX512F-NEXT:    vcvtps2ph $4, %zmm1, %ymm1
-; AVX512F-NEXT:    vcvtph2ps %ymm1, %zmm1
-; AVX512F-NEXT:    vcvttss2si %xmm1, %rax
-; AVX512F-NEXT:    vmovq %rax, %xmm1
-; AVX512F-NEXT:    vcvttss2si %xmm0, %rax
-; AVX512F-NEXT:    vmovq %rax, %xmm0
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
-; AVX512F-NEXT:    vzeroupper
-; AVX512F-NEXT:    retq
-;
-; AVX512VL-LABEL: fptosi_2f16_to_4i32:
-; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512VL-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; AVX512VL-NEXT:    vcvttss2si %xmm1, %rax
-; AVX512VL-NEXT:    vmovq %rax, %xmm1
-; AVX512VL-NEXT:    vcvttss2si %xmm0, %rax
-; AVX512VL-NEXT:    vmovq %rax, %xmm0
-; AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
-; AVX512VL-NEXT:    retq
-;
-; AVX512DQ-LABEL: fptosi_2f16_to_4i32:
-; AVX512DQ:       # BB#0:
-; AVX512DQ-NEXT:    # kill: %XMM1 %XMM1 %ZMM1
-; AVX512DQ-NEXT:    # kill: %XMM0 %XMM0 %ZMM0
-; AVX512DQ-NEXT:    vcvtps2ph $4, %zmm0, %ymm0
-; AVX512DQ-NEXT:    vcvtph2ps %ymm0, %zmm0
-; AVX512DQ-NEXT:    vcvtps2ph $4, %zmm1, %ymm1
-; AVX512DQ-NEXT:    vcvtph2ps %ymm1, %zmm1
-; AVX512DQ-NEXT:    vcvttss2si %xmm1, %rax
-; AVX512DQ-NEXT:    vmovq %rax, %xmm1
-; AVX512DQ-NEXT:    vcvttss2si %xmm0, %rax
-; AVX512DQ-NEXT:    vmovq %rax, %xmm0
-; AVX512DQ-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512DQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
-; AVX512DQ-NEXT:    vzeroupper
-; AVX512DQ-NEXT:    retq
-;
-; AVX512VLDQ-LABEL: fptosi_2f16_to_4i32:
-; AVX512VLDQ:       # BB#0:
-; AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512VLDQ-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; AVX512VLDQ-NEXT:    vcvttss2si %xmm1, %rax
-; AVX512VLDQ-NEXT:    vmovq %rax, %xmm1
-; AVX512VLDQ-NEXT:    vcvttss2si %xmm0, %rax
-; AVX512VLDQ-NEXT:    vmovq %rax, %xmm0
-; AVX512VLDQ-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512VLDQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
-; AVX512VLDQ-NEXT:    retq
+; AVX512-LABEL: fptosi_2f16_to_4i32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX512-NEXT:    vcvttss2si %xmm1, %rax
+; AVX512-NEXT:    vmovq %rax, %xmm1
+; AVX512-NEXT:    vcvttss2si %xmm0, %rax
+; AVX512-NEXT:    vmovq %rax, %xmm0
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; AVX512-NEXT:    retq
   %cvt = fptosi <2 x half> %a to <2 x i32>
   %ext = shufflevector <2 x i32> %cvt, <2 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x i32> %ext
diff --git a/llvm/test/CodeGen/X86/vector-half-conversions.ll b/llvm/test/CodeGen/X86/vector-half-conversions.ll
index 6e664ba..8ee56a4 100644
--- a/llvm/test/CodeGen/X86/vector-half-conversions.ll
+++ b/llvm/test/CodeGen/X86/vector-half-conversions.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+f16c -verify-machineinstrs | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+f16c -verify-machineinstrs | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,-f16c -verify-machineinstrs | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -verify-machineinstrs | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl -verify-machineinstrs | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512VL
 ;
@@ -9,35 +9,12 @@
 ;
 define float @cvt_i16_to_f32(i16 %a0) nounwind {
-; AVX1-LABEL: cvt_i16_to_f32:
-; AVX1:       # BB#0:
-; AVX1-NEXT:    movswl %di, %eax
-; AVX1-NEXT:    vmovd %eax, %xmm0
-; AVX1-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: cvt_i16_to_f32:
-; AVX2:       # BB#0:
-; AVX2-NEXT:    movswl %di, %eax
-; AVX2-NEXT:    vmovd %eax, %xmm0
-; AVX2-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX2-NEXT:    retq
-;
-; AVX512F-LABEL: cvt_i16_to_f32:
-; AVX512F:       # BB#0:
-; AVX512F-NEXT:    movswl %di, %eax
-; AVX512F-NEXT:    vmovd %eax, %xmm0
-; AVX512F-NEXT:    vcvtph2ps %ymm0, %zmm0
-; AVX512F-NEXT:    # kill: %XMM0 %XMM0 %ZMM0
-; AVX512F-NEXT:    vzeroupper
-; AVX512F-NEXT:    retq
-;
-; AVX512VL-LABEL: cvt_i16_to_f32:
-; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    movswl %di, %eax
-; AVX512VL-NEXT:    vmovd %eax, %xmm0
-; AVX512VL-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512VL-NEXT:    retq
+; ALL-LABEL: cvt_i16_to_f32:
+; ALL:       # BB#0:
+; ALL-NEXT: movswl %di, %eax +; ALL-NEXT: vmovd %eax, %xmm0 +; ALL-NEXT: vcvtph2ps %xmm0, %xmm0 +; ALL-NEXT: retq %1 = bitcast i16 %a0 to half %2 = fpext half %1 to float ret float %2 @@ -111,19 +88,18 @@ define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) nounwind { ; AVX512F-NEXT: shrq $48, %rdx ; AVX512F-NEXT: movswl %dx, %edx ; AVX512F-NEXT: vmovd %edx, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0 ; AVX512F-NEXT: movswl %cx, %ecx ; AVX512F-NEXT: vmovd %ecx, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 +; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1 ; AVX512F-NEXT: cwtl ; AVX512F-NEXT: vmovd %eax, %xmm2 -; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2 +; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2 ; AVX512F-NEXT: vmovd %esi, %xmm3 -; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3 +; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3 ; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] ; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] ; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: cvt_4i16_to_4f32: @@ -222,19 +198,18 @@ define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind { ; AVX512F-NEXT: shrq $48, %rdx ; AVX512F-NEXT: movswl %dx, %edx ; AVX512F-NEXT: vmovd %edx, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0 ; AVX512F-NEXT: movswl %cx, %ecx ; AVX512F-NEXT: vmovd %ecx, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 +; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1 ; AVX512F-NEXT: cwtl ; AVX512F-NEXT: vmovd %eax, %xmm2 -; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2 +; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2 ; AVX512F-NEXT: vmovd %esi, %xmm3 -; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3 +; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3 ; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] ; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] ; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: cvt_8i16_to_4f32: @@ -271,201 +246,54 @@ define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind { } define <8 x float> @cvt_8i16_to_8f32(<8 x i16> %a0) nounwind { -; AVX1-LABEL: cvt_8i16_to_8f32: -; AVX1: # BB#0: -; AVX1-NEXT: vpextrq $1, %xmm0, %rdx -; AVX1-NEXT: movq %rdx, %r8 -; AVX1-NEXT: movq %rdx, %r10 -; AVX1-NEXT: movswl %dx, %r9d -; AVX1-NEXT: # kill: %EDX %EDX %RDX -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: shrq $32, %r8 -; AVX1-NEXT: shrq $48, %r10 -; AVX1-NEXT: vmovq %xmm0, %rdi -; AVX1-NEXT: movq %rdi, %rax -; AVX1-NEXT: movq %rdi, %rsi -; AVX1-NEXT: movswl %di, %ecx -; AVX1-NEXT: # kill: %EDI %EDI %RDI -; AVX1-NEXT: shrl $16, %edi -; AVX1-NEXT: shrq $32, %rax -; AVX1-NEXT: shrq $48, %rsi -; AVX1-NEXT: movswl %si, %esi -; AVX1-NEXT: vmovd %esi, %xmm0 -; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX1-NEXT: cwtl -; AVX1-NEXT: vmovd %eax, %xmm1 -; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX1-NEXT: movswl %di, %eax -; AVX1-NEXT: vmovd %eax, %xmm2 -; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX1-NEXT: vmovd %ecx, %xmm3 -; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX1-NEXT: movswl %r10w, %eax -; AVX1-NEXT: vmovd %eax, %xmm4 -; AVX1-NEXT: vcvtph2ps %xmm4, %xmm4 -; AVX1-NEXT: movswl %r8w, %eax -; AVX1-NEXT: vmovd %eax, %xmm5 -; AVX1-NEXT: vcvtph2ps %xmm5, %xmm5 -; AVX1-NEXT: movswl %dx, %eax -; AVX1-NEXT: vmovd %eax, %xmm6 -; AVX1-NEXT: vcvtph2ps %xmm6, %xmm6 -; AVX1-NEXT: vmovd %r9d, %xmm7 -; AVX1-NEXT: vcvtph2ps %xmm7, %xmm7 -; AVX1-NEXT: vinsertps {{.*#+}} xmm6 = 
xmm7[0],xmm6[0],xmm7[2,3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0] -; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 -; AVX1-NEXT: retq -; -; AVX2-LABEL: cvt_8i16_to_8f32: -; AVX2: # BB#0: -; AVX2-NEXT: vpextrq $1, %xmm0, %rdx -; AVX2-NEXT: movq %rdx, %r8 -; AVX2-NEXT: movq %rdx, %r10 -; AVX2-NEXT: movswl %dx, %r9d -; AVX2-NEXT: # kill: %EDX %EDX %RDX -; AVX2-NEXT: shrl $16, %edx -; AVX2-NEXT: shrq $32, %r8 -; AVX2-NEXT: shrq $48, %r10 -; AVX2-NEXT: vmovq %xmm0, %rdi -; AVX2-NEXT: movq %rdi, %rax -; AVX2-NEXT: movq %rdi, %rsi -; AVX2-NEXT: movswl %di, %ecx -; AVX2-NEXT: # kill: %EDI %EDI %RDI -; AVX2-NEXT: shrl $16, %edi -; AVX2-NEXT: shrq $32, %rax -; AVX2-NEXT: shrq $48, %rsi -; AVX2-NEXT: movswl %si, %esi -; AVX2-NEXT: vmovd %esi, %xmm0 -; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX2-NEXT: cwtl -; AVX2-NEXT: vmovd %eax, %xmm1 -; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX2-NEXT: movswl %di, %eax -; AVX2-NEXT: vmovd %eax, %xmm2 -; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX2-NEXT: vmovd %ecx, %xmm3 -; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX2-NEXT: movswl %r10w, %eax -; AVX2-NEXT: vmovd %eax, %xmm4 -; AVX2-NEXT: vcvtph2ps %xmm4, %xmm4 -; AVX2-NEXT: movswl %r8w, %eax -; AVX2-NEXT: vmovd %eax, %xmm5 -; AVX2-NEXT: vcvtph2ps %xmm5, %xmm5 -; AVX2-NEXT: movswl %dx, %eax -; AVX2-NEXT: vmovd %eax, %xmm6 -; AVX2-NEXT: vcvtph2ps %xmm6, %xmm6 -; AVX2-NEXT: vmovd %r9d, %xmm7 -; AVX2-NEXT: vcvtph2ps %xmm7, %xmm7 -; AVX2-NEXT: vinsertps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[2,3] -; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3] -; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0] -; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] -; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] -; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX2-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 -; AVX2-NEXT: retq -; -; AVX512F-LABEL: cvt_8i16_to_8f32: -; AVX512F: # BB#0: -; AVX512F-NEXT: vpextrq $1, %xmm0, %rdx -; AVX512F-NEXT: movq %rdx, %r8 -; AVX512F-NEXT: movq %rdx, %r9 -; AVX512F-NEXT: movswl %dx, %r10d -; AVX512F-NEXT: # kill: %EDX %EDX %RDX -; AVX512F-NEXT: shrl $16, %edx -; AVX512F-NEXT: shrq $32, %r8 -; AVX512F-NEXT: shrq $48, %r9 -; AVX512F-NEXT: vmovq %xmm0, %rdi -; AVX512F-NEXT: movq %rdi, %rax -; AVX512F-NEXT: movq %rdi, %rcx -; AVX512F-NEXT: movswl %di, %esi -; AVX512F-NEXT: # kill: %EDI %EDI %RDI -; AVX512F-NEXT: shrl $16, %edi -; AVX512F-NEXT: shrq $32, %rax -; AVX512F-NEXT: shrq $48, %rcx -; AVX512F-NEXT: movswl %cx, %ecx -; AVX512F-NEXT: vmovd %ecx, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 -; AVX512F-NEXT: cwtl -; AVX512F-NEXT: vmovd %eax, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 -; AVX512F-NEXT: movswl %di, %eax -; AVX512F-NEXT: vmovd %eax, %xmm2 -; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2 -; AVX512F-NEXT: vmovd %esi, %xmm3 -; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3 -; AVX512F-NEXT: movswl %r9w, %eax -; AVX512F-NEXT: vmovd %eax, %xmm4 -; AVX512F-NEXT: vcvtph2ps %ymm4, %zmm4 -; AVX512F-NEXT: movswl %r8w, %eax -; AVX512F-NEXT: vmovd %eax, %xmm5 -; AVX512F-NEXT: vcvtph2ps %ymm5, %zmm5 -; AVX512F-NEXT: movswl %dx, %eax -; AVX512F-NEXT: vmovd %eax, %xmm6 -; AVX512F-NEXT: vcvtph2ps %ymm6, %zmm6 -; AVX512F-NEXT: vmovd %r10d, %xmm7 -; AVX512F-NEXT: vcvtph2ps %ymm7, %zmm7 -; 
AVX512F-NEXT: vinsertps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[2,3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX512F-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: cvt_8i16_to_8f32: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vpextrq $1, %xmm0, %rdx -; AVX512VL-NEXT: movq %rdx, %r8 -; AVX512VL-NEXT: movq %rdx, %r10 -; AVX512VL-NEXT: movswl %dx, %r9d -; AVX512VL-NEXT: # kill: %EDX %EDX %RDX -; AVX512VL-NEXT: shrl $16, %edx -; AVX512VL-NEXT: shrq $32, %r8 -; AVX512VL-NEXT: shrq $48, %r10 -; AVX512VL-NEXT: vmovq %xmm0, %rdi -; AVX512VL-NEXT: movq %rdi, %rax -; AVX512VL-NEXT: movq %rdi, %rsi -; AVX512VL-NEXT: movswl %di, %ecx -; AVX512VL-NEXT: # kill: %EDI %EDI %RDI -; AVX512VL-NEXT: shrl $16, %edi -; AVX512VL-NEXT: shrq $32, %rax -; AVX512VL-NEXT: shrq $48, %rsi -; AVX512VL-NEXT: movswl %si, %esi -; AVX512VL-NEXT: vmovd %esi, %xmm0 -; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512VL-NEXT: cwtl -; AVX512VL-NEXT: vmovd %eax, %xmm1 -; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512VL-NEXT: movswl %di, %eax -; AVX512VL-NEXT: vmovd %eax, %xmm2 -; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX512VL-NEXT: vmovd %ecx, %xmm3 -; AVX512VL-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX512VL-NEXT: movswl %r10w, %eax -; AVX512VL-NEXT: vmovd %eax, %xmm4 -; AVX512VL-NEXT: vcvtph2ps %xmm4, %xmm4 -; AVX512VL-NEXT: movswl %r8w, %eax -; AVX512VL-NEXT: vmovd %eax, %xmm5 -; AVX512VL-NEXT: vcvtph2ps %xmm5, %xmm5 -; AVX512VL-NEXT: movswl %dx, %eax -; AVX512VL-NEXT: vmovd %eax, %xmm6 -; AVX512VL-NEXT: vcvtph2ps %xmm6, %xmm6 -; AVX512VL-NEXT: vmovd %r9d, %xmm7 -; AVX512VL-NEXT: vcvtph2ps %xmm7, %xmm7 -; AVX512VL-NEXT: vinsertps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[2,3] -; AVX512VL-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3] -; AVX512VL-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0] -; AVX512VL-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] -; AVX512VL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] -; AVX512VL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX512VL-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 -; AVX512VL-NEXT: retq +; ALL-LABEL: cvt_8i16_to_8f32: +; ALL: # BB#0: +; ALL-NEXT: vpextrq $1, %xmm0, %rdx +; ALL-NEXT: movq %rdx, %r8 +; ALL-NEXT: movq %rdx, %r10 +; ALL-NEXT: movswl %dx, %r9d +; ALL-NEXT: # kill: %EDX %EDX %RDX +; ALL-NEXT: shrl $16, %edx +; ALL-NEXT: shrq $32, %r8 +; ALL-NEXT: shrq $48, %r10 +; ALL-NEXT: vmovq %xmm0, %rdi +; ALL-NEXT: movq %rdi, %rax +; ALL-NEXT: movq %rdi, %rsi +; ALL-NEXT: movswl %di, %ecx +; ALL-NEXT: # kill: %EDI %EDI %RDI +; ALL-NEXT: shrl $16, %edi +; ALL-NEXT: shrq $32, %rax +; ALL-NEXT: shrq $48, %rsi +; ALL-NEXT: movswl %si, %esi +; ALL-NEXT: vmovd %esi, %xmm0 +; ALL-NEXT: vcvtph2ps %xmm0, %xmm0 +; ALL-NEXT: cwtl +; ALL-NEXT: vmovd %eax, %xmm1 +; ALL-NEXT: vcvtph2ps %xmm1, %xmm1 +; ALL-NEXT: movswl %di, %eax +; ALL-NEXT: vmovd %eax, %xmm2 +; ALL-NEXT: vcvtph2ps %xmm2, %xmm2 +; ALL-NEXT: vmovd %ecx, %xmm3 +; ALL-NEXT: vcvtph2ps %xmm3, %xmm3 +; ALL-NEXT: movswl %r10w, %eax +; ALL-NEXT: vmovd %eax, %xmm4 +; ALL-NEXT: vcvtph2ps %xmm4, %xmm4 +; ALL-NEXT: movswl %r8w, %eax +; ALL-NEXT: vmovd %eax, %xmm5 +; ALL-NEXT: vcvtph2ps %xmm5, %xmm5 +; ALL-NEXT: movswl %dx, %eax +; ALL-NEXT: vmovd %eax, %xmm6 +; ALL-NEXT: 
vcvtph2ps %xmm6, %xmm6 +; ALL-NEXT: vmovd %r9d, %xmm7 +; ALL-NEXT: vcvtph2ps %xmm7, %xmm7 +; ALL-NEXT: vinsertps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[2,3] +; ALL-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3] +; ALL-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0] +; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] +; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] +; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; ALL-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 +; ALL-NEXT: retq %1 = bitcast <8 x i16> %a0 to <8 x half> %2 = fpext <8 x half> %1 to <8 x float> ret <8 x float> %2 @@ -664,98 +492,98 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind { ; ; AVX512F-LABEL: cvt_16i16_to_16f32: ; AVX512F: # BB#0: -; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm10 ; AVX512F-NEXT: vmovq %xmm0, %rax ; AVX512F-NEXT: movq %rax, %rcx ; AVX512F-NEXT: shrq $48, %rcx ; AVX512F-NEXT: movswl %cx, %ecx -; AVX512F-NEXT: vmovd %ecx, %xmm2 +; AVX512F-NEXT: vmovd %ecx, %xmm8 ; AVX512F-NEXT: movq %rax, %rcx ; AVX512F-NEXT: shrq $32, %rcx ; AVX512F-NEXT: movswl %cx, %ecx -; AVX512F-NEXT: vmovd %ecx, %xmm3 +; AVX512F-NEXT: vmovd %ecx, %xmm9 ; AVX512F-NEXT: movswl %ax, %ecx ; AVX512F-NEXT: # kill: %EAX %EAX %RAX ; AVX512F-NEXT: shrl $16, %eax ; AVX512F-NEXT: cwtl -; AVX512F-NEXT: vmovd %eax, %xmm4 +; AVX512F-NEXT: vmovd %eax, %xmm11 ; AVX512F-NEXT: vpextrq $1, %xmm0, %rax -; AVX512F-NEXT: vmovd %ecx, %xmm0 +; AVX512F-NEXT: vmovd %ecx, %xmm12 ; AVX512F-NEXT: movq %rax, %rcx ; AVX512F-NEXT: shrq $48, %rcx ; AVX512F-NEXT: movswl %cx, %ecx -; AVX512F-NEXT: vmovd %ecx, %xmm5 +; AVX512F-NEXT: vmovd %ecx, %xmm13 ; AVX512F-NEXT: movq %rax, %rcx ; AVX512F-NEXT: shrq $32, %rcx ; AVX512F-NEXT: movswl %cx, %ecx -; AVX512F-NEXT: vmovd %ecx, %xmm6 +; AVX512F-NEXT: vmovd %ecx, %xmm14 ; AVX512F-NEXT: movswl %ax, %ecx ; AVX512F-NEXT: # kill: %EAX %EAX %RAX ; AVX512F-NEXT: shrl $16, %eax ; AVX512F-NEXT: cwtl -; AVX512F-NEXT: vmovd %eax, %xmm7 -; AVX512F-NEXT: vmovq %xmm1, %rax -; AVX512F-NEXT: vmovd %ecx, %xmm8 +; AVX512F-NEXT: vmovd %eax, %xmm15 +; AVX512F-NEXT: vmovq %xmm10, %rax +; AVX512F-NEXT: vmovd %ecx, %xmm2 ; AVX512F-NEXT: movq %rax, %rcx ; AVX512F-NEXT: shrq $48, %rcx ; AVX512F-NEXT: movswl %cx, %ecx -; AVX512F-NEXT: vmovd %ecx, %xmm9 +; AVX512F-NEXT: vmovd %ecx, %xmm3 ; AVX512F-NEXT: movq %rax, %rcx ; AVX512F-NEXT: shrq $32, %rcx ; AVX512F-NEXT: movswl %cx, %ecx -; AVX512F-NEXT: vmovd %ecx, %xmm10 +; AVX512F-NEXT: vmovd %ecx, %xmm1 ; AVX512F-NEXT: movswl %ax, %ecx ; AVX512F-NEXT: # kill: %EAX %EAX %RAX ; AVX512F-NEXT: shrl $16, %eax ; AVX512F-NEXT: cwtl -; AVX512F-NEXT: vmovd %eax, %xmm11 -; AVX512F-NEXT: vpextrq $1, %xmm1, %rax -; AVX512F-NEXT: vmovd %ecx, %xmm1 +; AVX512F-NEXT: vmovd %eax, %xmm4 +; AVX512F-NEXT: vpextrq $1, %xmm10, %rax +; AVX512F-NEXT: vmovd %ecx, %xmm10 ; AVX512F-NEXT: movq %rax, %rcx ; AVX512F-NEXT: shrq $48, %rcx ; AVX512F-NEXT: movswl %cx, %ecx -; AVX512F-NEXT: vmovd %ecx, %xmm12 +; AVX512F-NEXT: vmovd %ecx, %xmm5 ; AVX512F-NEXT: movq %rax, %rcx ; AVX512F-NEXT: shrq $32, %rcx ; AVX512F-NEXT: movswl %cx, %ecx -; AVX512F-NEXT: vmovd %ecx, %xmm13 +; AVX512F-NEXT: vmovd %ecx, %xmm6 ; AVX512F-NEXT: movl %eax, %ecx ; AVX512F-NEXT: shrl $16, %ecx ; AVX512F-NEXT: movswl %cx, %ecx -; AVX512F-NEXT: vmovd %ecx, %xmm14 +; AVX512F-NEXT: vmovd %ecx, %xmm7 ; AVX512F-NEXT: cwtl -; AVX512F-NEXT: vmovd %eax, %xmm15 -; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm16 -; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3 -; 
AVX512F-NEXT: vcvtph2ps %ymm4, %zmm4 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 -; AVX512F-NEXT: vcvtph2ps %ymm5, %zmm5 -; AVX512F-NEXT: vcvtph2ps %ymm6, %zmm6 -; AVX512F-NEXT: vcvtph2ps %ymm7, %zmm7 -; AVX512F-NEXT: vcvtph2ps %ymm8, %zmm8 -; AVX512F-NEXT: vcvtph2ps %ymm9, %zmm9 -; AVX512F-NEXT: vcvtph2ps %ymm10, %zmm10 -; AVX512F-NEXT: vcvtph2ps %ymm11, %zmm11 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 -; AVX512F-NEXT: vcvtph2ps %ymm12, %zmm12 -; AVX512F-NEXT: vcvtph2ps %ymm13, %zmm13 -; AVX512F-NEXT: vcvtph2ps %ymm14, %zmm14 -; AVX512F-NEXT: vcvtph2ps %ymm15, %zmm15 -; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm15[0],xmm14[0],xmm15[2,3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm13[0],xmm2[3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm12[0] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[2,3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm10[0],xmm1[3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm9[0] -; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 -; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm8[0],xmm7[0],xmm8[2,3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm6[0],xmm2[3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm5[0] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[2,3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm16[0] -; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512F-NEXT: vmovd %eax, %xmm0 +; AVX512F-NEXT: vcvtph2ps %xmm8, %xmm8 +; AVX512F-NEXT: vcvtph2ps %xmm9, %xmm9 +; AVX512F-NEXT: vcvtph2ps %xmm11, %xmm11 +; AVX512F-NEXT: vcvtph2ps %xmm12, %xmm12 +; AVX512F-NEXT: vcvtph2ps %xmm13, %xmm13 +; AVX512F-NEXT: vcvtph2ps %xmm14, %xmm14 +; AVX512F-NEXT: vcvtph2ps %xmm15, %xmm15 +; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2 +; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3 +; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1 +; AVX512F-NEXT: vcvtph2ps %xmm4, %xmm4 +; AVX512F-NEXT: vcvtph2ps %xmm10, %xmm10 +; AVX512F-NEXT: vcvtph2ps %xmm5, %xmm5 +; AVX512F-NEXT: vcvtph2ps %xmm6, %xmm6 +; AVX512F-NEXT: vcvtph2ps %xmm7, %xmm7 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0 +; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[2,3] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm6[0],xmm0[3] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm5[0] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm4 = xmm10[0],xmm4[0],xmm10[2,3] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0,1],xmm1[0],xmm4[3] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0] +; AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm15[0],xmm2[2,3] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm14[0],xmm1[3] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm13[0] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm12[0],xmm11[0],xmm12[2,3] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[0] +; AVX512F-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0 ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: cvt_16i16_to_16f32: @@ -863,35 +691,12 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind { ; define float @load_cvt_i16_to_f32(i16* %a0) nounwind { -; AVX1-LABEL: load_cvt_i16_to_f32: -; AVX1: # BB#0: -; AVX1-NEXT: movswl (%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm0 -; AVX1-NEXT: vcvtph2ps 
%xmm0, %xmm0 -; AVX1-NEXT: retq -; -; AVX2-LABEL: load_cvt_i16_to_f32: -; AVX2: # BB#0: -; AVX2-NEXT: movswl (%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm0 -; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX2-NEXT: retq -; -; AVX512F-LABEL: load_cvt_i16_to_f32: -; AVX512F: # BB#0: -; AVX512F-NEXT: movswl (%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 -; AVX512F-NEXT: # kill: %XMM0 %XMM0 %ZMM0 -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: load_cvt_i16_to_f32: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: movswl (%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm0 -; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512VL-NEXT: retq +; ALL-LABEL: load_cvt_i16_to_f32: +; ALL: # BB#0: +; ALL-NEXT: movswl (%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm0 +; ALL-NEXT: vcvtph2ps %xmm0, %xmm0 +; ALL-NEXT: retq %1 = load i16, i16* %a0 %2 = bitcast i16 %1 to half %3 = fpext half %2 to float @@ -899,142 +704,84 @@ define float @load_cvt_i16_to_f32(i16* %a0) nounwind { } define <4 x float> @load_cvt_4i16_to_4f32(<4 x i16>* %a0) nounwind { -; AVX1-LABEL: load_cvt_4i16_to_4f32: +; ALL-LABEL: load_cvt_4i16_to_4f32: +; ALL: # BB#0: +; ALL-NEXT: movswl 6(%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm0 +; ALL-NEXT: vcvtph2ps %xmm0, %xmm0 +; ALL-NEXT: movswl 4(%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm1 +; ALL-NEXT: vcvtph2ps %xmm1, %xmm1 +; ALL-NEXT: movswl (%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm2 +; ALL-NEXT: vcvtph2ps %xmm2, %xmm2 +; ALL-NEXT: movswl 2(%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm3 +; ALL-NEXT: vcvtph2ps %xmm3, %xmm3 +; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3] +; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] +; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; ALL-NEXT: retq + %1 = load <4 x i16>, <4 x i16>* %a0 + %2 = bitcast <4 x i16> %1 to <4 x half> + %3 = fpext <4 x half> %2 to <4 x float> + ret <4 x float> %3 +} + +define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind { +; AVX1-LABEL: load_cvt_8i16_to_4f32: ; AVX1: # BB#0: -; AVX1-NEXT: movswl 6(%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm0 +; AVX1-NEXT: movq (%rdi), %rax +; AVX1-NEXT: movq %rax, %rcx +; AVX1-NEXT: movq %rax, %rdx +; AVX1-NEXT: movswl %ax, %esi +; AVX1-NEXT: # kill: %EAX %EAX %RAX +; AVX1-NEXT: shrl $16, %eax +; AVX1-NEXT: shrq $32, %rcx +; AVX1-NEXT: shrq $48, %rdx +; AVX1-NEXT: movswl %dx, %edx +; AVX1-NEXT: vmovd %edx, %xmm0 ; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX1-NEXT: movswl 4(%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm1 +; AVX1-NEXT: movswl %cx, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm1 ; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX1-NEXT: movswl (%rdi), %eax +; AVX1-NEXT: cwtl ; AVX1-NEXT: vmovd %eax, %xmm2 ; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX1-NEXT: movswl 2(%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm3 +; AVX1-NEXT: vmovd %esi, %xmm3 ; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] ; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] ; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] ; AVX1-NEXT: retq ; -; AVX2-LABEL: load_cvt_4i16_to_4f32: +; AVX2-LABEL: load_cvt_8i16_to_4f32: ; AVX2: # BB#0: -; AVX2-NEXT: movswl 6(%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm0 +; AVX2-NEXT: movq (%rdi), %rax +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: movq %rax, %rdx +; AVX2-NEXT: movswl %ax, %esi +; AVX2-NEXT: # kill: %EAX %EAX %RAX +; AVX2-NEXT: shrl $16, %eax +; AVX2-NEXT: shrq $32, %rcx +; AVX2-NEXT: 
shrq $48, %rdx +; AVX2-NEXT: movswl %dx, %edx +; AVX2-NEXT: vmovd %edx, %xmm0 ; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX2-NEXT: movswl 4(%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm1 +; AVX2-NEXT: movswl %cx, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm1 ; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX2-NEXT: movswl (%rdi), %eax +; AVX2-NEXT: cwtl ; AVX2-NEXT: vmovd %eax, %xmm2 ; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX2-NEXT: movswl 2(%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm3 +; AVX2-NEXT: vmovd %esi, %xmm3 ; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3] +; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] ; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] ; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] ; AVX2-NEXT: retq ; -; AVX512F-LABEL: load_cvt_4i16_to_4f32: -; AVX512F: # BB#0: -; AVX512F-NEXT: movswl 6(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 -; AVX512F-NEXT: movswl 4(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 -; AVX512F-NEXT: movswl (%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm2 -; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2 -; AVX512F-NEXT: movswl 2(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm3 -; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3 -; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: load_cvt_4i16_to_4f32: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: movswl 6(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm0 -; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512VL-NEXT: movswl 4(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm1 -; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512VL-NEXT: movswl (%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm2 -; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX512VL-NEXT: movswl 2(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm3 -; AVX512VL-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX512VL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3] -; AVX512VL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] -; AVX512VL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX512VL-NEXT: retq - %1 = load <4 x i16>, <4 x i16>* %a0 - %2 = bitcast <4 x i16> %1 to <4 x half> - %3 = fpext <4 x half> %2 to <4 x float> - ret <4 x float> %3 -} - -define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind { -; AVX1-LABEL: load_cvt_8i16_to_4f32: -; AVX1: # BB#0: -; AVX1-NEXT: movq (%rdi), %rax -; AVX1-NEXT: movq %rax, %rcx -; AVX1-NEXT: movq %rax, %rdx -; AVX1-NEXT: movswl %ax, %esi -; AVX1-NEXT: # kill: %EAX %EAX %RAX -; AVX1-NEXT: shrl $16, %eax -; AVX1-NEXT: shrq $32, %rcx -; AVX1-NEXT: shrq $48, %rdx -; AVX1-NEXT: movswl %dx, %edx -; AVX1-NEXT: vmovd %edx, %xmm0 -; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX1-NEXT: movswl %cx, %ecx -; AVX1-NEXT: vmovd %ecx, %xmm1 -; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX1-NEXT: cwtl -; AVX1-NEXT: vmovd %eax, %xmm2 -; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX1-NEXT: vmovd %esi, %xmm3 -; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX1-NEXT: retq -; -; AVX2-LABEL: load_cvt_8i16_to_4f32: -; AVX2: # BB#0: -; AVX2-NEXT: movq (%rdi), %rax -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: 
movq %rax, %rdx -; AVX2-NEXT: movswl %ax, %esi -; AVX2-NEXT: # kill: %EAX %EAX %RAX -; AVX2-NEXT: shrl $16, %eax -; AVX2-NEXT: shrq $32, %rcx -; AVX2-NEXT: shrq $48, %rdx -; AVX2-NEXT: movswl %dx, %edx -; AVX2-NEXT: vmovd %edx, %xmm0 -; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX2-NEXT: movswl %cx, %ecx -; AVX2-NEXT: vmovd %ecx, %xmm1 -; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX2-NEXT: cwtl -; AVX2-NEXT: vmovd %eax, %xmm2 -; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX2-NEXT: vmovd %esi, %xmm3 -; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] -; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] -; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX2-NEXT: retq -; -; AVX512F-LABEL: load_cvt_8i16_to_4f32: +; AVX512F-LABEL: load_cvt_8i16_to_4f32: ; AVX512F: # BB#0: ; AVX512F-NEXT: movq (%rdi), %rax ; AVX512F-NEXT: movq %rax, %rcx @@ -1046,19 +793,18 @@ define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind { ; AVX512F-NEXT: shrq $48, %rdx ; AVX512F-NEXT: movswl %dx, %edx ; AVX512F-NEXT: vmovd %edx, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0 ; AVX512F-NEXT: movswl %cx, %ecx ; AVX512F-NEXT: vmovd %ecx, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 +; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1 ; AVX512F-NEXT: cwtl ; AVX512F-NEXT: vmovd %eax, %xmm2 -; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2 +; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2 ; AVX512F-NEXT: vmovd %esi, %xmm3 -; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3 +; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3 ; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] ; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] ; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: load_cvt_8i16_to_4f32: @@ -1096,145 +842,40 @@ define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind { } define <8 x float> @load_cvt_8i16_to_8f32(<8 x i16>* %a0) nounwind { -; AVX1-LABEL: load_cvt_8i16_to_8f32: -; AVX1: # BB#0: -; AVX1-NEXT: movswl 6(%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm0 -; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX1-NEXT: movswl 4(%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm1 -; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX1-NEXT: movswl (%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm2 -; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX1-NEXT: movswl 2(%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm3 -; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX1-NEXT: movswl 14(%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm4 -; AVX1-NEXT: vcvtph2ps %xmm4, %xmm4 -; AVX1-NEXT: movswl 12(%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm5 -; AVX1-NEXT: vcvtph2ps %xmm5, %xmm5 -; AVX1-NEXT: movswl 8(%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm6 -; AVX1-NEXT: vcvtph2ps %xmm6, %xmm6 -; AVX1-NEXT: movswl 10(%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm7 -; AVX1-NEXT: vcvtph2ps %xmm7, %xmm7 -; AVX1-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0] -; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 -; AVX1-NEXT: retq -; -; AVX2-LABEL: load_cvt_8i16_to_8f32: -; AVX2: # BB#0: -; AVX2-NEXT: movswl 6(%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm0 -; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX2-NEXT: movswl 
4(%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm1 -; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX2-NEXT: movswl (%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm2 -; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX2-NEXT: movswl 2(%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm3 -; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX2-NEXT: movswl 14(%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm4 -; AVX2-NEXT: vcvtph2ps %xmm4, %xmm4 -; AVX2-NEXT: movswl 12(%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm5 -; AVX2-NEXT: vcvtph2ps %xmm5, %xmm5 -; AVX2-NEXT: movswl 8(%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm6 -; AVX2-NEXT: vcvtph2ps %xmm6, %xmm6 -; AVX2-NEXT: movswl 10(%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm7 -; AVX2-NEXT: vcvtph2ps %xmm7, %xmm7 -; AVX2-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3] -; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3] -; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0] -; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3] -; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] -; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX2-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 -; AVX2-NEXT: retq -; -; AVX512F-LABEL: load_cvt_8i16_to_8f32: -; AVX512F: # BB#0: -; AVX512F-NEXT: movswl 6(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 -; AVX512F-NEXT: movswl 4(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 -; AVX512F-NEXT: movswl (%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm2 -; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2 -; AVX512F-NEXT: movswl 2(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm3 -; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3 -; AVX512F-NEXT: movswl 14(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm4 -; AVX512F-NEXT: vcvtph2ps %ymm4, %zmm4 -; AVX512F-NEXT: movswl 12(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm5 -; AVX512F-NEXT: vcvtph2ps %ymm5, %zmm5 -; AVX512F-NEXT: movswl 8(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm6 -; AVX512F-NEXT: vcvtph2ps %ymm6, %zmm6 -; AVX512F-NEXT: movswl 10(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm7 -; AVX512F-NEXT: vcvtph2ps %ymm7, %zmm7 -; AVX512F-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX512F-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: load_cvt_8i16_to_8f32: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: movswl 6(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm0 -; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512VL-NEXT: movswl 4(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm1 -; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512VL-NEXT: movswl (%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm2 -; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX512VL-NEXT: movswl 2(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm3 -; AVX512VL-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX512VL-NEXT: movswl 14(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm4 -; AVX512VL-NEXT: vcvtph2ps %xmm4, %xmm4 -; AVX512VL-NEXT: movswl 12(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm5 -; AVX512VL-NEXT: vcvtph2ps %xmm5, %xmm5 -; AVX512VL-NEXT: movswl 8(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm6 -; AVX512VL-NEXT: vcvtph2ps %xmm6, %xmm6 -; AVX512VL-NEXT: movswl 10(%rdi), %eax -; AVX512VL-NEXT: vmovd 
%eax, %xmm7 -; AVX512VL-NEXT: vcvtph2ps %xmm7, %xmm7 -; AVX512VL-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3] -; AVX512VL-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3] -; AVX512VL-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0] -; AVX512VL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3] -; AVX512VL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] -; AVX512VL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX512VL-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 -; AVX512VL-NEXT: retq +; ALL-LABEL: load_cvt_8i16_to_8f32: +; ALL: # BB#0: +; ALL-NEXT: movswl 6(%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm0 +; ALL-NEXT: vcvtph2ps %xmm0, %xmm0 +; ALL-NEXT: movswl 4(%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm1 +; ALL-NEXT: vcvtph2ps %xmm1, %xmm1 +; ALL-NEXT: movswl (%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm2 +; ALL-NEXT: vcvtph2ps %xmm2, %xmm2 +; ALL-NEXT: movswl 2(%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm3 +; ALL-NEXT: vcvtph2ps %xmm3, %xmm3 +; ALL-NEXT: movswl 14(%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm4 +; ALL-NEXT: vcvtph2ps %xmm4, %xmm4 +; ALL-NEXT: movswl 12(%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm5 +; ALL-NEXT: vcvtph2ps %xmm5, %xmm5 +; ALL-NEXT: movswl 8(%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm6 +; ALL-NEXT: vcvtph2ps %xmm6, %xmm6 +; ALL-NEXT: movswl 10(%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm7 +; ALL-NEXT: vcvtph2ps %xmm7, %xmm7 +; ALL-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3] +; ALL-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3] +; ALL-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0] +; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3] +; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] +; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; ALL-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 +; ALL-NEXT: retq %1 = load <8 x i16>, <8 x i16>* %a0 %2 = bitcast <8 x i16> %1 to <8 x half> %3 = fpext <8 x half> %2 to <8 x float> @@ -1378,65 +1019,65 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind { ; AVX512F: # BB#0: ; AVX512F-NEXT: movswl 6(%rdi), %eax ; AVX512F-NEXT: vmovd %eax, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm16 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm8 ; AVX512F-NEXT: movswl 4(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm17 +; AVX512F-NEXT: vmovd %eax, %xmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm9 ; AVX512F-NEXT: movswl (%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm2 -; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2 +; AVX512F-NEXT: vmovd %eax, %xmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm10 ; AVX512F-NEXT: movswl 2(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm3 -; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3 +; AVX512F-NEXT: vmovd %eax, %xmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm11 ; AVX512F-NEXT: movswl 14(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm4 -; AVX512F-NEXT: vcvtph2ps %ymm4, %zmm4 +; AVX512F-NEXT: vmovd %eax, %xmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm12 ; AVX512F-NEXT: movswl 12(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm5 -; AVX512F-NEXT: vcvtph2ps %ymm5, %zmm5 +; AVX512F-NEXT: vmovd %eax, %xmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm13 ; AVX512F-NEXT: movswl 8(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm6 -; AVX512F-NEXT: vcvtph2ps %ymm6, %zmm6 +; AVX512F-NEXT: vmovd %eax, %xmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm14 ; AVX512F-NEXT: movswl 10(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm7 -; AVX512F-NEXT: vcvtph2ps %ymm7, %zmm7 +; AVX512F-NEXT: vmovd %eax, %xmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm15 ; 
AVX512F-NEXT: movswl 22(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm8 -; AVX512F-NEXT: vcvtph2ps %ymm8, %zmm8 +; AVX512F-NEXT: vmovd %eax, %xmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0 ; AVX512F-NEXT: movswl 20(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm9 -; AVX512F-NEXT: vcvtph2ps %ymm9, %zmm9 +; AVX512F-NEXT: vmovd %eax, %xmm1 +; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1 ; AVX512F-NEXT: movswl 16(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm10 -; AVX512F-NEXT: vcvtph2ps %ymm10, %zmm10 +; AVX512F-NEXT: vmovd %eax, %xmm2 +; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2 ; AVX512F-NEXT: movswl 18(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm11 -; AVX512F-NEXT: vcvtph2ps %ymm11, %zmm11 +; AVX512F-NEXT: vmovd %eax, %xmm3 +; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3 ; AVX512F-NEXT: movswl 30(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm12 -; AVX512F-NEXT: vcvtph2ps %ymm12, %zmm12 +; AVX512F-NEXT: vmovd %eax, %xmm4 +; AVX512F-NEXT: vcvtph2ps %xmm4, %xmm4 ; AVX512F-NEXT: movswl 28(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm13 -; AVX512F-NEXT: vcvtph2ps %ymm13, %zmm13 +; AVX512F-NEXT: vmovd %eax, %xmm5 +; AVX512F-NEXT: vcvtph2ps %xmm5, %xmm5 ; AVX512F-NEXT: movswl 24(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm14 -; AVX512F-NEXT: vcvtph2ps %ymm14, %zmm14 +; AVX512F-NEXT: vmovd %eax, %xmm6 +; AVX512F-NEXT: vcvtph2ps %xmm6, %xmm6 ; AVX512F-NEXT: movswl 26(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm15 -; AVX512F-NEXT: vcvtph2ps %ymm15, %zmm15 -; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm14[0],xmm15[0],xmm14[2,3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm13[0],xmm0[3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm12[0] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm10[0],xmm11[0],xmm10[2,3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm9[0],xmm1[3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm8[0] -; AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 -; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm6[0],xmm7[0],xmm6[2,3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm5[0],xmm1[3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0] +; AVX512F-NEXT: vmovd %eax, %xmm7 +; AVX512F-NEXT: vcvtph2ps %xmm7, %xmm7 +; AVX512F-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0] ; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm17[0],xmm2[3] -; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm16[0] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX512F-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 +; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm14[0],xmm15[0],xmm14[2,3] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm13[0],xmm1[3] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm12[0] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm10[0],xmm11[0],xmm10[2,3] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3] +; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[0] ; AVX512F-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 ; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0 ; AVX512F-NEXT: retq @@ -1518,45 +1159,20 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind { ; define double @cvt_i16_to_f64(i16 %a0) nounwind { -; AVX1-LABEL: cvt_i16_to_f64: -; AVX1: # BB#0: -; AVX1-NEXT: movswl %di, %eax -; AVX1-NEXT: 
vmovd %eax, %xmm0 -; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX1-NEXT: retq -; -; AVX2-LABEL: cvt_i16_to_f64: -; AVX2: # BB#0: -; AVX2-NEXT: movswl %di, %eax -; AVX2-NEXT: vmovd %eax, %xmm0 -; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX2-NEXT: retq -; -; AVX512F-LABEL: cvt_i16_to_f64: -; AVX512F: # BB#0: -; AVX512F-NEXT: movswl %di, %eax -; AVX512F-NEXT: vmovd %eax, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 -; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: cvt_i16_to_f64: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: movswl %di, %eax -; AVX512VL-NEXT: vmovd %eax, %xmm0 -; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512VL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX512VL-NEXT: retq - %1 = bitcast i16 %a0 to half - %2 = fpext half %1 to double - ret double %2 -} - -define <2 x double> @cvt_2i16_to_2f64(<2 x i16> %a0) nounwind { -; AVX1-LABEL: cvt_2i16_to_2f64: +; ALL-LABEL: cvt_i16_to_f64: +; ALL: # BB#0: +; ALL-NEXT: movswl %di, %eax +; ALL-NEXT: vmovd %eax, %xmm0 +; ALL-NEXT: vcvtph2ps %xmm0, %xmm0 +; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 +; ALL-NEXT: retq + %1 = bitcast i16 %a0 to half + %2 = fpext half %1 to double + ret double %2 +} + +define <2 x double> @cvt_2i16_to_2f64(<2 x i16> %a0) nounwind { +; AVX1-LABEL: cvt_2i16_to_2f64: ; AVX1: # BB#0: ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] @@ -1599,13 +1215,12 @@ define <2 x double> @cvt_2i16_to_2f64(<2 x i16> %a0) nounwind { ; AVX512F-NEXT: shrl $16, %eax ; AVX512F-NEXT: cwtl ; AVX512F-NEXT: vmovd %eax, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0 ; AVX512F-NEXT: vmovd %ecx, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 +; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1 ; AVX512F-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 ; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 ; AVX512F-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: cvt_2i16_to_2f64: @@ -1701,15 +1316,15 @@ define <4 x double> @cvt_4i16_to_4f64(<4 x i16> %a0) nounwind { ; AVX512F-NEXT: shrl $16, %edx ; AVX512F-NEXT: movswl %dx, %edx ; AVX512F-NEXT: vmovd %edx, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0 ; AVX512F-NEXT: vmovd %esi, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 +; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1 ; AVX512F-NEXT: movswl %cx, %ecx ; AVX512F-NEXT: vmovd %ecx, %xmm2 -; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2 +; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2 ; AVX512F-NEXT: cwtl ; AVX512F-NEXT: vmovd %eax, %xmm3 -; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3 +; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3 ; AVX512F-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 ; AVX512F-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 ; AVX512F-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] @@ -1791,13 +1406,12 @@ define <2 x double> @cvt_8i16_to_2f64(<8 x i16> %a0) nounwind { ; AVX512F-NEXT: shrl $16, %eax ; AVX512F-NEXT: cwtl ; AVX512F-NEXT: vmovd %eax, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0 ; AVX512F-NEXT: vmovd %ecx, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 +; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1 ; AVX512F-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 ; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 ; AVX512F-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: cvt_8i16_to_2f64: @@ 
-1892,15 +1506,15 @@ define <4 x double> @cvt_8i16_to_4f64(<8 x i16> %a0) nounwind { ; AVX512F-NEXT: shrl $16, %edx ; AVX512F-NEXT: movswl %dx, %edx ; AVX512F-NEXT: vmovd %edx, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0 ; AVX512F-NEXT: vmovd %esi, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 +; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1 ; AVX512F-NEXT: movswl %cx, %ecx ; AVX512F-NEXT: vmovd %ecx, %xmm2 -; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2 +; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2 ; AVX512F-NEXT: cwtl ; AVX512F-NEXT: vmovd %eax, %xmm3 -; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3 +; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3 ; AVX512F-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 ; AVX512F-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 ; AVX512F-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] @@ -2055,115 +1669,60 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind { ; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 ; AVX2-NEXT: retq ; -; AVX512F-LABEL: cvt_8i16_to_8f64: -; AVX512F: # BB#0: -; AVX512F-NEXT: vpextrq $1, %xmm0, %rdx -; AVX512F-NEXT: movq %rdx, %r8 -; AVX512F-NEXT: movl %edx, %r9d -; AVX512F-NEXT: movswl %dx, %r10d -; AVX512F-NEXT: shrq $48, %rdx -; AVX512F-NEXT: shrq $32, %r8 -; AVX512F-NEXT: shrl $16, %r9d -; AVX512F-NEXT: vmovq %xmm0, %rdi -; AVX512F-NEXT: movq %rdi, %rax -; AVX512F-NEXT: movl %edi, %ecx -; AVX512F-NEXT: movswl %di, %esi -; AVX512F-NEXT: shrq $48, %rdi -; AVX512F-NEXT: shrq $32, %rax -; AVX512F-NEXT: shrl $16, %ecx -; AVX512F-NEXT: movswl %cx, %ecx -; AVX512F-NEXT: vmovd %ecx, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 -; AVX512F-NEXT: vmovd %esi, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 -; AVX512F-NEXT: cwtl -; AVX512F-NEXT: vmovd %eax, %xmm2 -; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2 -; AVX512F-NEXT: movswl %di, %eax -; AVX512F-NEXT: vmovd %eax, %xmm3 -; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3 -; AVX512F-NEXT: movswl %r9w, %eax -; AVX512F-NEXT: vmovd %eax, %xmm4 -; AVX512F-NEXT: vcvtph2ps %ymm4, %zmm4 -; AVX512F-NEXT: vmovd %r10d, %xmm5 -; AVX512F-NEXT: vcvtph2ps %ymm5, %zmm5 -; AVX512F-NEXT: movswl %r8w, %eax -; AVX512F-NEXT: vmovd %eax, %xmm6 -; AVX512F-NEXT: vcvtph2ps %ymm6, %zmm6 -; AVX512F-NEXT: movswl %dx, %eax -; AVX512F-NEXT: vmovd %eax, %xmm7 -; AVX512F-NEXT: vcvtph2ps %ymm7, %zmm7 -; AVX512F-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7 -; AVX512F-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6 -; AVX512F-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0] -; AVX512F-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5 -; AVX512F-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4 -; AVX512F-NEXT: vmovlhps {{.*#+}} xmm4 = xmm5[0],xmm4[0] -; AVX512F-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4 -; AVX512F-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 -; AVX512F-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 -; AVX512F-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] -; AVX512F-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 -; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX512F-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX512F-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0 -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: cvt_8i16_to_8f64: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vpextrq $1, %xmm0, %rdx -; AVX512VL-NEXT: movq %rdx, %r8 -; AVX512VL-NEXT: movl %edx, %r10d -; AVX512VL-NEXT: movswl %dx, %r9d -; AVX512VL-NEXT: shrq $48, %rdx -; AVX512VL-NEXT: shrq $32, %r8 -; AVX512VL-NEXT: shrl $16, %r10d -; AVX512VL-NEXT: vmovq %xmm0, %rdi -; AVX512VL-NEXT: movq %rdi, %rax -; AVX512VL-NEXT: movl %edi, %esi -; AVX512VL-NEXT: movswl %di, %ecx -; AVX512VL-NEXT: shrq $48, %rdi -; 
AVX512VL-NEXT: shrq $32, %rax -; AVX512VL-NEXT: shrl $16, %esi -; AVX512VL-NEXT: movswl %si, %esi -; AVX512VL-NEXT: vmovd %esi, %xmm0 -; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512VL-NEXT: vmovd %ecx, %xmm1 -; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512VL-NEXT: cwtl -; AVX512VL-NEXT: vmovd %eax, %xmm2 -; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX512VL-NEXT: movswl %di, %eax -; AVX512VL-NEXT: vmovd %eax, %xmm3 -; AVX512VL-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX512VL-NEXT: movswl %r10w, %eax -; AVX512VL-NEXT: vmovd %eax, %xmm4 -; AVX512VL-NEXT: vcvtph2ps %xmm4, %xmm4 -; AVX512VL-NEXT: vmovd %r9d, %xmm5 -; AVX512VL-NEXT: vcvtph2ps %xmm5, %xmm5 -; AVX512VL-NEXT: movswl %r8w, %eax -; AVX512VL-NEXT: vmovd %eax, %xmm6 -; AVX512VL-NEXT: vcvtph2ps %xmm6, %xmm6 -; AVX512VL-NEXT: movswl %dx, %eax -; AVX512VL-NEXT: vmovd %eax, %xmm7 -; AVX512VL-NEXT: vcvtph2ps %xmm7, %xmm7 -; AVX512VL-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7 -; AVX512VL-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6 -; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0] -; AVX512VL-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5 -; AVX512VL-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4 -; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm4 = xmm5[0],xmm4[0] -; AVX512VL-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4 -; AVX512VL-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 -; AVX512VL-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 -; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] -; AVX512VL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 -; AVX512VL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX512VL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX512VL-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0 -; AVX512VL-NEXT: retq +; AVX512-LABEL: cvt_8i16_to_8f64: +; AVX512: # BB#0: +; AVX512-NEXT: vpextrq $1, %xmm0, %rdx +; AVX512-NEXT: movq %rdx, %r8 +; AVX512-NEXT: movl %edx, %r10d +; AVX512-NEXT: movswl %dx, %r9d +; AVX512-NEXT: shrq $48, %rdx +; AVX512-NEXT: shrq $32, %r8 +; AVX512-NEXT: shrl $16, %r10d +; AVX512-NEXT: vmovq %xmm0, %rdi +; AVX512-NEXT: movq %rdi, %rax +; AVX512-NEXT: movl %edi, %esi +; AVX512-NEXT: movswl %di, %ecx +; AVX512-NEXT: shrq $48, %rdi +; AVX512-NEXT: shrq $32, %rax +; AVX512-NEXT: shrl $16, %esi +; AVX512-NEXT: movswl %si, %esi +; AVX512-NEXT: vmovd %esi, %xmm0 +; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 +; AVX512-NEXT: vmovd %ecx, %xmm1 +; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 +; AVX512-NEXT: cwtl +; AVX512-NEXT: vmovd %eax, %xmm2 +; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2 +; AVX512-NEXT: movswl %di, %eax +; AVX512-NEXT: vmovd %eax, %xmm3 +; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3 +; AVX512-NEXT: movswl %r10w, %eax +; AVX512-NEXT: vmovd %eax, %xmm4 +; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4 +; AVX512-NEXT: vmovd %r9d, %xmm5 +; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5 +; AVX512-NEXT: movswl %r8w, %eax +; AVX512-NEXT: vmovd %eax, %xmm6 +; AVX512-NEXT: vcvtph2ps %xmm6, %xmm6 +; AVX512-NEXT: movswl %dx, %eax +; AVX512-NEXT: vmovd %eax, %xmm7 +; AVX512-NEXT: vcvtph2ps %xmm7, %xmm7 +; AVX512-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7 +; AVX512-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6 +; AVX512-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0] +; AVX512-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5 +; AVX512-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4 +; AVX512-NEXT: vmovlhps {{.*#+}} xmm4 = xmm5[0],xmm4[0] +; AVX512-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4 +; AVX512-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 +; AVX512-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 +; AVX512-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; AVX512-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 +; AVX512-NEXT: vcvtss2sd %xmm0, %xmm0, 
%xmm0 +; AVX512-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX512-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX512-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0 +; AVX512-NEXT: retq %1 = bitcast <8 x i16> %a0 to <8 x half> %2 = fpext <8 x half> %1 to <8 x double> ret <8 x double> %2 @@ -2174,38 +1733,13 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind { ; define double @load_cvt_i16_to_f64(i16* %a0) nounwind { -; AVX1-LABEL: load_cvt_i16_to_f64: -; AVX1: # BB#0: -; AVX1-NEXT: movswl (%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm0 -; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX1-NEXT: retq -; -; AVX2-LABEL: load_cvt_i16_to_f64: -; AVX2: # BB#0: -; AVX2-NEXT: movswl (%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm0 -; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX2-NEXT: retq -; -; AVX512F-LABEL: load_cvt_i16_to_f64: -; AVX512F: # BB#0: -; AVX512F-NEXT: movswl (%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 -; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: load_cvt_i16_to_f64: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: movswl (%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm0 -; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512VL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX512VL-NEXT: retq +; ALL-LABEL: load_cvt_i16_to_f64: +; ALL: # BB#0: +; ALL-NEXT: movswl (%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm0 +; ALL-NEXT: vcvtph2ps %xmm0, %xmm0 +; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 +; ALL-NEXT: retq %1 = load i16, i16* %a0 %2 = bitcast i16 %1 to half %3 = fpext half %2 to double @@ -2213,58 +1747,18 @@ define double @load_cvt_i16_to_f64(i16* %a0) nounwind { } define <2 x double> @load_cvt_2i16_to_2f64(<2 x i16>* %a0) nounwind { -; AVX1-LABEL: load_cvt_2i16_to_2f64: -; AVX1: # BB#0: -; AVX1-NEXT: movswl (%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm0 -; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX1-NEXT: movswl 2(%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm1 -; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX1-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX1-NEXT: retq -; -; AVX2-LABEL: load_cvt_2i16_to_2f64: -; AVX2: # BB#0: -; AVX2-NEXT: movswl (%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm0 -; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX2-NEXT: movswl 2(%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm1 -; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX2-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 -; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX2-NEXT: retq -; -; AVX512F-LABEL: load_cvt_2i16_to_2f64: -; AVX512F: # BB#0: -; AVX512F-NEXT: movswl (%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 -; AVX512F-NEXT: movswl 2(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 -; AVX512F-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 -; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX512F-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: load_cvt_2i16_to_2f64: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: movswl (%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm0 -; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512VL-NEXT: movswl 2(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm1 -; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512VL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 -; AVX512VL-NEXT: 
vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX512VL-NEXT: retq +; ALL-LABEL: load_cvt_2i16_to_2f64: +; ALL: # BB#0: +; ALL-NEXT: movswl (%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm0 +; ALL-NEXT: vcvtph2ps %xmm0, %xmm0 +; ALL-NEXT: movswl 2(%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm1 +; ALL-NEXT: vcvtph2ps %xmm1, %xmm1 +; ALL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 +; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 +; ALL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; ALL-NEXT: retq %1 = load <2 x i16>, <2 x i16>* %a0 %2 = bitcast <2 x i16> %1 to <2 x half> %3 = fpext <2 x half> %2 to <2 x double> @@ -2272,97 +1766,28 @@ define <2 x double> @load_cvt_2i16_to_2f64(<2 x i16>* %a0) nounwind { } define <4 x double> @load_cvt_4i16_to_4f64(<4 x i16>* %a0) nounwind { -; AVX1-LABEL: load_cvt_4i16_to_4f64: -; AVX1: # BB#0: -; AVX1-NEXT: movswl (%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm0 -; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX1-NEXT: movswl 2(%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm1 -; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX1-NEXT: movswl 4(%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm2 -; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX1-NEXT: movswl 6(%rdi), %eax -; AVX1-NEXT: vmovd %eax, %xmm3 -; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX1-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] -; AVX1-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX1-NEXT: retq -; -; AVX2-LABEL: load_cvt_4i16_to_4f64: -; AVX2: # BB#0: -; AVX2-NEXT: movswl (%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm0 -; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX2-NEXT: movswl 2(%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm1 -; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX2-NEXT: movswl 4(%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm2 -; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX2-NEXT: movswl 6(%rdi), %eax -; AVX2-NEXT: vmovd %eax, %xmm3 -; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX2-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 -; AVX2-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 -; AVX2-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] -; AVX2-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 -; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX2-NEXT: retq -; -; AVX512F-LABEL: load_cvt_4i16_to_4f64: -; AVX512F: # BB#0: -; AVX512F-NEXT: movswl (%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 -; AVX512F-NEXT: movswl 2(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 -; AVX512F-NEXT: movswl 4(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm2 -; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2 -; AVX512F-NEXT: movswl 6(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm3 -; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3 -; AVX512F-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 -; AVX512F-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 -; AVX512F-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] -; AVX512F-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 -; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX512F-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: load_cvt_4i16_to_4f64: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: movswl (%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm0 -; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512VL-NEXT: 
movswl 2(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm1 -; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512VL-NEXT: movswl 4(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm2 -; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX512VL-NEXT: movswl 6(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm3 -; AVX512VL-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX512VL-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 -; AVX512VL-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 -; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] -; AVX512VL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 -; AVX512VL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX512VL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX512VL-NEXT: retq +; ALL-LABEL: load_cvt_4i16_to_4f64: +; ALL: # BB#0: +; ALL-NEXT: movswl (%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm0 +; ALL-NEXT: vcvtph2ps %xmm0, %xmm0 +; ALL-NEXT: movswl 2(%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm1 +; ALL-NEXT: vcvtph2ps %xmm1, %xmm1 +; ALL-NEXT: movswl 4(%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm2 +; ALL-NEXT: vcvtph2ps %xmm2, %xmm2 +; ALL-NEXT: movswl 6(%rdi), %eax +; ALL-NEXT: vmovd %eax, %xmm3 +; ALL-NEXT: vcvtph2ps %xmm3, %xmm3 +; ALL-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 +; ALL-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 +; ALL-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; ALL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 +; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 +; ALL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; ALL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; ALL-NEXT: retq %1 = load <4 x i16>, <4 x i16>* %a0 %2 = bitcast <4 x i16> %1 to <4 x half> %3 = fpext <4 x half> %2 to <4 x double> @@ -2439,15 +1864,15 @@ define <4 x double> @load_cvt_8i16_to_4f64(<8 x i16>* %a0) nounwind { ; AVX512F-NEXT: shrl $16, %edx ; AVX512F-NEXT: movswl %dx, %edx ; AVX512F-NEXT: vmovd %edx, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0 ; AVX512F-NEXT: vmovd %esi, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 +; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1 ; AVX512F-NEXT: movswl %cx, %ecx ; AVX512F-NEXT: vmovd %ecx, %xmm2 -; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2 +; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2 ; AVX512F-NEXT: cwtl ; AVX512F-NEXT: vmovd %eax, %xmm3 -; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3 +; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3 ; AVX512F-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 ; AVX512F-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 ; AVX512F-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] @@ -2579,91 +2004,48 @@ define <8 x double> @load_cvt_8i16_to_8f64(<8 x i16>* %a0) nounwind { ; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 ; AVX2-NEXT: retq ; -; AVX512F-LABEL: load_cvt_8i16_to_8f64: -; AVX512F: # BB#0: -; AVX512F-NEXT: movswl (%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 -; AVX512F-NEXT: movswl 2(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 -; AVX512F-NEXT: movswl 4(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm2 -; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2 -; AVX512F-NEXT: movswl 6(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm3 -; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3 -; AVX512F-NEXT: movswl 8(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm4 -; AVX512F-NEXT: vcvtph2ps %ymm4, %zmm4 -; AVX512F-NEXT: movswl 10(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm5 -; AVX512F-NEXT: vcvtph2ps %ymm5, %zmm5 -; AVX512F-NEXT: movswl 12(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm6 -; AVX512F-NEXT: vcvtph2ps %ymm6, %zmm6 -; AVX512F-NEXT: movswl 14(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm7 -; 
AVX512F-NEXT: vcvtph2ps %ymm7, %zmm7 -; AVX512F-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7 -; AVX512F-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6 -; AVX512F-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0] -; AVX512F-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5 -; AVX512F-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4 -; AVX512F-NEXT: vmovlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0] -; AVX512F-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4 -; AVX512F-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 -; AVX512F-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 -; AVX512F-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] -; AVX512F-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 -; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX512F-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX512F-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0 -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: load_cvt_8i16_to_8f64: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: movswl (%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm0 -; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512VL-NEXT: movswl 2(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm1 -; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512VL-NEXT: movswl 4(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm2 -; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX512VL-NEXT: movswl 6(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm3 -; AVX512VL-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX512VL-NEXT: movswl 8(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm4 -; AVX512VL-NEXT: vcvtph2ps %xmm4, %xmm4 -; AVX512VL-NEXT: movswl 10(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm5 -; AVX512VL-NEXT: vcvtph2ps %xmm5, %xmm5 -; AVX512VL-NEXT: movswl 12(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm6 -; AVX512VL-NEXT: vcvtph2ps %xmm6, %xmm6 -; AVX512VL-NEXT: movswl 14(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm7 -; AVX512VL-NEXT: vcvtph2ps %xmm7, %xmm7 -; AVX512VL-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7 -; AVX512VL-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6 -; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0] -; AVX512VL-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5 -; AVX512VL-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4 -; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0] -; AVX512VL-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4 -; AVX512VL-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 -; AVX512VL-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 -; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] -; AVX512VL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 -; AVX512VL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX512VL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX512VL-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0 -; AVX512VL-NEXT: retq +; AVX512-LABEL: load_cvt_8i16_to_8f64: +; AVX512: # BB#0: +; AVX512-NEXT: movswl (%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm0 +; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 +; AVX512-NEXT: movswl 2(%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm1 +; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 +; AVX512-NEXT: movswl 4(%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm2 +; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2 +; AVX512-NEXT: movswl 6(%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm3 +; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3 +; AVX512-NEXT: movswl 8(%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm4 +; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4 +; AVX512-NEXT: movswl 10(%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm5 +; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5 +; AVX512-NEXT: movswl 12(%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm6 +; AVX512-NEXT: vcvtph2ps %xmm6, %xmm6 +; AVX512-NEXT: movswl 14(%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm7 +; AVX512-NEXT: 
vcvtph2ps %xmm7, %xmm7 +; AVX512-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7 +; AVX512-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6 +; AVX512-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0] +; AVX512-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5 +; AVX512-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4 +; AVX512-NEXT: vmovlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0] +; AVX512-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4 +; AVX512-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 +; AVX512-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 +; AVX512-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; AVX512-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 +; AVX512-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 +; AVX512-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX512-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX512-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0 +; AVX512-NEXT: retq %1 = load <8 x i16>, <8 x i16>* %a0 %2 = bitcast <8 x i16> %1 to <8 x half> %3 = fpext <8 x half> %2 to <8 x double> @@ -2675,138 +2057,41 @@ define <8 x double> @load_cvt_8i16_to_8f64(<8 x i16>* %a0) nounwind { ; define i16 @cvt_f32_to_i16(float %a0) nounwind { -; AVX1-LABEL: cvt_f32_to_i16: -; AVX1: # BB#0: -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %eax -; AVX1-NEXT: # kill: %AX %AX %EAX -; AVX1-NEXT: retq -; -; AVX2-LABEL: cvt_f32_to_i16: -; AVX2: # BB#0: -; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX2-NEXT: vmovd %xmm0, %eax -; AVX2-NEXT: # kill: %AX %AX %EAX -; AVX2-NEXT: retq -; -; AVX512F-LABEL: cvt_f32_to_i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %XMM0 %XMM0 %ZMM0 -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vmovd %xmm0, %eax -; AVX512F-NEXT: # kill: %AX %AX %EAX -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: cvt_f32_to_i16: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovd %xmm0, %eax -; AVX512VL-NEXT: # kill: %AX %AX %EAX -; AVX512VL-NEXT: retq +; ALL-LABEL: cvt_f32_to_i16: +; ALL: # BB#0: +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; ALL-NEXT: vmovd %xmm0, %eax +; ALL-NEXT: # kill: %AX %AX %EAX +; ALL-NEXT: retq %1 = fptrunc float %a0 to half %2 = bitcast half %1 to i16 ret i16 %2 } define <4 x i16> @cvt_4f32_to_4i16(<4 x float> %a0) nounwind { -; AVX1-LABEL: cvt_4f32_to_4i16: -; AVX1: # BB#0: -; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %eax -; AVX1-NEXT: shll $16, %eax -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %ecx -; AVX1-NEXT: movzwl %cx, %ecx -; AVX1-NEXT: orl %eax, %ecx -; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %eax -; AVX1-NEXT: shll $16, %eax -; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %edx -; AVX1-NEXT: movzwl %dx, %edx -; AVX1-NEXT: orl %eax, %edx -; AVX1-NEXT: shlq $32, %rdx -; AVX1-NEXT: orq %rcx, %rdx -; AVX1-NEXT: vmovq %rdx, %xmm0 -; AVX1-NEXT: retq -; -; AVX2-LABEL: cvt_4f32_to_4i16: -; AVX2: # BB#0: -; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %eax -; AVX2-NEXT: shll $16, %eax -; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %ecx -; AVX2-NEXT: movzwl %cx, %ecx -; AVX2-NEXT: orl %eax, %ecx -; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %eax -; AVX2-NEXT: shll $16, %eax -; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX2-NEXT: vcvtps2ph $4, 
%xmm0, %xmm0 -; AVX2-NEXT: vmovd %xmm0, %edx -; AVX2-NEXT: movzwl %dx, %edx -; AVX2-NEXT: orl %eax, %edx -; AVX2-NEXT: shlq $32, %rdx -; AVX2-NEXT: orq %rcx, %rdx -; AVX2-NEXT: vmovq %rdx, %xmm0 -; AVX2-NEXT: retq -; -; AVX512F-LABEL: cvt_4f32_to_4i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %XMM0 %XMM0 %ZMM0 -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: shll $16, %ecx -; AVX512F-NEXT: orl %eax, %ecx -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax -; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vmovd %xmm0, %edx -; AVX512F-NEXT: shll $16, %edx -; AVX512F-NEXT: orl %eax, %edx -; AVX512F-NEXT: shlq $32, %rdx -; AVX512F-NEXT: orq %rcx, %rdx -; AVX512F-NEXT: vmovq %rdx, %xmm0 -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: cvt_4f32_to_4i16: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: shll $16, %eax -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %ecx -; AVX512VL-NEXT: movzwl %cx, %ecx -; AVX512VL-NEXT: orl %eax, %ecx -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: shll $16, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovd %xmm0, %edx -; AVX512VL-NEXT: movzwl %dx, %edx -; AVX512VL-NEXT: orl %eax, %edx -; AVX512VL-NEXT: shlq $32, %rdx -; AVX512VL-NEXT: orq %rcx, %rdx -; AVX512VL-NEXT: vmovq %rdx, %xmm0 -; AVX512VL-NEXT: retq +; ALL-LABEL: cvt_4f32_to_4i16: +; ALL: # BB#0: +; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: shll $16, %eax +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 +; ALL-NEXT: vmovd %xmm1, %ecx +; ALL-NEXT: movzwl %cx, %ecx +; ALL-NEXT: orl %eax, %ecx +; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: shll $16, %eax +; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; ALL-NEXT: vmovd %xmm0, %edx +; ALL-NEXT: movzwl %dx, %edx +; ALL-NEXT: orl %eax, %edx +; ALL-NEXT: shlq $32, %rdx +; ALL-NEXT: orq %rcx, %rdx +; ALL-NEXT: vmovq %rdx, %xmm0 +; ALL-NEXT: retq %1 = fptrunc <4 x float> %a0 to <4 x half> %2 = bitcast <4 x half> %1 to <4 x i16> ret <4 x i16> %2 @@ -2865,29 +2150,27 @@ define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) nounwind { ; ; AVX512F-LABEL: cvt_4f32_to_8i16_undef: ; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %XMM0 %XMM0 %ZMM0 -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax ; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 +; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512F-NEXT: vmovd %xmm1, %eax +; AVX512F-NEXT: shll $16, %eax +; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm1 ; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: shll $16, %ecx +; AVX512F-NEXT: movzwl %cx, 
%ecx ; AVX512F-NEXT: orl %eax, %ecx -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 +; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 ; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax -; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 +; AVX512F-NEXT: shll $16, %eax +; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 ; AVX512F-NEXT: vmovd %xmm0, %edx -; AVX512F-NEXT: shll $16, %edx +; AVX512F-NEXT: movzwl %dx, %edx ; AVX512F-NEXT: orl %eax, %edx ; AVX512F-NEXT: shlq $32, %rdx ; AVX512F-NEXT: orq %rcx, %rdx ; AVX512F-NEXT: vmovq %rdx, %xmm0 ; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: cvt_4f32_to_8i16_undef: @@ -2974,208 +2257,30 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind { ; ; AVX512F-LABEL: cvt_4f32_to_8i16_zero: ; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %XMM0 %XMM0 %ZMM0 -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: shll $16, %ecx -; AVX512F-NEXT: orl %eax, %ecx -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax -; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vmovd %xmm0, %edx -; AVX512F-NEXT: shll $16, %edx -; AVX512F-NEXT: orl %eax, %edx -; AVX512F-NEXT: shlq $32, %rdx -; AVX512F-NEXT: orq %rcx, %rdx -; AVX512F-NEXT: vmovq %rdx, %xmm0 -; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: cvt_4f32_to_8i16_zero: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: shll $16, %eax -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %ecx -; AVX512VL-NEXT: movzwl %cx, %ecx -; AVX512VL-NEXT: orl %eax, %ecx -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: shll $16, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovd %xmm0, %edx -; AVX512VL-NEXT: movzwl %dx, %edx -; AVX512VL-NEXT: orl %eax, %edx -; AVX512VL-NEXT: shlq $32, %rdx -; AVX512VL-NEXT: orq %rcx, %rdx -; AVX512VL-NEXT: vmovq %rdx, %xmm0 -; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,2] -; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX512VL-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] -; AVX512VL-NEXT: retq - %1 = fptrunc <4 x float> %a0 to <4 x half> - %2 = bitcast <4 x half> %1 to <4 x i16> - %3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> - ret <8 x i16> %3 -} - -define <8 x i16> @cvt_8f32_to_8i16(<8 x float> %a0) nounwind { -; AVX1-LABEL: cvt_8f32_to_8i16: -; AVX1: # BB#0: -; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; 
AVX1-NEXT: vmovd %xmm1, %eax -; AVX1-NEXT: shll $16, %eax -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %ecx -; AVX1-NEXT: movzwl %cx, %ecx -; AVX1-NEXT: orl %eax, %ecx -; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %edx -; AVX1-NEXT: shll $16, %edx -; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %eax -; AVX1-NEXT: movzwl %ax, %eax -; AVX1-NEXT: orl %edx, %eax -; AVX1-NEXT: shlq $32, %rax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %ecx -; AVX1-NEXT: shll $16, %ecx -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %edx -; AVX1-NEXT: movzwl %dx, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %ecx -; AVX1-NEXT: shll $16, %ecx -; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %esi -; AVX1-NEXT: movzwl %si, %esi -; AVX1-NEXT: orl %ecx, %esi -; AVX1-NEXT: shlq $32, %rsi -; AVX1-NEXT: orq %rdx, %rsi -; AVX1-NEXT: vmovq %rsi, %xmm0 -; AVX1-NEXT: vmovq %rax, %xmm1 -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX1-NEXT: vzeroupper -; AVX1-NEXT: retq -; -; AVX2-LABEL: cvt_8f32_to_8i16: -; AVX2: # BB#0: -; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %eax -; AVX2-NEXT: shll $16, %eax -; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %ecx -; AVX2-NEXT: movzwl %cx, %ecx -; AVX2-NEXT: orl %eax, %ecx -; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %edx -; AVX2-NEXT: shll $16, %edx -; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %eax -; AVX2-NEXT: movzwl %ax, %eax -; AVX2-NEXT: orl %edx, %eax -; AVX2-NEXT: shlq $32, %rax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %ecx -; AVX2-NEXT: shll $16, %ecx -; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %edx -; AVX2-NEXT: movzwl %dx, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %ecx -; AVX2-NEXT: shll $16, %ecx -; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX2-NEXT: vmovd %xmm0, %esi -; AVX2-NEXT: movzwl %si, %esi -; AVX2-NEXT: orl %ecx, %esi -; AVX2-NEXT: shlq $32, %rsi -; AVX2-NEXT: orq %rdx, %rsi -; AVX2-NEXT: vmovq %rsi, %xmm0 -; AVX2-NEXT: vmovq %rax, %xmm1 -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq -; -; AVX512F-LABEL: cvt_8f32_to_8i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %YMM0 %YMM0 %ZMM0 -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: shll $16, %ecx -; AVX512F-NEXT: orl %eax, %ecx -; AVX512F-NEXT: 
vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %edx -; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: shll $16, %eax -; AVX512F-NEXT: orl %edx, %eax -; AVX512F-NEXT: shlq $32, %rax -; AVX512F-NEXT: orq %rcx, %rax -; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: movzwl %cx, %ecx ; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %edx -; AVX512F-NEXT: shll $16, %edx -; AVX512F-NEXT: orl %ecx, %edx -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 +; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512F-NEXT: vmovd %xmm1, %eax +; AVX512F-NEXT: shll $16, %eax +; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm1 ; AVX512F-NEXT: vmovd %xmm1, %ecx ; AVX512F-NEXT: movzwl %cx, %ecx -; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vmovd %xmm0, %esi -; AVX512F-NEXT: shll $16, %esi -; AVX512F-NEXT: orl %ecx, %esi -; AVX512F-NEXT: shlq $32, %rsi -; AVX512F-NEXT: orq %rdx, %rsi -; AVX512F-NEXT: vmovq %rsi, %xmm0 -; AVX512F-NEXT: vmovq %rax, %xmm1 -; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: orl %eax, %ecx +; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512F-NEXT: vmovd %xmm1, %eax +; AVX512F-NEXT: shll $16, %eax +; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; AVX512F-NEXT: vmovd %xmm0, %edx +; AVX512F-NEXT: movzwl %dx, %edx +; AVX512F-NEXT: orl %eax, %edx +; AVX512F-NEXT: shlq $32, %rdx +; AVX512F-NEXT: orq %rcx, %rdx +; AVX512F-NEXT: vmovq %rdx, %xmm0 +; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero ; AVX512F-NEXT: retq ; -; AVX512VL-LABEL: cvt_8f32_to_8i16: +; AVX512VL-LABEL: cvt_4f32_to_8i16_zero: ; AVX512VL: # BB#0: ; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 @@ -3187,40 +2292,74 @@ define <8 x i16> @cvt_8f32_to_8i16(<8 x float> %a0) nounwind { ; AVX512VL-NEXT: orl %eax, %ecx ; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] ; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %edx -; AVX512VL-NEXT: shll $16, %edx -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 ; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: movzwl %ax, %eax -; AVX512VL-NEXT: orl %edx, %eax -; AVX512VL-NEXT: shlq $32, %rax -; AVX512VL-NEXT: orq %rcx, %rax -; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %ecx -; AVX512VL-NEXT: shll $16, %ecx -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %edx -; AVX512VL-NEXT: movzwl %dx, %edx -; AVX512VL-NEXT: orl %ecx, %edx -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %ecx -; AVX512VL-NEXT: shll $16, %ecx +; AVX512VL-NEXT: shll $16, %eax ; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] ; AVX512VL-NEXT: vcvtps2ph $4, 
%xmm0, %xmm0 -; AVX512VL-NEXT: vmovd %xmm0, %esi -; AVX512VL-NEXT: movzwl %si, %esi -; AVX512VL-NEXT: orl %ecx, %esi -; AVX512VL-NEXT: shlq $32, %rsi -; AVX512VL-NEXT: orq %rdx, %rsi -; AVX512VL-NEXT: vmovq %rsi, %xmm0 -; AVX512VL-NEXT: vmovq %rax, %xmm1 -; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX512VL-NEXT: vzeroupper +; AVX512VL-NEXT: vmovd %xmm0, %edx +; AVX512VL-NEXT: movzwl %dx, %edx +; AVX512VL-NEXT: orl %eax, %edx +; AVX512VL-NEXT: shlq $32, %rdx +; AVX512VL-NEXT: orq %rcx, %rdx +; AVX512VL-NEXT: vmovq %rdx, %xmm0 +; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] +; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,2] +; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX512VL-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] ; AVX512VL-NEXT: retq + %1 = fptrunc <4 x float> %a0 to <4 x half> + %2 = bitcast <4 x half> %1 to <4 x i16> + %3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> + ret <8 x i16> %3 +} + +define <8 x i16> @cvt_8f32_to_8i16(<8 x float> %a0) nounwind { +; ALL-LABEL: cvt_8f32_to_8i16: +; ALL: # BB#0: +; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: shll $16, %eax +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 +; ALL-NEXT: vmovd %xmm1, %ecx +; ALL-NEXT: movzwl %cx, %ecx +; ALL-NEXT: orl %eax, %ecx +; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %edx +; ALL-NEXT: shll $16, %edx +; ALL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: movzwl %ax, %eax +; ALL-NEXT: orl %edx, %eax +; ALL-NEXT: shlq $32, %rax +; ALL-NEXT: orq %rcx, %rax +; ALL-NEXT: vextractf128 $1, %ymm0, %xmm0 +; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %ecx +; ALL-NEXT: shll $16, %ecx +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 +; ALL-NEXT: vmovd %xmm1, %edx +; ALL-NEXT: movzwl %dx, %edx +; ALL-NEXT: orl %ecx, %edx +; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %ecx +; ALL-NEXT: shll $16, %ecx +; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; ALL-NEXT: vmovd %xmm0, %esi +; ALL-NEXT: movzwl %si, %esi +; ALL-NEXT: orl %ecx, %esi +; ALL-NEXT: shlq $32, %rsi +; ALL-NEXT: orq %rdx, %rsi +; ALL-NEXT: vmovq %rsi, %xmm0 +; ALL-NEXT: vmovq %rax, %xmm1 +; ALL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; ALL-NEXT: vzeroupper +; ALL-NEXT: retq %1 = fptrunc <8 x float> %a0 to <8 x half> %2 = bitcast <8 x half> %1 to <8 x i16> ret <8 x i16> %2 @@ -3361,141 +2500,73 @@ define <16 x i16> @cvt_16f32_to_16i16(<16 x float> %a0) nounwind { ; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 ; AVX2-NEXT: retq ; -; AVX512F-LABEL: cvt_16f32_to_16i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: vextractf64x4 $1, %zmm0, %ymm1 -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm2 -; AVX512F-NEXT: vmovd %xmm2, %eax -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2 -; AVX512F-NEXT: vmovd %eax, %xmm3 -; AVX512F-NEXT: vmovd %xmm2, %eax -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2 -; AVX512F-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm2, %eax -; AVX512F-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3] -; AVX512F-NEXT: 
vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm1 -; AVX512F-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm1 -; AVX512F-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2 -; AVX512F-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm2, %eax -; AVX512F-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %eax, %xmm3 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm0, %eax -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm0 -; AVX512F-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm0, %eax -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm0, %eax -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm1[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm0, %eax -; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vpinsrw $6, %eax, %xmm3, %xmm1 -; AVX512F-NEXT: vmovd %xmm0, %eax -; AVX512F-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0 -; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: cvt_16f32_to_16i16: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vextractf64x4 $1, %zmm0, %ymm1 -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm2 -; AVX512VL-NEXT: vmovd %xmm2, %eax -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX512VL-NEXT: vmovd %eax, %xmm3 -; AVX512VL-NEXT: vmovd %xmm2, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX512VL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm2, %eax -; AVX512VL-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm1 -; AVX512VL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3 -; 
AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX512VL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm2, %eax -; AVX512VL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %eax, %xmm3 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm0, %eax -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm0 -; AVX512VL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm0, %eax -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm0, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm1[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm0, %eax -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm1 -; AVX512VL-NEXT: vmovd %xmm0, %eax -; AVX512VL-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0 -; AVX512VL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 -; AVX512VL-NEXT: retq +; AVX512-LABEL: cvt_16f32_to_16i16: +; AVX512: # BB#0: +; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm2 +; AVX512-NEXT: vmovd %xmm2, %eax +; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; AVX512-NEXT: vmovd %eax, %xmm3 +; AVX512-NEXT: vmovd %xmm2, %eax +; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0] +; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; AVX512-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm2, %eax +; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm1, %eax +; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm1 +; AVX512-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm1, %eax +; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm1, %eax +; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0] +; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm1, %eax +; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm1 +; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; AVX512-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm2, %eax +; AVX512-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2 +; AVX512-NEXT: vmovd %xmm1, %eax +; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512-NEXT: vmovd %eax, %xmm3 +; AVX512-NEXT: vmovd %xmm1, %eax +; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] +; AVX512-NEXT: vcvtps2ph $4, %xmm1, 
%xmm1 +; AVX512-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm1, %eax +; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm0 +; AVX512-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm1[1,0] +; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[3,1,2,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrw $6, %eax, %xmm3, %xmm1 +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0 +; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX512-NEXT: retq %1 = fptrunc <16 x float> %a0 to <16 x half> %2 = bitcast <16 x half> %1 to <16 x i16> ret <16 x i16> %2 @@ -3506,35 +2577,12 @@ define <16 x i16> @cvt_16f32_to_16i16(<16 x float> %a0) nounwind { ; define void @store_cvt_f32_to_i16(float %a0, i16* %a1) nounwind { -; AVX1-LABEL: store_cvt_f32_to_i16: -; AVX1: # BB#0: -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %eax -; AVX1-NEXT: movw %ax, (%rdi) -; AVX1-NEXT: retq -; -; AVX2-LABEL: store_cvt_f32_to_i16: -; AVX2: # BB#0: -; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX2-NEXT: vmovd %xmm0, %eax -; AVX2-NEXT: movw %ax, (%rdi) -; AVX2-NEXT: retq -; -; AVX512F-LABEL: store_cvt_f32_to_i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %XMM0 %XMM0 %ZMM0 -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vmovd %xmm0, %eax -; AVX512F-NEXT: movw %ax, (%rdi) -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: store_cvt_f32_to_i16: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovd %xmm0, %eax -; AVX512VL-NEXT: movw %ax, (%rdi) -; AVX512VL-NEXT: retq +; ALL-LABEL: store_cvt_f32_to_i16: +; ALL: # BB#0: +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; ALL-NEXT: vmovd %xmm0, %eax +; ALL-NEXT: movw %ax, (%rdi) +; ALL-NEXT: retq %1 = fptrunc float %a0 to half %2 = bitcast half %1 to i16 store i16 %2, i16* %a1 @@ -3542,83 +2590,24 @@ define void @store_cvt_f32_to_i16(float %a0, i16* %a1) nounwind { } define void @store_cvt_4f32_to_4i16(<4 x float> %a0, <4 x i16>* %a1) nounwind { -; AVX1-LABEL: store_cvt_4f32_to_4i16: -; AVX1: # BB#0: -; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %eax -; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %ecx -; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %edx -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %esi -; AVX1-NEXT: movw %si, (%rdi) -; AVX1-NEXT: movw %dx, 6(%rdi) -; AVX1-NEXT: movw %cx, 4(%rdi) -; AVX1-NEXT: movw %ax, 2(%rdi) -; AVX1-NEXT: retq -; -; AVX2-LABEL: store_cvt_4f32_to_4i16: -; AVX2: # BB#0: -; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %eax -; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: 
vmovd %xmm1, %ecx -; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %edx -; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX2-NEXT: vmovd %xmm0, %esi -; AVX2-NEXT: movw %si, (%rdi) -; AVX2-NEXT: movw %dx, 6(%rdi) -; AVX2-NEXT: movw %cx, 4(%rdi) -; AVX2-NEXT: movw %ax, 2(%rdi) -; AVX2-NEXT: retq -; -; AVX512F-LABEL: store_cvt_4f32_to_4i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %XMM0 %XMM0 %ZMM0 -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %edx -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vmovd %xmm0, %esi -; AVX512F-NEXT: movw %si, (%rdi) -; AVX512F-NEXT: movw %dx, 6(%rdi) -; AVX512F-NEXT: movw %cx, 4(%rdi) -; AVX512F-NEXT: movw %ax, 2(%rdi) -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: store_cvt_4f32_to_4i16: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %ecx -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %edx -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovd %xmm0, %esi -; AVX512VL-NEXT: movw %si, (%rdi) -; AVX512VL-NEXT: movw %dx, 6(%rdi) -; AVX512VL-NEXT: movw %cx, 4(%rdi) -; AVX512VL-NEXT: movw %ax, 2(%rdi) -; AVX512VL-NEXT: retq +; ALL-LABEL: store_cvt_4f32_to_4i16: +; ALL: # BB#0: +; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %ecx +; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %edx +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; ALL-NEXT: vmovd %xmm0, %esi +; ALL-NEXT: movw %si, (%rdi) +; ALL-NEXT: movw %dx, 6(%rdi) +; ALL-NEXT: movw %cx, 4(%rdi) +; ALL-NEXT: movw %ax, 2(%rdi) +; ALL-NEXT: retq %1 = fptrunc <4 x float> %a0 to <4 x half> %2 = bitcast <4 x half> %1 to <4 x i16> store <4 x i16> %2, <4 x i16>* %a1 @@ -3680,30 +2669,28 @@ define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, <8 x i16>* %a1) nounw ; ; AVX512F-LABEL: store_cvt_4f32_to_8i16_undef: ; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %XMM0 %XMM0 %ZMM0 -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax ; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 +; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512F-NEXT: vmovd %xmm1, %eax +; AVX512F-NEXT: shll $16, %eax +; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm1 ; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: shll $16, %ecx +; AVX512F-NEXT: movzwl %cx, %ecx ; AVX512F-NEXT: orl %eax, %ecx -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 +; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 ; AVX512F-NEXT: 
vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax -; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 +; AVX512F-NEXT: shll $16, %eax +; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 ; AVX512F-NEXT: vmovd %xmm0, %edx -; AVX512F-NEXT: shll $16, %edx +; AVX512F-NEXT: movzwl %dx, %edx ; AVX512F-NEXT: orl %eax, %edx ; AVX512F-NEXT: shlq $32, %rdx ; AVX512F-NEXT: orq %rcx, %rdx ; AVX512F-NEXT: vmovq %rdx, %xmm0 ; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] ; AVX512F-NEXT: vmovdqa %xmm0, (%rdi) -; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: store_cvt_4f32_to_8i16_undef: @@ -3794,30 +2781,28 @@ define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) nounwi ; ; AVX512F-LABEL: store_cvt_4f32_to_8i16_zero: ; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %XMM0 %XMM0 %ZMM0 -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax ; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 +; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512F-NEXT: vmovd %xmm1, %eax +; AVX512F-NEXT: shll $16, %eax +; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm1 ; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: shll $16, %ecx +; AVX512F-NEXT: movzwl %cx, %ecx ; AVX512F-NEXT: orl %eax, %ecx -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 +; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 ; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax -; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 +; AVX512F-NEXT: shll $16, %eax +; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 ; AVX512F-NEXT: vmovd %xmm0, %edx -; AVX512F-NEXT: shll $16, %edx +; AVX512F-NEXT: movzwl %dx, %edx ; AVX512F-NEXT: orl %eax, %edx ; AVX512F-NEXT: shlq $32, %rdx ; AVX512F-NEXT: orq %rcx, %rdx ; AVX512F-NEXT: vmovq %rdx, %xmm0 ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero ; AVX512F-NEXT: vmovdqa %xmm0, (%rdi) -; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: store_cvt_4f32_to_8i16_zero: @@ -3856,150 +2841,41 @@ define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) nounwi } define void @store_cvt_8f32_to_8i16(<8 x float> %a0, <8 x i16>* %a1) nounwind { -; AVX1-LABEL: store_cvt_8f32_to_8i16: -; AVX1: # BB#0: -; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %r8d -; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %r9d -; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %r10d -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX1-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX1-NEXT: vmovd %xmm2, %r11d -; AVX1-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0] -; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX1-NEXT: vmovd %xmm2, %eax -; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX1-NEXT: vmovd %xmm2, %ecx -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %edx -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm0 -; 
AVX1-NEXT: vmovd %xmm0, %esi -; AVX1-NEXT: movw %si, 8(%rdi) -; AVX1-NEXT: movw %dx, (%rdi) -; AVX1-NEXT: movw %cx, 14(%rdi) -; AVX1-NEXT: movw %ax, 12(%rdi) -; AVX1-NEXT: movw %r11w, 10(%rdi) -; AVX1-NEXT: movw %r10w, 6(%rdi) -; AVX1-NEXT: movw %r9w, 4(%rdi) -; AVX1-NEXT: movw %r8w, 2(%rdi) -; AVX1-NEXT: vzeroupper -; AVX1-NEXT: retq -; -; AVX2-LABEL: store_cvt_8f32_to_8i16: -; AVX2: # BB#0: -; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %r8d -; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %r9d -; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %r10d -; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX2-NEXT: vmovd %xmm2, %r11d -; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0] -; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX2-NEXT: vmovd %xmm2, %eax -; AVX2-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX2-NEXT: vmovd %xmm2, %ecx -; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX2-NEXT: vmovd %xmm0, %edx -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm0 -; AVX2-NEXT: vmovd %xmm0, %esi -; AVX2-NEXT: movw %si, 8(%rdi) -; AVX2-NEXT: movw %dx, (%rdi) -; AVX2-NEXT: movw %cx, 14(%rdi) -; AVX2-NEXT: movw %ax, 12(%rdi) -; AVX2-NEXT: movw %r11w, 10(%rdi) -; AVX2-NEXT: movw %r10w, 6(%rdi) -; AVX2-NEXT: movw %r9w, 4(%rdi) -; AVX2-NEXT: movw %r8w, 2(%rdi) -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq -; -; AVX512F-LABEL: store_cvt_8f32_to_8i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %YMM0 %YMM0 %ZMM0 -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %r8d -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %r9d -; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %r10d -; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2 -; AVX512F-NEXT: vmovd %xmm2, %r11d -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2 -; AVX512F-NEXT: vmovd %xmm2, %eax -; AVX512F-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2 -; AVX512F-NEXT: vmovd %xmm2, %ecx -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vmovd %xmm0, %edx -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm0 -; AVX512F-NEXT: vmovd %xmm0, %esi -; AVX512F-NEXT: movw %si, 8(%rdi) -; AVX512F-NEXT: movw %dx, (%rdi) -; AVX512F-NEXT: movw %cx, 14(%rdi) -; AVX512F-NEXT: movw %ax, 12(%rdi) -; AVX512F-NEXT: movw %r11w, 10(%rdi) -; AVX512F-NEXT: movw %r10w, 6(%rdi) -; AVX512F-NEXT: movw %r9w, 4(%rdi) -; AVX512F-NEXT: movw %r8w, 2(%rdi) -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: store_cvt_8f32_to_8i16: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %r8d -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %r9d -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 
-; AVX512VL-NEXT: vmovd %xmm1, %r10d -; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX512VL-NEXT: vmovd %xmm2, %r11d -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX512VL-NEXT: vmovd %xmm2, %eax -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX512VL-NEXT: vmovd %xmm2, %ecx -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovd %xmm0, %edx -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm0 -; AVX512VL-NEXT: vmovd %xmm0, %esi -; AVX512VL-NEXT: movw %si, 8(%rdi) -; AVX512VL-NEXT: movw %dx, (%rdi) -; AVX512VL-NEXT: movw %cx, 14(%rdi) -; AVX512VL-NEXT: movw %ax, 12(%rdi) -; AVX512VL-NEXT: movw %r11w, 10(%rdi) -; AVX512VL-NEXT: movw %r10w, 6(%rdi) -; AVX512VL-NEXT: movw %r9w, 4(%rdi) -; AVX512VL-NEXT: movw %r8w, 2(%rdi) -; AVX512VL-NEXT: vzeroupper -; AVX512VL-NEXT: retq +; ALL-LABEL: store_cvt_8f32_to_8i16: +; ALL: # BB#0: +; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %r8d +; ALL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %r9d +; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %r10d +; ALL-NEXT: vextractf128 $1, %ymm0, %xmm1 +; ALL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3] +; ALL-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; ALL-NEXT: vmovd %xmm2, %r11d +; ALL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0] +; ALL-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; ALL-NEXT: vmovd %xmm2, %eax +; ALL-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3] +; ALL-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; ALL-NEXT: vmovd %xmm2, %ecx +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; ALL-NEXT: vmovd %xmm0, %edx +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm0 +; ALL-NEXT: vmovd %xmm0, %esi +; ALL-NEXT: movw %si, 8(%rdi) +; ALL-NEXT: movw %dx, (%rdi) +; ALL-NEXT: movw %cx, 14(%rdi) +; ALL-NEXT: movw %ax, 12(%rdi) +; ALL-NEXT: movw %r11w, 10(%rdi) +; ALL-NEXT: movw %r10w, 6(%rdi) +; ALL-NEXT: movw %r9w, 4(%rdi) +; ALL-NEXT: movw %r8w, 2(%rdi) +; ALL-NEXT: vzeroupper +; ALL-NEXT: retq %1 = fptrunc <8 x float> %a0 to <8 x half> %2 = bitcast <8 x half> %1 to <8 x i16> store <8 x i16> %2, <8 x i16>* %a1 @@ -4141,141 +3017,73 @@ define void @store_cvt_16f32_to_16i16(<16 x float> %a0, <16 x i16>* %a1) nounwin ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; -; AVX512F-LABEL: store_cvt_16f32_to_16i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512F-NEXT: vextractf64x4 $1, %zmm0, %ymm2 -; AVX512F-NEXT: vextractf128 $1, %ymm2, %xmm3 -; AVX512F-NEXT: vcvtps2ph $4, %zmm3, %ymm4 -; AVX512F-NEXT: vmovd %xmm4, %eax -; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm4 -; AVX512F-NEXT: movw %ax, 24(%rdi) -; AVX512F-NEXT: vmovd %xmm4, %eax -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm4 -; AVX512F-NEXT: movw %ax, 16(%rdi) -; AVX512F-NEXT: vmovd %xmm4, %eax -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm4 -; AVX512F-NEXT: movw %ax, 8(%rdi) -; AVX512F-NEXT: vmovd %xmm4, %eax -; AVX512F-NEXT: vpermilps {{.*#+}} xmm4 = xmm3[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm4, %ymm4 -; AVX512F-NEXT: movw %ax, (%rdi) -; AVX512F-NEXT: vmovd %xmm4, %eax -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm4, %ymm4 -; AVX512F-NEXT: movw %ax, 30(%rdi) -; AVX512F-NEXT: vmovd %xmm4, %eax -; AVX512F-NEXT: vmovshdup {{.*#+}} 
xmm4 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm4, %ymm4 -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm3, %ymm3 -; AVX512F-NEXT: movw %ax, 28(%rdi) -; AVX512F-NEXT: vmovd %xmm3, %eax -; AVX512F-NEXT: vpermilps {{.*#+}} xmm3 = xmm2[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm3, %ymm3 -; AVX512F-NEXT: movw %ax, 26(%rdi) -; AVX512F-NEXT: vmovd %xmm3, %eax -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm3, %ymm3 -; AVX512F-NEXT: movw %ax, 22(%rdi) -; AVX512F-NEXT: vmovd %xmm3, %eax -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm3, %ymm3 -; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2 -; AVX512F-NEXT: movw %ax, 20(%rdi) -; AVX512F-NEXT: vmovd %xmm2, %eax -; AVX512F-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2 -; AVX512F-NEXT: movw %ax, 18(%rdi) -; AVX512F-NEXT: vmovd %xmm2, %eax -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2 -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: movw %ax, 14(%rdi) -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movw %ax, 12(%rdi) -; AVX512F-NEXT: vmovd %xmm2, %eax -; AVX512F-NEXT: movw %ax, 10(%rdi) -; AVX512F-NEXT: vmovd %xmm0, %eax -; AVX512F-NEXT: movw %ax, 6(%rdi) -; AVX512F-NEXT: vmovd %xmm3, %eax -; AVX512F-NEXT: movw %ax, 4(%rdi) -; AVX512F-NEXT: vmovd %xmm4, %eax -; AVX512F-NEXT: movw %ax, 2(%rdi) -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: store_cvt_16f32_to_16i16: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512VL-NEXT: vextractf64x4 $1, %zmm0, %ymm2 -; AVX512VL-NEXT: vextractf128 $1, %ymm2, %xmm3 -; AVX512VL-NEXT: vcvtps2ph $4, %xmm3, %xmm4 -; AVX512VL-NEXT: vmovd %xmm4, %eax -; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm4 -; AVX512VL-NEXT: movw %ax, 24(%rdi) -; AVX512VL-NEXT: vmovd %xmm4, %eax -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm4 -; AVX512VL-NEXT: movw %ax, 16(%rdi) -; AVX512VL-NEXT: vmovd %xmm4, %eax -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm4 -; AVX512VL-NEXT: movw %ax, 8(%rdi) -; AVX512VL-NEXT: vmovd %xmm4, %eax -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm4 = xmm3[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm4, %xmm4 -; AVX512VL-NEXT: movw %ax, (%rdi) -; AVX512VL-NEXT: vmovd %xmm4, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm4, %xmm4 -; AVX512VL-NEXT: movw %ax, 30(%rdi) -; AVX512VL-NEXT: vmovd %xmm4, %eax -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm4, %xmm4 -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm3, %xmm3 -; AVX512VL-NEXT: movw %ax, 28(%rdi) -; AVX512VL-NEXT: vmovd %xmm3, %eax -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm3 = xmm2[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm3, %xmm3 -; AVX512VL-NEXT: movw %ax, 26(%rdi) -; AVX512VL-NEXT: vmovd %xmm3, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm3, %xmm3 -; AVX512VL-NEXT: movw %ax, 22(%rdi) -; AVX512VL-NEXT: vmovd %xmm3, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm3, %xmm3 -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512VL-NEXT: 
vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX512VL-NEXT: movw %ax, 20(%rdi) -; AVX512VL-NEXT: vmovd %xmm2, %eax -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX512VL-NEXT: movw %ax, 18(%rdi) -; AVX512VL-NEXT: vmovd %xmm2, %eax -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: movw %ax, 14(%rdi) -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: movw %ax, 12(%rdi) -; AVX512VL-NEXT: vmovd %xmm2, %eax -; AVX512VL-NEXT: movw %ax, 10(%rdi) -; AVX512VL-NEXT: vmovd %xmm0, %eax -; AVX512VL-NEXT: movw %ax, 6(%rdi) -; AVX512VL-NEXT: vmovd %xmm3, %eax -; AVX512VL-NEXT: movw %ax, 4(%rdi) -; AVX512VL-NEXT: vmovd %xmm4, %eax -; AVX512VL-NEXT: movw %ax, 2(%rdi) -; AVX512VL-NEXT: vzeroupper -; AVX512VL-NEXT: retq +; AVX512-LABEL: store_cvt_16f32_to_16i16: +; AVX512: # BB#0: +; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; AVX512-NEXT: vextractf128 $1, %ymm2, %xmm3 +; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm4 +; AVX512-NEXT: vmovd %xmm4, %eax +; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm4 +; AVX512-NEXT: movw %ax, 24(%rdi) +; AVX512-NEXT: vmovd %xmm4, %eax +; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm4 +; AVX512-NEXT: movw %ax, 16(%rdi) +; AVX512-NEXT: vmovd %xmm4, %eax +; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm4 +; AVX512-NEXT: movw %ax, 8(%rdi) +; AVX512-NEXT: vmovd %xmm4, %eax +; AVX512-NEXT: vpermilps {{.*#+}} xmm4 = xmm3[3,1,2,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4 +; AVX512-NEXT: movw %ax, (%rdi) +; AVX512-NEXT: vmovd %xmm4, %eax +; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0] +; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4 +; AVX512-NEXT: movw %ax, 30(%rdi) +; AVX512-NEXT: vmovd %xmm4, %eax +; AVX512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4 +; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; AVX512-NEXT: movw %ax, 28(%rdi) +; AVX512-NEXT: vmovd %xmm3, %eax +; AVX512-NEXT: vpermilps {{.*#+}} xmm3 = xmm2[3,1,2,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; AVX512-NEXT: movw %ax, 26(%rdi) +; AVX512-NEXT: vmovd %xmm3, %eax +; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0] +; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; AVX512-NEXT: movw %ax, 22(%rdi) +; AVX512-NEXT: vmovd %xmm3, %eax +; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0] +; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; AVX512-NEXT: movw %ax, 20(%rdi) +; AVX512-NEXT: vmovd %xmm2, %eax +; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; AVX512-NEXT: movw %ax, 18(%rdi) +; AVX512-NEXT: vmovd %xmm2, %eax +; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0] +; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512-NEXT: movw %ax, 14(%rdi) +; AVX512-NEXT: vmovd %xmm1, %eax +; AVX512-NEXT: movw %ax, 12(%rdi) +; AVX512-NEXT: vmovd %xmm2, %eax +; AVX512-NEXT: movw %ax, 10(%rdi) +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: movw %ax, 6(%rdi) +; AVX512-NEXT: 
vmovd %xmm3, %eax
+; AVX512-NEXT: movw %ax, 4(%rdi)
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: movw %ax, 2(%rdi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
 %1 = fptrunc <16 x float> %a0 to <16 x half>
 %2 = bitcast <16 x half> %1 to <16 x i16>
 store <16 x i16> %2, <16 x i16>* %a1
-- 
2.7.4
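
Note (illustration only, not part of the patch; text after the signature
delimiter is ignored by git-am): the updated CHECK lines above all funnel
through the F16C vcvtps2ph/vcvtph2ps instructions. The short C program
below is a minimal sketch of the same <8 x float> -> <8 x i16> truncation
that store_cvt_8f32_to_8i16 tests. The helper name cvt_8f32_to_8i16, the
file name demo.c, and the build line are assumptions; it needs an
F16C-capable CPU and compiler support (cc -O2 -mf16c demo.c).

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

/* Truncate eight floats to eight IEEE-754 half values stored as i16 --
 * the same operation as the fptrunc/bitcast/store IR above.  The imm8
 * value 4 sets bit 2, telling vcvtps2ph to round using the current
 * MXCSR mode; it is the "$4" immediate in the CHECK lines. */
static void cvt_8f32_to_8i16(const float *src, uint16_t *dst) {
  __m256 v = _mm256_loadu_ps(src);
  __m128i h = _mm256_cvtps_ph(v, 4);   /* a single vcvtps2ph */
  _mm_storeu_si128((__m128i *)dst, h);
}

int main(void) {
  float in[8] = {0.5f, 1.0f, 1.5f, -0.25f, 3.14159f, 65504.0f, 1e-4f, 2.0f};
  uint16_t out[8];
  cvt_8f32_to_8i16(in, out);
  for (int i = 0; i < 8; i++)
    /* _cvtsh_ss is the scalar vcvtph2ps: half -> float for printing */
    printf("%g -> 0x%04x -> %g\n", (double)in[i], (unsigned)out[i],
           (double)_cvtsh_ss(out[i]));
  return 0;
}

With AVX512F now implying F16C, every AVX512 configuration compiles this
pattern to the same single vcvtps2ph sequence, which is why the separate
AVX512F/AVX512VL/AVX512DQ CHECK blocks above could collapse into the
shared ALL and AVX512 blocks.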