Noticed in D57514.
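For reference: the first hunk below adds the 128-bit VMOVDDUP forms to the
execution domain fixup tables in X86InstrInfo.cpp, paired with the matching
VPBROADCASTQ forms. That lets the execution-domain fix pass rewrite a 64-bit
splat between the integer-domain vpbroadcastq and the FP-domain vmovddup to
match the domain of neighbouring instructions. A self-contained sketch of the
row/column scheme (illustration only, not LLVM's code; the opcode constants
and the lookup helper are placeholders, and the {PackedSingle, PackedDouble,
PackedInt} column order is inferred from the surrounding entries):

  #include <cstdint>
  #include <cstdio>

  enum Opcode : uint16_t { VMOVDDUPrr = 1, VMOVDDUPrm, VPBROADCASTQrr, VPBROADCASTQrm };
  enum Domain { PackedSingle = 0, PackedDouble = 1, PackedInt = 2 };

  // One row per replaceable instruction group; VMOVDDUP has no
  // single-precision twin, so it fills both FP columns, mirroring the
  // rows added in the hunk below.
  static const uint16_t Table[][3] = {
      // PackedSingle   PackedDouble   PackedInt
      {  VMOVDDUPrr,    VMOVDDUPrr,    VPBROADCASTQrr },
      {  VMOVDDUPrm,    VMOVDDUPrm,    VPBROADCASTQrm },
  };

  // Return the equivalent of Op in domain Dom, or 0 if Op has no row.
  static uint16_t lookup(uint16_t Op, Domain Dom) {
    for (const auto &Row : Table)
      if (Row[0] == Op || Row[1] == Op || Row[2] == Op)
        return Row[Dom];
    return 0;
  }

  int main() {
    // An integer-domain splat can be re-emitted in the FP domain and back.
    std::printf("%u\n", (unsigned)lookup(VPBROADCASTQrr, PackedDouble)); // VMOVDDUPrr
    std::printf("%u\n", (unsigned)lookup(VMOVDDUPrm, PackedInt));        // VPBROADCASTQrm
  }

In the real tables the entries are X86 opcode enum values and the swap is
done through X86InstrInfo's getExecutionDomain/setExecutionDomain hooks; the
sketch only shows the table shape.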
Differential Revision: https://reviews.llvm.org/D57519
llvm-svn: 352922
{ X86::VBROADCASTSSZ256m,  X86::VBROADCASTSSZ256m,  X86::VPBROADCASTDZ256m },
{ X86::VBROADCASTSSZr,     X86::VBROADCASTSSZr,     X86::VPBROADCASTDZr },
{ X86::VBROADCASTSSZm,     X86::VBROADCASTSSZm,     X86::VPBROADCASTDZm },
+ { X86::VMOVDDUPZ128rr,     X86::VMOVDDUPZ128rr,     X86::VPBROADCASTQZ128r },
+ { X86::VMOVDDUPZ128rm,     X86::VMOVDDUPZ128rm,     X86::VPBROADCASTQZ128m },
{ X86::VBROADCASTSDZ256r,  X86::VBROADCASTSDZ256r,  X86::VPBROADCASTQZ256r },
{ X86::VBROADCASTSDZ256m,  X86::VBROADCASTSDZ256m,  X86::VPBROADCASTQZ256m },
{ X86::VBROADCASTSDZr,     X86::VBROADCASTSDZr,     X86::VPBROADCASTQZr },
{ X86::VPERM2F128rr,       X86::VPERM2F128rr,       X86::VPERM2I128rr },
{ X86::VBROADCASTSSrm,     X86::VBROADCASTSSrm,     X86::VPBROADCASTDrm },
{ X86::VBROADCASTSSrr,     X86::VBROADCASTSSrr,     X86::VPBROADCASTDrr },
+ { X86::VMOVDDUPrm,         X86::VMOVDDUPrm,         X86::VPBROADCASTQrm },
+ { X86::VMOVDDUPrr,         X86::VMOVDDUPrr,         X86::VPBROADCASTQrr },
{ X86::VBROADCASTSSYrr,    X86::VBROADCASTSSYrr,    X86::VPBROADCASTDYrr },
{ X86::VBROADCASTSSYrm,    X86::VBROADCASTSSYrm,    X86::VPBROADCASTDYrm },
{ X86::VBROADCASTSDYrr,    X86::VBROADCASTSDYrr,    X86::VPBROADCASTQYrr },
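
The remaining hunks are regenerated FileCheck expectations. Two effects show
up in the updated checks: a 64-bit splat held in an xmm register is now
printed as vmovddup rather than vpbroadcastq when nothing ties it to the
integer domain, and integer-domain neighbours of the old broadcast (vmovdqa,
vpand/vpandn/vpor/vpxor, vpblendd, vpshufd, vpermq) switch to their FP-domain
counterparts (vmovaps, vandps/vandnps/vorps/vxorps, vblendps, vpermilps,
vpermpd) so the whole chain stays in one domain. As a side effect, several
AVX1/AVX2 check prefixes that only differed in this instruction collapse into
a shared AVX (or X64) prefix.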
define <2 x i64> @test_mm_broadcastq_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm_broadcastq_epi64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
+; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> zeroinitializer
ret <2 x i64> %res
define <2 x i64> @test_x86_avx2_pbroadcastq_128(<2 x i64> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastq_128:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
+; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x i64> @llvm.x86.avx2.pbroadcastq.128(<2 x i64> %a0)
ret <2 x i64> %res
; X32-LABEL: Q64:
; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vpbroadcastq (%eax), %xmm0
+; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X32-NEXT: retl
;
; X64-LABEL: Q64:
; X64: ## %bb.0: ## %entry
-; X64-NEXT: vpbroadcastq (%rdi), %xmm0
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
entry:
%q = load i64, i64* %ptr, align 4
;
; X64-LABEL: broadcast_mem_v4i16_v8i16:
; X64: ## %bb.0:
-; X64-NEXT: vpbroadcastq (%rdi), %xmm0
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
%load = load <4 x i16>, <4 x i16>* %ptr
%shuf = shufflevector <4 x i16> %load, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
;
; X64-LABEL: load_splat_2i64_2i64_1111:
; X64: ## %bb.0: ## %entry
-; X64-NEXT: vpbroadcastq 8(%rdi), %xmm0
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
entry:
%ld = load <2 x i64>, <2 x i64>* %ptr
define <2 x i64> @_inreg2xi64(<2 x i64> %a) {
; X32-LABEL: _inreg2xi64:
; X32: ## %bb.0:
-; X32-NEXT: vpbroadcastq %xmm0, %xmm0
+; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X32-NEXT: retl
;
; X64-LABEL: _inreg2xi64:
; X64: ## %bb.0:
-; X64-NEXT: vpbroadcastq %xmm0, %xmm0
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X64-NEXT: retq
%b = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> zeroinitializer
ret <2 x i64> %b
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: vmovaps %xmm0, (%esp)
-; X32-NEXT: vpbroadcastq (%eax), %xmm1
+; X32-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; X32-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; X32-NEXT: vmovdqa %xmm1, {{[0-9]+}}(%esp)
+; X32-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
; X32-NEXT: addl $60, %esp
; X32-NEXT: retl
;
; X64: ## %bb.0: ## %entry
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: vpbroadcastq (%rdi), %xmm1
+; X64-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
; X64-NEXT: retq
entry:
%__a.addr.i = alloca <2 x i64>, align 16
define <2 x i64> @test_i64_to_2_mem(i64* %p) {
; CHECK-LABEL: test_i64_to_2_mem:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0
+; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; CHECK-NEXT: retq
%s = load i64, i64* %p
%vec = insertelement <2 x i64> undef, i64 %s, i32 0
define <4 x i32> @test_2xi32_to_4xi32(<4 x i32> %vec) {
; CHECK-LABEL: test_2xi32_to_4xi32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
+; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: retq
%res = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
ret <4 x i32> %res
define <4 x i32> @test_2xi32_to_4xi32_mem(<2 x i32>* %vp) {
; CHECK-LABEL: test_2xi32_to_4xi32_mem:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0
+; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; CHECK-NEXT: retq
%vec = load <2 x i32>, <2 x i32>* %vp
%res = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
define <4 x i32> @test_8xi32_to_4xi32_perm_mem_mask3(<8 x i32>* %vp) {
; CHECK-LABEL: test_8xi32_to_4xi32_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,2,3]
-; CHECK-NEXT: vpbroadcastq 8(%rdi), %xmm1
-; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = mem[1,1,2,3]
+; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
; CHECK-NEXT: retq
%vec = load <8 x i32>, <8 x i32>* %vp
%res = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> <i32 5, i32 3, i32 2, i32 7>
define <2 x i64> @test_mm_broadcastq_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm_broadcastq_epi64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
+; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> zeroinitializer
ret <2 x i64> %res
; X86-LABEL: test_mask_andnot_epi64_rmb_128:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT: vpbroadcastq (%eax), %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x59,0x08]
-; X86-NEXT: vpandn %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdf,0xc1]
+; X86-NEXT: vmovddup (%eax), %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0x08]
+; X86-NEXT: # xmm1 = mem[0,0]
+; X86-NEXT: vandnps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x55,0xc1]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mask_andnot_epi64_rmb_128:
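
Note the encodings in the hunk above: the VEX-encoded vmovddup load
(0xc5,0xfb,0x12,0x08 - 4 bytes) is one byte shorter than the vpbroadcastq it
replaces (0xc4,0xe2,0x79,0x59,0x08 - 5 bytes), because vmovddup fits in the
2-byte VEX prefix while vpbroadcastq needs the 3-byte form; vandnps is the
same length as the vpandn it replaces. The domain switch is code-size neutral
or slightly smaller here.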
;
; ALL32-LABEL: f16xi8_i64:
; ALL32: # %bb.0:
-; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = [7.9499288951273625E-275,7.9499288951273625E-275]
-; ALL32-NEXT: # xmm1 = mem[0,0]
+; ALL32-NEXT: vpbroadcastq {{.*#+}} xmm1 = [7.9499288951273625E-275,7.9499288951273625E-275]
; ALL32-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
; ALL32-LABEL: f8xi16_i64:
; ALL32: # %bb.0:
-; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = [4.1720559249406128E-309,4.1720559249406128E-309]
-; ALL32-NEXT: # xmm1 = mem[0,0]
+; ALL32-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4.1720559249406128E-309,4.1720559249406128E-309]
; ALL32-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
; ALL32-LABEL: f4xi32_i64:
; ALL32: # %bb.0:
-; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = [2.1219957909652723E-314,2.1219957909652723E-314]
-; ALL32-NEXT: # xmm1 = mem[0,0]
+; ALL32-NEXT: vpbroadcastq {{.*#+}} xmm1 = [2.1219957909652723E-314,2.1219957909652723E-314]
; ALL32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
; ALL32-NEXT: retl
;
; ALL64-LABEL: f4xf32_f64:
; ALL64: # %bb.0:
-; ALL64-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4575657222482165760,4575657222482165760]
+; ALL64-NEXT: vmovddup {{.*#+}} xmm1 = [4575657222482165760,4575657222482165760]
+; ALL64-NEXT: # xmm1 = mem[0,0]
; ALL64-NEXT: vaddps %xmm1, %xmm0, %xmm0
; ALL64-NEXT: vdivps %xmm0, %xmm1, %xmm0
; ALL64-NEXT: retq
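
The constant-splat tests above show that the rewrite follows the use domain
rather than running one way: in f16xi8_i64, f8xi16_i64 and f4xi32_i64 the
broadcast feeds integer ops (vpaddb/vpaddw/vpaddd and vpand), so the former
vmovddup constant load becomes vpbroadcastq, while in f4xf32_f64 it feeds
vaddps/vdivps, so the former vpbroadcastq becomes vmovddup.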
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
-; AVX1-LABEL: load64_ins_eltc_v2i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load64_ins_eltc_v2i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
-; AVX2-NEXT: retq
+; AVX-LABEL: load64_ins_eltc_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX-NEXT: retq
%x = load i64, i64* %p
%ins = insertelement <2 x i64> undef, i64 %x, i32 1
ret <2 x i64> %ins
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
-; AVX1-LABEL: load_i64_v2i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_i64_v2i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
-; AVX2-NEXT: retq
+; AVX-LABEL: load_i64_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX-NEXT: retq
%x = load i64, i64* %p
%ins = insertelement <2 x i64> undef, i64 %x, i32 %y
ret <2 x i64> %ins
; XOP-LABEL: interleave_24i32_in:
; XOP: # %bb.0:
; XOP-NEXT: vmovupd (%rsi), %ymm0
-; XOP-NEXT: vmovupd (%rcx), %ymm1
+; XOP-NEXT: vmovups (%rcx), %ymm1
; XOP-NEXT: vmovups 16(%rcx), %xmm2
; XOP-NEXT: vmovups (%rdx), %xmm3
; XOP-NEXT: vmovups 16(%rdx), %xmm4
; AVX2-LABEL: wrongorder:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastsd %xmm0, %ymm1
-; AVX2-NEXT: vmovapd %ymm1, 32(%rdi)
-; AVX2-NEXT: vmovapd %ymm1, (%rdi)
+; AVX2-NEXT: vmovaps %ymm1, 32(%rdi)
+; AVX2-NEXT: vmovaps %ymm1, (%rdi)
; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX1-NEXT: # xmm0 = xmm0[0,0]
-; X86-AVX1-NEXT: vmovapd %xmm0, (%eax) # encoding: [0xc5,0xf9,0x29,0x00]
+; X86-AVX1-NEXT: vmovaps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x29,0x00]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_store_pd1:
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX512-NEXT: # xmm0 = xmm0[0,0]
-; X86-AVX512-NEXT: vmovapd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x00]
+; X86-AVX512-NEXT: vmovaps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x00]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_store_pd1:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X64-AVX1-NEXT: # xmm0 = xmm0[0,0]
-; X64-AVX1-NEXT: vmovapd %xmm0, (%rdi) # encoding: [0xc5,0xf9,0x29,0x07]
+; X64-AVX1-NEXT: vmovaps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x29,0x07]
; X64-AVX1-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_store_pd1:
; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X64-AVX512-NEXT: # xmm0 = xmm0[0,0]
-; X64-AVX512-NEXT: vmovapd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x07]
+; X64-AVX512-NEXT: vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
; X64-AVX512-NEXT: retq # encoding: [0xc3]
%arg0 = bitcast double * %a0 to <2 x double>*
%shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> zeroinitializer
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX1-NEXT: # xmm0 = xmm0[0,0]
-; X86-AVX1-NEXT: vmovapd %xmm0, (%eax) # encoding: [0xc5,0xf9,0x29,0x00]
+; X86-AVX1-NEXT: vmovaps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x29,0x00]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_store1_pd:
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX512-NEXT: # xmm0 = xmm0[0,0]
-; X86-AVX512-NEXT: vmovapd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x00]
+; X86-AVX512-NEXT: vmovaps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x00]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_store1_pd:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X64-AVX1-NEXT: # xmm0 = xmm0[0,0]
-; X64-AVX1-NEXT: vmovapd %xmm0, (%rdi) # encoding: [0xc5,0xf9,0x29,0x07]
+; X64-AVX1-NEXT: vmovaps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x29,0x07]
; X64-AVX1-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_store1_pd:
; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X64-AVX512-NEXT: # xmm0 = xmm0[0,0]
-; X64-AVX512-NEXT: vmovapd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x07]
+; X64-AVX512-NEXT: vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
; X64-AVX512-NEXT: retq # encoding: [0xc3]
%arg0 = bitcast double * %a0 to <2 x double>*
%shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> zeroinitializer
; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X32-NEXT: retl
;
-; X64-AVX1-LABEL: test_2xi32_to_4xi32_mem:
-; X64-AVX1: # %bb.0:
-; X64-AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
-; X64-AVX1-NEXT: retq
-;
-; X64-AVX2-LABEL: test_2xi32_to_4xi32_mem:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
-; X64-AVX2-NEXT: retq
-;
-; X64-AVX512-LABEL: test_2xi32_to_4xi32_mem:
-; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: vpbroadcastq (%rdi), %xmm0
-; X64-AVX512-NEXT: retq
+; X64-LABEL: test_2xi32_to_4xi32_mem:
+; X64: # %bb.0:
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X64-NEXT: retq
%vec = load <2 x i32>, <2 x i32>* %vp
%res = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
ret <4 x i32> %res
; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm2 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vandpd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovddup {{.*#+}} xmm2 = [1.2598673968951787E-321,1.2598673968951787E-321]
; XOPAVX1-NEXT: # xmm2 = mem[0,0]
-; XOPAVX1-NEXT: vandpd %xmm2, %xmm0, %xmm0
-; XOPAVX1-NEXT: vandpd %xmm2, %xmm1, %xmm1
-; XOPAVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vandps %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT: vandps %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm2 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovddup {{.*#+}} xmm2 = [1.2598673968951787E-321,1.2598673968951787E-321]
; XOPAVX1-NEXT: # xmm2 = mem[0,0]
-; XOPAVX1-NEXT: vandpd %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT: vandps %xmm2, %xmm0, %xmm0
; XOPAVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero
-; XOPAVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2i64_00:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2i64_00:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX512VL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 0>
ret <2 x i64> %shuffle
;
; AVX2-LABEL: shuffle_v2i64_22:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq %xmm1, %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2i64_22:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpbroadcastq %xmm1, %xmm0
+; AVX512VL-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 2>
ret <2 x i64> %shuffle
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
-; AVX1-LABEL: insert_dup_mem_v2i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: insert_dup_mem_v2i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
-; AVX2-NEXT: retq
-;
-; AVX512VL-LABEL: insert_dup_mem_v2i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpbroadcastq (%rdi), %xmm0
-; AVX512VL-NEXT: retq
+; AVX-LABEL: insert_dup_mem_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX-NEXT: retq
%tmp = load i64, i64* %ptr, align 1
%tmp1 = insertelement <2 x i64> undef, i64 %tmp, i32 0
%tmp2 = shufflevector <2 x i64> %tmp1, <2 x i64> undef, <2 x i32> zeroinitializer
;
; AVX2OR512VL-LABEL: shuffle_v4i32_0451:
; AVX2OR512VL: # %bb.0:
-; AVX2OR512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
-; AVX2OR512VL-NEXT: vpbroadcastq %xmm0, %xmm0
-; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX2OR512VL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 5, i32 1>
ret <4 x i32> %shuffle
;
; AVX2OR512VL-LABEL: shuffle_v4i32_4015:
; AVX2OR512VL: # %bb.0:
-; AVX2OR512VL-NEXT: vpbroadcastq %xmm1, %xmm1
-; AVX2OR512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
+; AVX2OR512VL-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
+; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX2OR512VL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; AVX2OR512VL-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 0, i32 1, i32 5>
ret <4 x i32> %shuffle
; AVX1-LABEL: shuffle_v4f64_0020:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
; ALL-LABEL: shuffle_v4f64_0423:
; ALL: # %bb.0:
; ALL-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
+; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
ret <4 x double> %shuffle
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_0412:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq %xmm1, %xmm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,2]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,2]
+; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_0412:
define <16 x i8> @combine_pshufb_as_vpbroadcastq128(<16 x i8> %a) {
; CHECK-LABEL: combine_pshufb_as_vpbroadcastq128:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
+; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
ret <16 x i8> %1
define <16 x i8> @combine_pshufb_insertion_as_broadcast_v2i64(i64 %a0) {
; X86-LABEL: combine_pshufb_insertion_as_broadcast_v2i64:
; X86: # %bb.0:
-; X86-NEXT: vpbroadcastq {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X86-NEXT: retl
;
; X64-LABEL: combine_pshufb_insertion_as_broadcast_v2i64:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
-; X86-AVX2-NEXT: vmovapd %xmm0, (%eax)
+; X86-AVX2-NEXT: vmovaps %xmm0, (%eax)
; X86-AVX2-NEXT: retl
;
; X64-AVX-LABEL: buildvector_v4f32_0404:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; X64-AVX2-NEXT: vmovapd %xmm0, (%rdi)
+; X64-AVX2-NEXT: vmovaps %xmm0, (%rdi)
; X64-AVX2-NEXT: retq
%v0 = insertelement <4 x float> undef, float %a, i32 0
%v1 = insertelement <4 x float> %v0, float %b, i32 1
;
; AVX2-LABEL: combine_nested_undef_test4:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 7, i32 1>
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 4, i32 4, i32 0, i32 3>
;
; AVX2-LABEL: combine_nested_undef_test21:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
-; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 3, i32 1>
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 3>
;
; AVX2-LABEL: combine_nested_undef_test25:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> <i32 1, i32 5, i32 2, i32 4>
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 1, i32 3, i32 1>
;
; AVX2-LABEL: combine_nested_undef_test27:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> <i32 2, i32 1, i32 5, i32 4>
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 3, i32 2>
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
;
; AVX1-LABEL: trunc_and_v16i64_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vandpd %ymm4, %ymm0, %ymm0
-; AVX1-NEXT: vandpd %ymm5, %ymm1, %ymm1
-; AVX1-NEXT: vandpd %ymm6, %ymm2, %ymm2
-; AVX1-NEXT: vandpd %ymm7, %ymm3, %ymm3
+; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vandps %ymm6, %ymm2, %ymm2
+; AVX1-NEXT: vandps %ymm7, %ymm3, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
;
; AVX1-LABEL: trunc_xor_v16i64_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vxorpd %ymm4, %ymm0, %ymm0
-; AVX1-NEXT: vxorpd %ymm5, %ymm1, %ymm1
-; AVX1-NEXT: vxorpd %ymm6, %ymm2, %ymm2
-; AVX1-NEXT: vxorpd %ymm7, %ymm3, %ymm3
+; AVX1-NEXT: vxorps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm6, %ymm2, %ymm2
+; AVX1-NEXT: vxorps %ymm7, %ymm3, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
;
; AVX1-LABEL: trunc_or_v16i64_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vorpd %ymm4, %ymm0, %ymm0
-; AVX1-NEXT: vorpd %ymm5, %ymm1, %ymm1
-; AVX1-NEXT: vorpd %ymm6, %ymm2, %ymm2
-; AVX1-NEXT: vorpd %ymm7, %ymm3, %ymm3
+; AVX1-NEXT: vorps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vorps %ymm6, %ymm2, %ymm2
+; AVX1-NEXT: vorps %ymm7, %ymm3, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
;
; AVX1-LABEL: trunc_and_v16i64_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vandpd %ymm4, %ymm0, %ymm0
-; AVX1-NEXT: vandpd %ymm5, %ymm1, %ymm1
-; AVX1-NEXT: vandpd %ymm6, %ymm2, %ymm2
-; AVX1-NEXT: vandpd %ymm7, %ymm3, %ymm3
+; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vandps %ymm6, %ymm2, %ymm2
+; AVX1-NEXT: vandps %ymm7, %ymm3, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
;
; AVX1-LABEL: trunc_xor_v16i64_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vxorpd %ymm4, %ymm0, %ymm0
-; AVX1-NEXT: vxorpd %ymm5, %ymm1, %ymm1
-; AVX1-NEXT: vxorpd %ymm6, %ymm2, %ymm2
-; AVX1-NEXT: vxorpd %ymm7, %ymm3, %ymm3
+; AVX1-NEXT: vxorps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm6, %ymm2, %ymm2
+; AVX1-NEXT: vxorps %ymm7, %ymm3, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
;
; AVX1-LABEL: trunc_or_v16i64_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vorpd %ymm4, %ymm0, %ymm0
-; AVX1-NEXT: vorpd %ymm5, %ymm1, %ymm1
-; AVX1-NEXT: vorpd %ymm6, %ymm2, %ymm2
-; AVX1-NEXT: vorpd %ymm7, %ymm3, %ymm3
+; AVX1-NEXT: vorps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vorps %ymm6, %ymm2, %ymm2
+; AVX1-NEXT: vorps %ymm7, %ymm3, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm3 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandpd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandpd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm3 = mem[0,0]
-; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandpd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandpd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
;
; AVX2-LABEL: load_splat_4i32_4i32_0101:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_4i32_4i32_0101:
; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0
+; AVX512-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX512-NEXT: retq
entry:
%ld = load <4 x i32>, <4 x i32>* %ptr
;
; AVX2-LABEL: load_splat_8i16_8i16_01230123:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_8i16_8i16_01230123:
; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0
+; AVX512-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX512-NEXT: retq
entry:
%ld = load <8 x i16>, <8 x i16>* %ptr
;
; AVX2-LABEL: load_splat_16i8_16i8_0123456701234567:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_16i8_16i8_0123456701234567:
; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0
+; AVX512-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX512-NEXT: retq
entry:
%ld = load <16 x i8>, <16 x i8>* %ptr
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
-; AVX1-LABEL: load_splat_4i32_2i32_0101:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_splat_4i32_2i32_0101:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq (%rdi), %xmm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: load_splat_4i32_2i32_0101:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0
-; AVX512-NEXT: retq
+; AVX-LABEL: load_splat_4i32_2i32_0101:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX-NEXT: retq
%vec = load <2 x i32>, <2 x i32>* %vp
%res = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
ret <4 x i32> %res