1. Use a shuffle to insert an i1 element into a vector. The previous implementation was incorrect (dest_bit OR src_bit does not clear the destination bit when src_bit=0); see the sketch below.
2. Improve i1 vector shuffles: use CVT2MASK when supported instead of TRUNCATE.
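
For context, a minimal standalone sketch (plain C++, not the actual DAG lowering) of why a bare dest_bit OR src_bit sequence is wrong: OR can set a bit but never clear it, so inserting a 0 over an existing 1 leaves the stale bit behind. The insertBit helper is hypothetical and only illustrates the clear-then-OR idea the new lowering follows.

  #include <cassert>
  #include <cstdint>

  // Hypothetical illustration of inserting a single bit into a mask word.
  static uint16_t insertBit(uint16_t Dest, bool Src, unsigned Idx) {
    // Buggy idea (old lowering): Dest | (Src << Idx) cannot clear a set bit.
    // Fixed idea: clear the destination bit first, then OR in the new value.
    return (Dest & ~(uint16_t(1) << Idx)) | (uint16_t(Src) << Idx);
  }

  int main() {
    // Inserting 0 at index 2 of 0x4 must give 0x0; an OR-only lowering
    // would incorrectly return 0x4.
    assert(insertBit(0x4, /*Src=*/false, /*Idx=*/2) == 0x0);
    assert(insertBit(0x0, /*Src=*/true, /*Idx=*/2) == 0x4);
    return 0;
  }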
Differential Revision: http://reviews.llvm.org/D23347
llvm-svn: 278623
setOperationAction(ISD::TRUNCATE, VT, Custom);
setOperationAction(ISD::SETCC, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::SELECT, VT, Custom);
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
V2 = getOnesVector(ExtVT, Subtarget, DAG, DL);
else
V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
- return DAG.getNode(ISD::TRUNCATE, DL, VT,
- DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask));
+
+ SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
+ // The i1 elements were sign-extended, so we can use X86ISD::CVT2MASK.
+ int NumElems = VT.getVectorNumElements();
+ if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
+ (Subtarget.hasDQI() && (NumElems < 32)))
+ return DAG.getNode(X86ISD::CVT2MASK, DL, VT, Shuffle);
+
+ return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
}
/// Helper function that returns true if the shuffle mask should be
unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
- if (IdxVal)
+ unsigned NumElems = VecVT.getVectorNumElements();
+
+ if (Vec.isUndef()) {
+ if (IdxVal)
+ EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
+ DAG.getConstant(IdxVal, dl, MVT::i8));
+ return EltInVec;
+ }
+
+ // Insertion of one bit into the first or last position
+ // can be done with two SHIFTs + OR.
+ if (IdxVal == 0) {
+ // EltInVec is already at the correct index and the other bits are 0.
+ // Clear the first bit in the source vector.
+ Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
+ DAG.getConstant(1, dl, MVT::i8));
+ Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
+ DAG.getConstant(1, dl, MVT::i8));
+
+ return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
+ }
+ if (IdxVal == NumElems - 1) {
+ // Move the bit to the last position inside the vector.
EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
DAG.getConstant(IdxVal, dl, MVT::i8));
- if (Vec.isUndef())
- return EltInVec;
- return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
+ // Clear the last bit in the source vector.
+ Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
+ DAG.getConstant(1, dl, MVT::i8));
+ Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
+ DAG.getConstant(1, dl, MVT::i8));
+
+ return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
+ }
+
+ // Otherwise, use a vector shuffle to insert the element.
+ SmallVector<int, 64> MaskVec(NumElems);
+ for (unsigned i = 0; i != NumElems; ++i)
+ MaskVec[i] = (i == IdxVal) ? NumElems : i;
+
+ return DAG.getVectorShuffle(VecVT, dl, Vec, EltInVec, MaskVec);
}
SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
// With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
let Predicates = [HasAVX512] in {
def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
+ def : Pat<(v4i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK4)>;
+ def : Pat<(v2i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK2)>;
def : Pat<(v8i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK8)>;
def : Pat<(v4i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK4)>;
def : Pat<(v2i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK2)>;
def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 32))),
(v32i1 (COPY_TO_REGCLASS (KSHIFTRQri VK64:$src, (i8 32)), VK32))>;
-def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
- (v8i1 (COPY_TO_REGCLASS
- (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16),
- (I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
-def : Pat<(v4i1 (X86vshli VK4:$src, (i8 imm:$imm))),
- (v4i1 (COPY_TO_REGCLASS
- (KSHIFTLWri (COPY_TO_REGCLASS VK4:$src, VK16),
- (I8Imm $imm)), VK4))>, Requires<[HasAVX512]>;
+// Patterns for kmask shifts.
+multiclass mask_shift_lowering<RegisterClass RC, ValueType VT> {
+ def : Pat<(VT (X86vshli RC:$src, (i8 imm:$imm))),
+ (VT (COPY_TO_REGCLASS
+ (KSHIFTLWri (COPY_TO_REGCLASS RC:$src, VK16),
+ (I8Imm $imm)),
+ RC))>;
+ def : Pat<(VT (X86vsrli RC:$src, (i8 imm:$imm))),
+ (VT (COPY_TO_REGCLASS
+ (KSHIFTRWri (COPY_TO_REGCLASS RC:$src, VK16),
+ (I8Imm $imm)),
+ RC))>;
+}
+
+defm : mask_shift_lowering<VK8, v8i1>, Requires<[HasAVX512, NoDQI]>;
+defm : mask_shift_lowering<VK4, v4i1>, Requires<[HasAVX512]>;
+defm : mask_shift_lowering<VK2, v2i1>, Requires<[HasAVX512]>;
//===----------------------------------------------------------------------===//
// AVX-512 - Aligned and unaligned load and store
//
; ALL-NEXT: kmovw %edi, %k0
; ALL-NEXT: movw $-4, %ax
; ALL-NEXT: kmovw %eax, %k1
+; ALL-NEXT: kshiftrw $1, %k1, %k1
+; ALL-NEXT: kshiftlw $1, %k1, %k1
; ALL-NEXT: korw %k0, %k1, %k0
; ALL-NEXT: kmovw %k0, %eax
; ALL-NEXT: retq
; KNL-NEXT: kmovw %eax, %k0
; KNL-NEXT: movw $-4, %ax
; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: kshiftrw $1, %k1, %k1
+; KNL-NEXT: kshiftlw $1, %k1, %k1
; KNL-NEXT: korw %k0, %k1, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: retq
; SKX-NEXT: kmovw %eax, %k0
; SKX-NEXT: movw $-4, %ax
; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: kshiftrw $1, %k1, %k1
+; SKX-NEXT: kshiftlw $1, %k1, %k1
; SKX-NEXT: korw %k0, %k1, %k0
; SKX-NEXT: kmovw %k0, %eax
; SKX-NEXT: retq
; KNL: ## BB#0:
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k0
-; KNL-NEXT: kmovw %esi, %k1
-; KNL-NEXT: kshiftlw $10, %k0, %k0
-; KNL-NEXT: korw %k0, %k1, %k0
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: kmovw %esi, %k2
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
+; KNL-NEXT: vmovdqa32 %zmm0, %zmm1 {%k2} {z}
+; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15]
+; KNL-NEXT: vpermt2d %zmm0, %zmm2, %zmm1
+; KNL-NEXT: vpslld $31, %zmm1, %zmm0
+; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: retq
;
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: kmovd %eax, %k0
; SKX-NEXT: kmovw %esi, %k1
-; SKX-NEXT: kshiftlw $10, %k0, %k0
-; SKX-NEXT: korw %k0, %k1, %k0
+; SKX-NEXT: vpmovm2d %k1, %zmm0
+; SKX-NEXT: vpmovm2d %k0, %zmm1
+; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15]
+; SKX-NEXT: vpermt2d %zmm1, %zmm2, %zmm0
+; SKX-NEXT: vpmovd2m %zmm0, %k0
; SKX-NEXT: kmovw %k0, %eax
; SKX-NEXT: retq
%x = load i1 , i1 * %addr, align 128
; KNL: ## BB#0:
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k0
-; KNL-NEXT: kmovw %esi, %k1
-; KNL-NEXT: kshiftlw $4, %k0, %k0
-; KNL-NEXT: korw %k0, %k1, %k0
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: kmovw %esi, %k2
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
+; KNL-NEXT: vmovdqa64 %zmm0, %zmm1 {%k2} {z}
+; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,5,6,7]
+; KNL-NEXT: vpermt2q %zmm0, %zmm2, %zmm1
+; KNL-NEXT: vpsllq $63, %zmm1, %zmm0
+; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: retq
;
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: kmovd %eax, %k0
; SKX-NEXT: kmovb %esi, %k1
-; SKX-NEXT: kshiftlb $4, %k0, %k0
-; SKX-NEXT: korb %k0, %k1, %k0
+; SKX-NEXT: vpmovm2q %k1, %zmm0
+; SKX-NEXT: vpmovm2q %k0, %zmm1
+; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,5,6,7]
+; SKX-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
+; SKX-NEXT: vpmovq2m %zmm0, %k0
; SKX-NEXT: kmovb %k0, %eax
; SKX-NEXT: retq
%x = load i1 , i1 * %addr, align 128
ret <32 x i8> %r
}
+define i32 @test_insertelement_v32i1(i32 %a, i32 %b, <32 x i32> %x , <32 x i32> %y) {
+; SKX-LABEL: test_insertelement_v32i1:
+; SKX: ## BB#0:
+; SKX-NEXT: cmpl %esi, %edi
+; SKX-NEXT: setb %al
+; SKX-NEXT: kmovw %eax, %k0
+; SKX-NEXT: vpcmpltud %zmm2, %zmm0, %k1
+; SKX-NEXT: vpcmpltud %zmm3, %zmm1, %k2
+; SKX-NEXT: kunpckwd %k1, %k2, %k1
+; SKX-NEXT: vpmovm2w %k1, %zmm0
+; SKX-NEXT: vpmovm2w %k0, %zmm1
+; SKX-NEXT: vmovdqu16 {{.*#+}} zmm2 = [0,1,2,3,32,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
+; SKX-NEXT: vpermt2w %zmm1, %zmm2, %zmm0
+; SKX-NEXT: vpmovw2m %zmm0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: retq
+ %cmp_res_i1 = icmp ult i32 %a, %b
+ %cmp_cmp_vec = icmp ult <32 x i32> %x, %y
+ %maskv = insertelement <32 x i1> %cmp_cmp_vec, i1 %cmp_res_i1, i32 4
+ %res = bitcast <32 x i1> %maskv to i32
+ ret i32 %res
+}
+
+define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y) {
+; SKX-LABEL: test_iinsertelement_v4i1:
+; SKX: ## BB#0:
+; SKX-NEXT: cmpl %esi, %edi
+; SKX-NEXT: setb %al
+; SKX-NEXT: kmovw %eax, %k0
+; SKX-NEXT: vpcmpltud %xmm1, %xmm0, %k1
+; SKX-NEXT: vpmovm2d %k1, %xmm0
+; SKX-NEXT: vpmovm2d %k0, %xmm1
+; SKX-NEXT: vpbroadcastq %xmm1, %xmm1
+; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
+; SKX-NEXT: vpmovd2m %xmm0, %k0
+; SKX-NEXT: kmovb %k0, %eax
+; SKX-NEXT: retq
+ %cmp_res_i1 = icmp ult i32 %a, %b
+ %cmp_cmp_vec = icmp ult <4 x i32> %x, %y
+ %maskv = insertelement <4 x i1> %cmp_cmp_vec, i1 %cmp_res_i1, i32 2
+ %res0 = shufflevector <4 x i1> %maskv, <4 x i1> undef , <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
+ %res = bitcast <8 x i1> %res0 to i8
+ ret i8 %res
+}
+
+define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y) {
+; SKX-LABEL: test_iinsertelement_v2i1:
+; SKX: ## BB#0:
+; SKX-NEXT: cmpl %esi, %edi
+; SKX-NEXT: setb %al
+; SKX-NEXT: kmovw %eax, %k0
+; SKX-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
+; SKX-NEXT: kshiftlw $1, %k1, %k1
+; SKX-NEXT: kshiftrw $1, %k1, %k1
+; SKX-NEXT: kshiftlw $1, %k0, %k0
+; SKX-NEXT: korw %k0, %k1, %k0
+; SKX-NEXT: kmovb %k0, %eax
+; SKX-NEXT: retq
+ %cmp_res_i1 = icmp ult i32 %a, %b
+ %cmp_cmp_vec = icmp ult <2 x i64> %x, %y
+ %maskv = insertelement <2 x i1> %cmp_cmp_vec, i1 %cmp_res_i1, i32 1
+ %res0 = shufflevector <2 x i1> %maskv, <2 x i1> undef , <8 x i32> <i32 0, i32 1, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+ %res = bitcast <8 x i1> %res0 to i8
+ ret i8 %res
+}
+
define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
; KNL-LABEL: test_extractelement_v2i1:
; KNL: ## BB#0:
; SKX-NEXT: kmovq %rdi, %k0
; SKX-NEXT: kxnorw %k0, %k0, %k1
; SKX-NEXT: kshiftrw $15, %k1, %k1
-; SKX-NEXT: kshiftlq $5, %k1, %k1
-; SKX-NEXT: korq %k1, %k0, %k0
+; SKX-NEXT: vpmovm2b %k1, %zmm0
+; SKX-NEXT: vpsllq $40, %xmm0, %xmm0
+; SKX-NEXT: vpmovm2b %k0, %zmm1
+; SKX-NEXT: vmovdqu8 {{.*#+}} ymm2 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; SKX-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; SKX-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; SKX-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; SKX-NEXT: vpmovb2m %zmm0, %k0
; SKX-NEXT: vpmovm2b %k0, %zmm0
; SKX-NEXT: retq
%a = bitcast i64 %x to <64 x i1>
; SKX-NEXT: cmpl %edx, %esi
; SKX-NEXT: setg %al
; SKX-NEXT: kmovw %eax, %k1
-; SKX-NEXT: kshiftlq $5, %k1, %k1
-; SKX-NEXT: korq %k1, %k0, %k0
+; SKX-NEXT: vpmovm2b %k1, %zmm0
+; SKX-NEXT: vpsllq $40, %xmm0, %xmm0
+; SKX-NEXT: vpmovm2b %k0, %zmm1
+; SKX-NEXT: vmovdqu8 {{.*#+}} ymm2 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; SKX-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; SKX-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; SKX-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; SKX-NEXT: vpmovb2m %zmm0, %k0
; SKX-NEXT: vpmovm2b %k0, %zmm0
; SKX-NEXT: retq
%a = bitcast i64 %x to <64 x i1>
define <8 x i1> @test18(i8 %a, i16 %y) {
; KNL-LABEL: test18:
; KNL: ## BB#0:
-; KNL-NEXT: kmovw %edi, %k0
-; KNL-NEXT: kmovw %esi, %k1
-; KNL-NEXT: kshiftlw $7, %k1, %k2
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: kmovw %esi, %k2
+; KNL-NEXT: kshiftlw $7, %k2, %k0
+; KNL-NEXT: kshiftrw $15, %k0, %k0
+; KNL-NEXT: kshiftlw $6, %k2, %k2
; KNL-NEXT: kshiftrw $15, %k2, %k2
-; KNL-NEXT: kshiftlw $6, %k1, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kshiftlw $6, %k1, %k1
-; KNL-NEXT: korw %k1, %k0, %k0
-; KNL-NEXT: kshiftlw $7, %k2, %k1
-; KNL-NEXT: korw %k1, %k0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
+; KNL-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} {z}
+; KNL-NEXT: vmovdqa64 %zmm0, %zmm2 {%k2} {z}
+; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,4,5,8,7]
+; KNL-NEXT: vpermt2q %zmm2, %zmm3, %zmm1
+; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
+; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
+; KNL-NEXT: kshiftlw $1, %k1, %k1
+; KNL-NEXT: kshiftrw $1, %k1, %k1
+; KNL-NEXT: kshiftlw $7, %k0, %k0
+; KNL-NEXT: korw %k0, %k1, %k1
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqw %zmm0, %xmm0
; KNL-NEXT: retq
; SKX: ## BB#0:
; SKX-NEXT: kmovb %edi, %k0
; SKX-NEXT: kmovw %esi, %k1
-; SKX-NEXT: kshiftlw $6, %k1, %k2
+; SKX-NEXT: kshiftlw $7, %k1, %k2
; SKX-NEXT: kshiftrw $15, %k2, %k2
-; SKX-NEXT: kshiftlw $7, %k1, %k1
+; SKX-NEXT: kshiftlw $6, %k1, %k1
; SKX-NEXT: kshiftrw $15, %k1, %k1
-; SKX-NEXT: kshiftlb $7, %k1, %k1
-; SKX-NEXT: kshiftlb $6, %k2, %k2
-; SKX-NEXT: korb %k2, %k0, %k0
+; SKX-NEXT: vpmovm2q %k0, %zmm0
+; SKX-NEXT: vpmovm2q %k1, %zmm1
+; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
+; SKX-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
+; SKX-NEXT: vpmovq2m %zmm0, %k0
+; SKX-NEXT: kshiftlb $1, %k0, %k0
+; SKX-NEXT: kshiftrb $1, %k0, %k0
+; SKX-NEXT: kshiftlb $7, %k2, %k1
; SKX-NEXT: korb %k1, %k0, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
; SKX-NEXT: retq
; CHECK-NEXT: vpmovm2q %k0, %zmm0
; CHECK-NEXT: vpxord %zmm1, %zmm1, %zmm1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[0,1,0,1]
-; CHECK-NEXT: vpsllq $63, %zmm0, %zmm0
-; CHECK-NEXT: vptestmq %zmm0, %zmm0, %k0
+; CHECK-NEXT: vpmovq2m %zmm0, %k0
; CHECK-NEXT: vpmovm2w %k0, %xmm0
; CHECK-NEXT: retq
%res = shufflevector <2 x i1> %a, <2 x i1> zeroinitializer, <8 x i32> <i32 3, i32 3, i32 undef, i32 undef, i32 0, i32 1, i32 undef, i32 undef>
; VL_BW_DQ-NEXT: vptestmq %xmm0, %xmm0, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm0
; VL_BW_DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; VL_BW_DQ-NEXT: vpsllq $63, %xmm0, %xmm0
-; VL_BW_DQ-NEXT: vptestmq %xmm0, %xmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %xmm0, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm0
; VL_BW_DQ-NEXT: retq
%b = shufflevector <2 x i1> %a, <2 x i1> undef, <2 x i32> <i32 1, i32 0>
; VL_BW_DQ-NEXT: kmovb %eax, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm1
; VL_BW_DQ-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
-; VL_BW_DQ-NEXT: vpsllq $63, %xmm0, %xmm0
-; VL_BW_DQ-NEXT: vptestmq %xmm0, %xmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %xmm0, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm0
; VL_BW_DQ-NEXT: retq
%b = shufflevector <2 x i1> %a, <2 x i1> <i1 1, i1 0>, <2 x i32> <i32 1, i32 2>
; VL_BW_DQ-NEXT: vptestmd %xmm0, %xmm0, %k0
; VL_BW_DQ-NEXT: vpmovm2d %k0, %xmm0
; VL_BW_DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; VL_BW_DQ-NEXT: vpslld $31, %xmm0, %xmm0
-; VL_BW_DQ-NEXT: vptestmd %xmm0, %xmm0, %k0
+; VL_BW_DQ-NEXT: vpmovd2m %xmm0, %k0
; VL_BW_DQ-NEXT: vpmovm2d %k0, %xmm0
; VL_BW_DQ-NEXT: retq
%b = shufflevector <4 x i1> %a, <4 x i1> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [3,6,1,0,3,7,7,0]
; VL_BW_DQ-NEXT: vpermq %zmm0, %zmm1, %zmm0
-; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0
-; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
; VL_BW_DQ-NEXT: vpmovm2w %k0, %xmm0
; VL_BW_DQ-NEXT: retq
%a2 = icmp eq <8 x i64> %a, %a1
; VL_BW_DQ-NEXT: vpmovm2d %k0, %zmm1
; VL_BW_DQ-NEXT: vmovdqa32 {{.*#+}} zmm2 = [3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0]
; VL_BW_DQ-NEXT: vpermt2d %zmm0, %zmm2, %zmm1
-; VL_BW_DQ-NEXT: vpslld $31, %zmm1, %zmm0
-; VL_BW_DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovd2m %zmm1, %k0
; VL_BW_DQ-NEXT: vpmovm2b %k0, %xmm0
; VL_BW_DQ-NEXT: retq
%a2 = icmp eq <16 x i32> %a, %a1
; VL_BW_DQ-NEXT: vpmovm2w %k0, %zmm0
; VL_BW_DQ-NEXT: vmovdqu16 {{.*#+}} zmm1 = [3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0,3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0]
; VL_BW_DQ-NEXT: vpermw %zmm0, %zmm1, %zmm0
-; VL_BW_DQ-NEXT: vpsllw $15, %zmm0, %zmm0
; VL_BW_DQ-NEXT: vpmovw2m %zmm0, %k0
; VL_BW_DQ-NEXT: vpmovm2b %k0, %ymm0
; VL_BW_DQ-NEXT: retq
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
; VL_BW_DQ-NEXT: vextracti64x2 $1, %zmm0, %xmm0
; VL_BW_DQ-NEXT: vpbroadcastq %xmm0, %zmm0
-; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0
-; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
; VL_BW_DQ-NEXT: vpmovm2w %k0, %xmm0
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
; VL_BW_DQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = <8,2,10,u,3,u,2,u>
; VL_BW_DQ-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
-; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0
-; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
; VL_BW_DQ-NEXT: kmovb %k0, %eax
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
; VL_BW_DQ-NEXT: kmovb %edi, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
; VL_BW_DQ-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5,0,1,0,1]
-; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0
-; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
; VL_BW_DQ-NEXT: kmovb %k0, %eax
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
; VL_BW_DQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,6,1,0,3,7,7,0]
; VL_BW_DQ-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
-; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0
-; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
; VL_BW_DQ-NEXT: kmovb %k0, %eax
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [9,1,2,10,4,5,6,7]
; VL_BW_DQ-NEXT: vpxord %zmm2, %zmm2, %zmm2
; VL_BW_DQ-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
-; VL_BW_DQ-NEXT: vpsllq $63, %zmm2, %zmm0
-; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
; VL_BW_DQ-NEXT: kmovb %k0, %eax
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm1
; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [9,6,1,0,3,7,7,1]
; VL_BW_DQ-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
-; VL_BW_DQ-NEXT: vpsllq $63, %zmm0, %zmm0
-; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
; VL_BW_DQ-NEXT: kmovb %k0, %eax
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [9,1,2,3,4,5,6,7]
; VL_BW_DQ-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; VL_BW_DQ-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
-; VL_BW_DQ-NEXT: vpsllq $63, %zmm2, %zmm0
-; VL_BW_DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
; VL_BW_DQ-NEXT: kmovb %k0, %eax
; VL_BW_DQ-NEXT: retq
%c = shufflevector <8 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, <8 x i1> %a, <8 x i32> <i32 9, i32 6, i32 1, i32 0, i32 3, i32 7, i32 7, i32 0>
; VL_BW_DQ-NEXT: kmovw %edi, %k0
; VL_BW_DQ-NEXT: vpmovm2d %k0, %zmm0
; VL_BW_DQ-NEXT: vpbroadcastd %xmm0, %zmm0
-; VL_BW_DQ-NEXT: vpslld $31, %zmm0, %zmm0
-; VL_BW_DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
+; VL_BW_DQ-NEXT: vpmovd2m %zmm0, %k0
; VL_BW_DQ-NEXT: kmovw %k0, %eax
; VL_BW_DQ-NEXT: retq
%b = bitcast i16 %a to <16 x i1>
; VL_BW_DQ-NEXT: kmovq %rdi, %k0
; VL_BW_DQ-NEXT: vpmovm2b %k0, %zmm0
; VL_BW_DQ-NEXT: vpbroadcastb %xmm0, %zmm0
-; VL_BW_DQ-NEXT: vpsllw $7, %zmm0, %zmm0
; VL_BW_DQ-NEXT: vpmovb2m %zmm0, %k0
; VL_BW_DQ-NEXT: kmovq %k0, %rax
; VL_BW_DQ-NEXT: retq