}
/// Insert an i1-subvector into an i1-vector.
-static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG) {
+static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG,
+                                const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  SDValue Vec = Op.getOperand(0);
  // 3. Subvector should be inserted in the middle (for example v2i1
  //    to v16i1, index 2)
+  // Extend to a natively supported kshift width.
+  MVT MinVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
+  MVT WideOpVT = OpVT;
+  if (OpVT.getSizeInBits() < MinVT.getStoreSizeInBits())
+    WideOpVT = MinVT;
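+  // E.g. with DQI, v2i1/v4i1 are widened to v8i1 (KSHIFTB); without DQI,
+  // anything narrower than v16i1 is widened to v16i1 (KSHIFTW).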
+
  SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
-  SDValue Undef = DAG.getUNDEF(OpVT);
-  SDValue WideSubVec =
-      DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Undef, SubVec, ZeroIdx);
-  if (Vec.isUndef())
-    return DAG.getNode(X86ISD::VSHLI, dl, OpVT, WideSubVec,
-                       DAG.getConstant(IdxVal, dl, MVT::i8));
+  SDValue Undef = DAG.getUNDEF(WideOpVT);
+  SDValue WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
+                                   Undef, SubVec, ZeroIdx);
+
+  // Extract the sub-vector if required.
+  auto ExtractSubVec = [&](SDValue V) {
+    return (WideOpVT == OpVT) ? V : DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
+                                                OpVT, V, ZeroIdx);
+  };
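+  // All shift arithmetic below is done in WideOpVT; every return path narrows
+  // the result back to OpVT through ExtractSubVec.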
+
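+  // If the destination is undef, only the inserted bits matter: a single left
+  // shift places SubVec at bit IdxVal.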
+  if (Vec.isUndef()) {
+    if (IdxVal != 0) {
+      SDValue ShiftBits = DAG.getConstant(IdxVal, dl, MVT::i8);
+      WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec,
+                               ShiftBits);
+    }
+    return ExtractSubVec(WideSubVec);
+  }
  if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
+    NumElems = WideOpVT.getVectorNumElements();
    unsigned ShiftLeft = NumElems - SubVecNumElems;
    unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
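+    // Shift SubVec to the top, then back down to bit IdxVal, leaving zeros in
+    // all other bits. E.g. inserting v2i1 at index 2 into a zero v16i1:
+    // ShiftLeft == 14 puts the two bits at [15:14], ShiftRight == 12 brings
+    // them down to [3:2].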
-    WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, WideSubVec,
-                             DAG.getConstant(ShiftLeft, dl, MVT::i8));
-    return ShiftRight ? DAG.getNode(X86ISD::VSRLI, dl, OpVT, WideSubVec,
-                        DAG.getConstant(ShiftRight, dl, MVT::i8)) : WideSubVec;
+    Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec,
+                      DAG.getConstant(ShiftLeft, dl, MVT::i8));
+    Vec = ShiftRight ? DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec,
+                       DAG.getConstant(ShiftRight, dl, MVT::i8)) : Vec;
+    return ExtractSubVec(Vec);
  }
  if (IdxVal == 0) {
    // Zero lower bits of the Vec
    SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
-    Vec = DAG.getNode(X86ISD::VSRLI, dl, OpVT, Vec, ShiftBits);
-    Vec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec, ShiftBits);
-    // Merge them together
-    return DAG.getNode(ISD::OR, dl, OpVT, Vec, WideSubVec);
+    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
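+    // The shift-right/shift-left pair clears the low SubVecNumElems bits,
+    // e.g. (Vec >> 2) << 2 zeroes bits [1:0] for a v2i1 insert.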
+    Vec = DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec, ShiftBits);
+    Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, Vec, ShiftBits);
+    // Merge them together; SubVec must be zero-extended.
+    WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
+                             getZeroVector(WideOpVT, Subtarget, DAG, dl),
+                             SubVec, ZeroIdx);
+    Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, WideSubVec);
+    return ExtractSubVec(Vec);
  }
  // Simple case when we put subvector in the upper part
  if (IdxVal + SubVecNumElems == NumElems) {
    // Zero upper bits of the Vec
-    WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec,
-                             DAG.getConstant(IdxVal, dl, MVT::i8));
+    WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec,
+                             DAG.getConstant(IdxVal, dl, MVT::i8));
    SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
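+    // Shifting the widened Vec up and back down by SubVecNumElems clears its
+    // top bits; the shifted SubVec is then OR'ed into the freed positions.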
-    Vec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec, ShiftBits);
-    Vec = DAG.getNode(X86ISD::VSRLI, dl, OpVT, Vec, ShiftBits);
-    return DAG.getNode(ISD::OR, dl, OpVT, Vec, WideSubVec);
+    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
+    Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, Vec, ShiftBits);
+    Vec = DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec, ShiftBits);
+    Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, WideSubVec);
+    return ExtractSubVec(Vec);
  }
  // Subvector should be inserted in the middle - use shuffle
+  WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, DAG.getUNDEF(OpVT),
+                           SubVec, ZeroIdx);
  SmallVector<int, 64> Mask;
  for (unsigned i = 0; i < NumElems; ++i)
    Mask.push_back(i >= IdxVal && i < IdxVal + SubVecNumElems ?
    return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
  if (OpVT.getVectorElementType() == MVT::i1)
-    return Insert1BitVector(Op, DAG);
+    return Insert1BitVector(Op, DAG, Subtarget);
  return SDValue();
}
let Predicates = [HasBWI] in {
  defm Q : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "q"), VK64, OpNode>,
           VEX, TAPD, VEX_W;
-  let Predicates = [HasDQI] in
  defm D : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "d"), VK32, OpNode>,
           VEX, TAPD;
}
def : Pat<(i1 1), (COPY_TO_REGCLASS (KSHIFTRWri (KSET1W), (i8 15)), VK1)>;
def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSHIFTRWri (KSET1W), (i8 15)), VK1)>;
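// (i1 1) and (i1 -1) are the same single set bit: an all-ones mask register
// shifted right by 15 so that only bit 0 survives.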
}
-def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
-          (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;
-def : Pat<(v8i1 (extract_subvector (v32i1 VK32:$src), (iPTR 0))),
-          (v8i1 (COPY_TO_REGCLASS VK32:$src, VK8))>;
-def : Pat<(v8i1 (extract_subvector (v64i1 VK64:$src), (iPTR 0))),
-          (v8i1 (COPY_TO_REGCLASS VK64:$src, VK8))>;
-
-def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
-          (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
-
-def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 0))),
-          (v16i1 (COPY_TO_REGCLASS VK32:$src, VK16))>;
-def : Pat<(v16i1 (extract_subvector (v64i1 VK64:$src), (iPTR 0))),
-          (v16i1 (COPY_TO_REGCLASS VK64:$src, VK16))>;
-
-def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 16))),
-          (v16i1 (COPY_TO_REGCLASS (KSHIFTRDri VK32:$src, (i8 16)), VK16))>;
-
-def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 0))),
-          (v32i1 (COPY_TO_REGCLASS VK64:$src, VK32))>;
-def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 32))),
-          (v32i1 (COPY_TO_REGCLASS (KSHIFTRQri VK64:$src, (i8 32)), VK32))>;
-
-def : Pat<(v4i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
-          (v4i1 (COPY_TO_REGCLASS VK8:$src, VK4))>;
-
-def : Pat<(v2i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
-          (v2i1 (COPY_TO_REGCLASS VK8:$src, VK2))>;
+// Patterns for kmask insert_subvector/extract_subvector to/from index=0
+multiclass operation_subvector_mask_lowering<RegisterClass subRC, ValueType subVT,
+                                             RegisterClass RC, ValueType VT> {
+  def : Pat<(subVT (extract_subvector (VT RC:$src), (iPTR 0))),
+            (subVT (COPY_TO_REGCLASS RC:$src, subRC))>;
+
+  def : Pat<(VT (insert_subvector undef, subRC:$src, (iPTR 0))),
+            (VT (COPY_TO_REGCLASS subRC:$src, RC))>;
+}
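+
+// COPY_TO_REGCLASS is free here: a narrower mask value lives in the low bits
+// of the wider k-register, so insert/extract at index 0 needs no kshift.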
-def : Pat<(v4i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
-          (v4i1 (COPY_TO_REGCLASS VK2:$src, VK4))>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK4, v4i1>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK8, v8i1>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK16, v16i1>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK64, v64i1>;
-def : Pat<(v8i1 (insert_subvector undef, (v4i1 VK4:$src), (iPTR 0))),
-          (v8i1 (COPY_TO_REGCLASS VK4:$src, VK8))>;
-def : Pat<(v8i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
-          (v8i1 (COPY_TO_REGCLASS VK2:$src, VK8))>;
+defm : operation_subvector_mask_lowering<VK4, v4i1, VK8, v8i1>;
+defm : operation_subvector_mask_lowering<VK4, v4i1, VK16, v16i1>;
+defm : operation_subvector_mask_lowering<VK4, v4i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK4, v4i1, VK64, v64i1>;
-def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
-          (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;
+defm : operation_subvector_mask_lowering<VK8, v8i1, VK16, v16i1>;
+defm : operation_subvector_mask_lowering<VK8, v8i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK8, v8i1, VK64, v64i1>;
-def : Pat<(v32i1 (insert_subvector undef, VK2:$src, (iPTR 0))),
-          (v32i1 (COPY_TO_REGCLASS VK2:$src, VK32))>;
-def : Pat<(v32i1 (insert_subvector undef, VK4:$src, (iPTR 0))),
-          (v32i1 (COPY_TO_REGCLASS VK4:$src, VK32))>;
-def : Pat<(v32i1 (insert_subvector undef, VK8:$src, (iPTR 0))),
-          (v32i1 (COPY_TO_REGCLASS VK8:$src, VK32))>;
-def : Pat<(v32i1 (insert_subvector undef, VK16:$src, (iPTR 0))),
-          (v32i1 (COPY_TO_REGCLASS VK16:$src, VK32))>;
+defm : operation_subvector_mask_lowering<VK16, v16i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK16, v16i1, VK64, v64i1>;
-def : Pat<(v64i1 (insert_subvector undef, VK2:$src, (iPTR 0))),
-          (v64i1 (COPY_TO_REGCLASS VK2:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK4:$src, (iPTR 0))),
-          (v64i1 (COPY_TO_REGCLASS VK4:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK8:$src, (iPTR 0))),
-          (v64i1 (COPY_TO_REGCLASS VK8:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK16:$src, (iPTR 0))),
-          (v64i1 (COPY_TO_REGCLASS VK16:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK32:$src, (iPTR 0))),
-          (v64i1 (COPY_TO_REGCLASS VK32:$src, VK64))>;
+defm : operation_subvector_mask_lowering<VK32, v32i1, VK64, v64i1>;
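+
+// Extracting an upper half still needs a real shift: KSHIFTR{W,D,Q} moves the
+// high bits down before the register class is narrowed.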
+def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
+          (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
+def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 16))),
+          (v16i1 (COPY_TO_REGCLASS (KSHIFTRDri VK32:$src, (i8 16)), VK16))>;
+def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 32))),
+          (v32i1 (COPY_TO_REGCLASS (KSHIFTRQri VK64:$src, (i8 32)), VK32))>;
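+
+// v8i1/v4i1 mask shifts below are done in a VK16 register: KSHIFTLW is the
+// narrowest kshift available without DQI, and no 4-bit kshift exists at all.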
def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
          (v8i1 (COPY_TO_REGCLASS
                 (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16),
                  (I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
-def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))),
-          (v8i1 (COPY_TO_REGCLASS
-                 (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16),
-                  (I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
-
def : Pat<(v4i1 (X86vshli VK4:$src, (i8 imm:$imm))),
          (v4i1 (COPY_TO_REGCLASS
                 (KSHIFTLWri (COPY_TO_REGCLASS VK4:$src, VK16),
                  (I8Imm $imm)), VK4))>, Requires<[HasAVX512]>;
-
-def : Pat<(v4i1 (X86vsrli VK4:$src, (i8 imm:$imm))),
-          (v4i1 (COPY_TO_REGCLASS
-                 (KSHIFTRWri (COPY_TO_REGCLASS VK4:$src, VK16),
-                  (I8Imm $imm)), VK4))>, Requires<[HasAVX512]>;
-
//===----------------------------------------------------------------------===//
// AVX-512 - Aligned and unaligned load and store
//
; CHECK: # BB#0:
; CHECK-NEXT: vpslld $31, %xmm0, %xmm0
; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k0
+; CHECK-NEXT: vpslld $31, %xmm1, %xmm0
+; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k1
+; CHECK-NEXT: kshiftlb $4, %k1, %k1
; CHECK-NEXT: kshiftlb $4, %k0, %k0
-; CHECK-NEXT: kshiftrb $4, %k0, %k1
-; CHECK-NEXT: korb %k0, %k1, %k0
+; CHECK-NEXT: kshiftrb $4, %k0, %k0
+; CHECK-NEXT: korb %k1, %k0, %k0
; CHECK-NEXT: vpmovm2w %k0, %xmm0
; CHECK-NEXT: retq
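;
; Note: each v4i1 half now gets its own mask register (low half masked by the
; kshiftlb/kshiftrb pair, high half shifted into place, korb combining them);
; the old sequence never used the second input vector at all.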
; CHECK: # BB#0:
; CHECK-NEXT: vpsllq $63, %xmm0, %xmm0
; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k0
-; CHECK-NEXT: kshiftlw $2, %k0, %k0
-; CHECK-NEXT: kshiftrw $2, %k0, %k1
-; CHECK-NEXT: korw %k0, %k1, %k0
+; CHECK-NEXT: vpsllq $63, %xmm1, %xmm0
+; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k1
+; CHECK-NEXT: kshiftlb $2, %k1, %k1
+; CHECK-NEXT: kshiftlb $2, %k0, %k0
+; CHECK-NEXT: kshiftrb $2, %k0, %k0
+; CHECK-NEXT: korb %k1, %k0, %k0
; CHECK-NEXT: vpmovm2d %k0, %xmm0
; CHECK-NEXT: retq
; CHECK: # BB#0:
; CHECK-NEXT: vpsllq $63, %xmm0, %xmm0
; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k0
-; CHECK-NEXT: kshiftlw $2, %k0, %k0
-; CHECK-NEXT: kshiftrw $2, %k0, %k1
-; CHECK-NEXT: korw %k0, %k1, %k0
+; CHECK-NEXT: vpsllq $63, %xmm1, %xmm0
+; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k1
+; CHECK-NEXT: kshiftlb $2, %k1, %k1
+; CHECK-NEXT: kshiftlb $2, %k0, %k0
+; CHECK-NEXT: kshiftrb $2, %k0, %k0
+; CHECK-NEXT: korb %k1, %k0, %k0
; CHECK-NEXT: kunpckbw %k0, %k0, %k0
; CHECK-NEXT: vpmovm2b %k0, %xmm0
; CHECK-NEXT: retq
; CHECK: # BB#0:
; CHECK-NEXT: vpslld $31, %xmm0, %xmm0
; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k0
+; CHECK-NEXT: vpslld $31, %xmm1, %xmm0
+; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k1
+; CHECK-NEXT: kshiftlb $4, %k1, %k1
; CHECK-NEXT: kshiftlb $4, %k0, %k0
-; CHECK-NEXT: kshiftrb $4, %k0, %k1
-; CHECK-NEXT: korb %k0, %k1, %k0
+; CHECK-NEXT: kshiftrb $4, %k0, %k0
+; CHECK-NEXT: korb %k1, %k0, %k0
; CHECK-NEXT: kunpckbw %k0, %k0, %k0
; CHECK-NEXT: kunpckwd %k0, %k0, %k0
; CHECK-NEXT: vpmovm2b %k0, %ymm0
; SKX: # BB#0:
; SKX-NEXT: vpsllq $63, %xmm2, %xmm2
; SKX-NEXT: vptestmq %xmm2, %xmm2, %k0
-; SKX-NEXT: kshiftlw $2, %k0, %k0
-; SKX-NEXT: kshiftrw $2, %k0, %k1
+; SKX-NEXT: kshiftlb $6, %k0, %k0
+; SKX-NEXT: kshiftrb $6, %k0, %k1
; SKX-NEXT: vscatterqps %xmm0, (,%ymm1) {%k1}
; SKX-NEXT: retq
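;
; Note: kshiftlb $6 / kshiftrb $6 zero all but the low two bits of the byte
; mask, so only the two valid v2i1 lanes reach the scatter.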
;
; SKX_32-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SKX_32-NEXT: vpsllq $63, %xmm2, %xmm2
; SKX_32-NEXT: vptestmq %xmm2, %xmm2, %k0
-; SKX_32-NEXT: kshiftlw $2, %k0, %k0
-; SKX_32-NEXT: kshiftrw $2, %k0, %k1
+; SKX_32-NEXT: kshiftlb $6, %k0, %k0
+; SKX_32-NEXT: kshiftrb $6, %k0, %k1
; SKX_32-NEXT: vscatterdps %xmm0, (,%xmm1) {%k1}
; SKX_32-NEXT: retl
call void @llvm.masked.scatter.v2f32(<2 x float> %a1, <2 x float*> %ptr, i32 4, <2 x i1> %mask)
; SKX: # BB#0:
; SKX-NEXT: vpsllq $63, %xmm2, %xmm2
; SKX-NEXT: vptestmq %xmm2, %xmm2, %k0
-; SKX-NEXT: kshiftlw $2, %k0, %k0
-; SKX-NEXT: kshiftrw $2, %k0, %k1
+; SKX-NEXT: kshiftlb $6, %k0, %k0
+; SKX-NEXT: kshiftrb $6, %k0, %k1
; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SKX-NEXT: vpscatterqd %xmm0, (,%ymm1) {%k1}
; SKX-NEXT: retq
; SKX_32: # BB#0:
; SKX_32-NEXT: vpsllq $63, %xmm2, %xmm2
; SKX_32-NEXT: vptestmq %xmm2, %xmm2, %k0
-; SKX_32-NEXT: kshiftlw $2, %k0, %k0
-; SKX_32-NEXT: kshiftrw $2, %k0, %k1
+; SKX_32-NEXT: kshiftlb $6, %k0, %k0
+; SKX_32-NEXT: kshiftrb $6, %k0, %k1
; SKX_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SKX_32-NEXT: vpscatterqd %xmm0, (,%ymm1) {%k1}
; SKX_32-NEXT: retl
; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SKX-NEXT: vpsllq $63, %xmm1, %xmm1
; SKX-NEXT: vptestmq %xmm1, %xmm1, %k0
-; SKX-NEXT: kshiftlw $2, %k0, %k0
-; SKX-NEXT: kshiftrw $2, %k0, %k1
+; SKX-NEXT: kshiftlb $6, %k0, %k0
+; SKX-NEXT: kshiftrb $6, %k0, %k1
; SKX-NEXT: vgatherdps (%rdi,%xmm0,4), %xmm2 {%k1}
; SKX-NEXT: vmovaps %zmm2, %zmm0
; SKX-NEXT: retq
; SKX_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SKX_32-NEXT: vpsllq $63, %xmm1, %xmm1
; SKX_32-NEXT: vptestmq %xmm1, %xmm1, %k0
-; SKX_32-NEXT: kshiftlw $2, %k0, %k0
-; SKX_32-NEXT: kshiftrw $2, %k0, %k1
+; SKX_32-NEXT: kshiftlb $6, %k0, %k0
+; SKX_32-NEXT: kshiftrb $6, %k0, %k1
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: vgatherdps (%eax,%xmm0,4), %xmm2 {%k1}
; SKX_32-NEXT: vmovaps %zmm2, %zmm0
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k0
-; SKX-NEXT: kshiftlw $2, %k0, %k0
-; SKX-NEXT: kshiftrw $2, %k0, %k1
+; SKX-NEXT: kshiftlw $14, %k0, %k0
+; SKX-NEXT: kshiftrw $14, %k0, %k1
; SKX-NEXT: vmovups %xmm1, (%rdi) {%k1}
; SKX-NEXT: retq
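;
; Note: shifting by 14 rather than 2 keeps only the two valid bits of the
; v16i1 mask; the old shift-by-2 left bits 2-13 unmasked.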
%mask = icmp eq <2 x i32> %trigger, zeroinitializer
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k0
-; SKX-NEXT: kshiftlw $2, %k0, %k0
-; SKX-NEXT: kshiftrw $2, %k0, %k1
+; SKX-NEXT: kshiftlw $14, %k0, %k0
+; SKX-NEXT: kshiftrw $14, %k0, %k1
; SKX-NEXT: vmovups (%rdi), %xmm1 {%k1}
; SKX-NEXT: vmovaps %zmm1, %zmm0
; SKX-NEXT: retq
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k0
-; SKX-NEXT: kshiftlw $2, %k0, %k0
-; SKX-NEXT: kshiftrw $2, %k0, %k1
+; SKX-NEXT: kshiftlw $14, %k0, %k0
+; SKX-NEXT: kshiftrw $14, %k0, %k1
; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SKX-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1}
; SKX-NEXT: vpmovsxdq %xmm0, %xmm0
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SKX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
-; SKX-NEXT: kshiftlw $2, %k0, %k0
-; SKX-NEXT: kshiftrw $2, %k0, %k1
+; SKX-NEXT: kshiftlw $14, %k0, %k0
+; SKX-NEXT: kshiftrw $14, %k0, %k1
; SKX-NEXT: vmovups (%rdi), %xmm0 {%k1} {z}
; SKX-NEXT: retq
%mask = icmp eq <2 x i32> %trigger, zeroinitializer