Exposed an issue where recursive calls to getTargetConstantBitsFromNode don't yet handle changes to EltSizeInBits.
llvm-svn: 343384
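(Illustration, not part of the commit.) The TODO guards below bail out whenever the requested EltSizeInBits differs from the operand's scalar size, because peeking through a bitcast would require re-slicing the returned constant bits into a different element width. A minimal sketch of what such re-slicing would involve, with simplified types and a hypothetical helper name:

#include <cstdint>
#include <vector>

// Hypothetical: view each 64-bit constant lane as two 32-bit lanes, in the
// low-half-first order a little-endian target such as x86 uses for bitcasts.
static std::vector<uint32_t> splitTo32(const std::vector<uint64_t> &Elts64) {
  std::vector<uint32_t> Elts32;
  for (uint64_t E : Elts64) {
    Elts32.push_back((uint32_t)E);         // bits [31:0]
    Elts32.push_back((uint32_t)(E >> 32)); // bits [63:32]
  }
  return Elts32;
}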
// Extract constant bits from a subvector's source.
if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
isa<ConstantSDNode>(Op.getOperand(1))) {
+ // TODO - support extract_subvector through bitcasts.
+ if (EltSizeInBits != VT.getScalarSizeInBits())
+ return false;
+
if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
UndefElts, EltBits, AllowWholeUndefs,
AllowPartialUndefs)) {
}
}
+ // Extract constant bits from shuffle node sources.
+ if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
+ // TODO - support shuffle through bitcasts.
+ if (EltSizeInBits != VT.getScalarSizeInBits())
+ return false;
+
+ ArrayRef<int> Mask = SVN->getMask();
+ if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
+ llvm::any_of(Mask, [](int M) { return M < 0; }))
+ return false;
+
+ APInt UndefElts0, UndefElts1;
+ SmallVector<APInt, 32> EltBits0, EltBits1;
+ if (isAnyInRange(Mask, 0, NumElts) &&
+ !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
+ UndefElts0, EltBits0, AllowWholeUndefs,
+ AllowPartialUndefs))
+ return false;
+ if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
+ !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
+ UndefElts1, EltBits1, AllowWholeUndefs,
+ AllowPartialUndefs))
+ return false;
+
+ UndefElts = APInt::getNullValue(NumElts);
+ for (int i = 0; i != (int)NumElts; ++i) {
+ int M = Mask[i];
+ if (M < 0) {
+ UndefElts.setBit(i);
+ EltBits.push_back(APInt::getNullValue(EltSizeInBits));
+ } else if (M < (int)NumElts) {
+ if (UndefElts0[M])
+ UndefElts.setBit(i);
+ EltBits.push_back(EltBits0[M]);
+ } else {
+ if (UndefElts1[M - NumElts])
+ UndefElts.setBit(i);
+ EltBits.push_back(EltBits1[M - NumElts]);
+ }
+ }
+ return true;
+ }
+
return false;
}
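The loop above simply routes each destination lane to the constant bits of the source lane selected by the shuffle mask. A standalone sketch of that remapping, using plain vectors instead of APInt/SmallVector (names here are illustrative, not from the LLVM sources):

#include <cstdint>
#include <vector>

struct ConstVec {
  std::vector<uint64_t> Bits; // per-lane constant bits
  std::vector<bool> Undef;    // per-lane undef flag
};

// Mask entries < 0 mean undef; entries < NumElts pick from Src0; the rest
// pick lane (M - NumElts) from Src1, mirroring the shuffle handling above.
static ConstVec remapThroughShuffle(const std::vector<int> &Mask,
                                    const ConstVec &Src0,
                                    const ConstVec &Src1) {
  int NumElts = (int)Mask.size();
  ConstVec Out;
  for (int i = 0; i != NumElts; ++i) {
    int M = Mask[i];
    if (M < 0) {
      Out.Undef.push_back(true);
      Out.Bits.push_back(0);
    } else if (M < NumElts) {
      Out.Undef.push_back(Src0.Undef[M]);
      Out.Bits.push_back(Src0.Bits[M]);
    } else {
      Out.Undef.push_back(Src1.Undef[M - NumElts]);
      Out.Bits.push_back(Src1.Bits[M - NumElts]);
    }
  }
  return Out;
}

In the test diffs below the shuffled shift amounts are now visible as per-lane constants, so the variable-count vpsrlq/vpsllq sequences that previously loaded the amounts into xmm registers collapse into immediate-count shifts (e.g. vpsrlq $63 / vpsrlq $33).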
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
; X32-NEXT: subl $16, %esp
-; X32-NEXT: vmovdqa {{.*#+}} xmm3 = [33,0,63,0]
-; X32-NEXT: vmovdqa {{.*#+}} xmm4 = [0,2147483648,0,2147483648]
-; X32-NEXT: vpsrlq %xmm3, %xmm4, %xmm5
-; X32-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[2,3,0,1]
-; X32-NEXT: vpsrlq %xmm6, %xmm4, %xmm4
-; X32-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
-; X32-NEXT: vextractf128 $1, %ymm2, %xmm5
-; X32-NEXT: vpsrlq %xmm6, %xmm5, %xmm7
-; X32-NEXT: vpsrlq %xmm3, %xmm5, %xmm5
-; X32-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm7[4,5,6,7]
-; X32-NEXT: vpsrlq %xmm6, %xmm2, %xmm6
-; X32-NEXT: vpsrlq %xmm3, %xmm2, %xmm2
-; X32-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
; X32-NEXT: vpmovsxdq 16(%ebp), %xmm3
-; X32-NEXT: vpxor %xmm4, %xmm5, %xmm5
-; X32-NEXT: vpsubq %xmm4, %xmm5, %xmm5
-; X32-NEXT: vpxor %xmm4, %xmm2, %xmm2
-; X32-NEXT: vpsubq %xmm4, %xmm2, %xmm2
; X32-NEXT: vpmovsxdq 8(%ebp), %xmm4
-; X32-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
+; X32-NEXT: vmovdqa {{.*#+}} xmm5 = [0,2147483648,0,2147483648]
+; X32-NEXT: vpsrlq $63, %xmm5, %xmm6
+; X32-NEXT: vpsrlq $33, %xmm5, %xmm5
+; X32-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4,5,6,7]
+; X32-NEXT: vextractf128 $1, %ymm2, %xmm6
+; X32-NEXT: vpsrlq $63, %xmm6, %xmm7
+; X32-NEXT: vpsrlq $33, %xmm6, %xmm6
+; X32-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4,5,6,7]
+; X32-NEXT: vpxor %xmm5, %xmm6, %xmm6
+; X32-NEXT: vpsubq %xmm5, %xmm6, %xmm6
+; X32-NEXT: vpsrlq $63, %xmm2, %xmm7
+; X32-NEXT: vpsrlq $33, %xmm2, %xmm2
+; X32-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm7[4,5,6,7]
+; X32-NEXT: vpxor %xmm5, %xmm2, %xmm2
+; X32-NEXT: vpsubq %xmm5, %xmm2, %xmm2
+; X32-NEXT: vinsertf128 $1, %xmm6, %ymm2, %ymm2
; X32-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; X32-NEXT: vextractf128 $1, %ymm1, %xmm4
; X32-NEXT: vextractf128 $1, %ymm0, %xmm5
;
; X86-AVX1-LABEL: trunc_ashr_v4i64_demandedelts:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: movl $63, %eax
-; X86-AVX1-NEXT: vmovd %eax, %xmm1
-; X86-AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm2
-; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
-; X86-AVX1-NEXT: vpsllq %xmm3, %xmm0, %xmm4
-; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
-; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X86-AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm4
-; X86-AVX1-NEXT: vpsllq %xmm3, %xmm0, %xmm0
-; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
-; X86-AVX1-NEXT: vpsrlq %xmm3, %xmm0, %xmm4
-; X86-AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
-; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,2147483648,0,2147483648]
-; X86-AVX1-NEXT: vpsrlq %xmm1, %xmm4, %xmm5
-; X86-AVX1-NEXT: vpsrlq %xmm3, %xmm4, %xmm4
-; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
-; X86-AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
-; X86-AVX1-NEXT: vpsubq %xmm4, %xmm0, %xmm0
-; X86-AVX1-NEXT: vpsrlq %xmm3, %xmm2, %xmm3
-; X86-AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm1
-; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
-; X86-AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
-; X86-AVX1-NEXT: vpsubq %xmm4, %xmm1, %xmm1
-; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X86-AVX1-NEXT: vpsllq $63, %xmm0, %xmm1
+; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X86-AVX1-NEXT: vpsllq $63, %xmm2, %xmm3
+; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; X86-AVX1-NEXT: vpsrlq $63, %xmm3, %xmm3
+; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,0,0,0,0,0,0,32768]
+; X86-AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
+; X86-AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm2
+; X86-AVX1-NEXT: vpsrlq $63, %xmm1, %xmm1
+; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; X86-AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpsubq %xmm3, %xmm0, %xmm0
+; X86-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X86-AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
;
; X32-AVX1-LABEL: constant_shift_v4i64:
; X32-AVX1: # %bb.0:
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [31,0,62,0]
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
-; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm3
-; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
-; X32-AVX1-NEXT: vpsrlq %xmm4, %xmm2, %xmm5
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4,5,6,7]
-; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; X32-AVX1-NEXT: vpsrlq %xmm4, %xmm5, %xmm4
-; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm5, %xmm1
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7]
-; X32-AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm1
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,0,7,0]
-; X32-AVX1-NEXT: vpsrlq %xmm3, %xmm2, %xmm4
-; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[2,3,0,1]
-; X32-AVX1-NEXT: vpsrlq %xmm5, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
-; X32-AVX1-NEXT: vpsrlq %xmm5, %xmm0, %xmm4
-; X32-AVX1-NEXT: vpsrlq %xmm3, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
-; X32-AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]
+; X32-AVX1-NEXT: vpsrlq $62, %xmm1, %xmm2
+; X32-AVX1-NEXT: vpsrlq $31, %xmm1, %xmm3
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; X32-AVX1-NEXT: vpsrlq $62, %xmm3, %xmm4
+; X32-AVX1-NEXT: vpsrlq $31, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7]
+; X32-AVX1-NEXT: vpxor %xmm2, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; X32-AVX1-NEXT: vpsrlq $7, %xmm1, %xmm3
+; X32-AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; X32-AVX1-NEXT: vpsrlq $7, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X32-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: constant_shift_v4i64:
;
; X32-AVX1-LABEL: constant_shift_v4i64:
; X32-AVX1: # %bb.0:
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [31,0,62,0]
-; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; X32-AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm2
-; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X32-AVX1-NEXT: vpsrlq $62, %xmm1, %xmm2
+; X32-AVX1-NEXT: vpsrlq $31, %xmm1, %xmm1
; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,0,7,0]
-; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
-; X32-AVX1-NEXT: vpsrlq %xmm3, %xmm0, %xmm3
-; X32-AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X32-AVX1-NEXT: vpsrlq $7, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
;
; X32-AVX1-LABEL: constant_shift_v4i64:
; X32-AVX1: # %bb.0:
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [31,0,62,0]
-; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; X32-AVX1-NEXT: vpsllq %xmm2, %xmm3, %xmm2
-; X32-AVX1-NEXT: vpsllq %xmm1, %xmm3, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X32-AVX1-NEXT: vpsllq $62, %xmm1, %xmm2
+; X32-AVX1-NEXT: vpsllq $31, %xmm1, %xmm1
; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,0,7,0]
-; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
-; X32-AVX1-NEXT: vpsllq %xmm3, %xmm0, %xmm3
-; X32-AVX1-NEXT: vpsllq %xmm2, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X32-AVX1-NEXT: vpsllq $7, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpsllq $1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;