This requires us to override the isTargetCanonicalConstantNode callback introduced in D128144 so that we can recognise the various cases where a VBROADCAST_LOAD constant is reused at different vector widths (e.g. hidden behind bitcasts, subvector extractions, or insertions into undef); otherwise the demanded-bits folds could repeatedly replace one canonical form of the constant with another, resulting in an infinite loop.
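The new computeKnownBitsForTargetNode case below computes the result by intersecting the KnownBits of every demanded broadcast element. As a standalone illustration (not part of the patch, and with invented element values), this sketch shows the KnownBits::commonBits merge in isolation, using the same KnownBits helpers as the patch:

  #include "llvm/ADT/APInt.h"
  #include "llvm/ADT/StringExtras.h"
  #include "llvm/Support/KnownBits.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  int main() {
    // Pretend these are the constant bits extracted from two demanded
    // elements of a broadcasted constant-pool entry.
    APInt EltBits[2] = {APInt(8, 0xAA), APInt(8, 0xAB)};

    // Start from the "all bits known" state and intersect, as in the new
    // X86ISD::VBROADCAST_LOAD case.
    KnownBits Known(8);
    Known.Zero.setAllBits();
    Known.One.setAllBits();
    for (const APInt &Elt : EltBits)
      Known = KnownBits::commonBits(Known, KnownBits::makeConstant(Elt));

    // 0xAA and 0xAB disagree only in bit 0, so bit 0 ends up unknown:
    // known-one = 0xAA, known-zero = 0x54.
    outs() << "one:  0x" << utohexstr(Known.One.getZExtValue()) << "\n";
    outs() << "zero: 0x" << utohexstr(Known.Zero.getZExtValue()) << "\n";
  }

Starting from the all-bits-known state means the first demanded element simply overwrites Known with its exact constant bits, and every further element can only downgrade disagreeing bits to unknown.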
    Known.setAllZero();
    break;
  }
+  case X86ISD::VBROADCAST_LOAD: {
+    APInt UndefElts;
+    SmallVector<APInt, 16> EltBits;
+    // Attempt to extract the constant bits being broadcast from memory.
+    if (getTargetConstantBitsFromNode(Op, BitWidth, UndefElts, EltBits,
+                                      /*AllowWholeUndefs*/ false,
+                                      /*AllowPartialUndefs*/ false)) {
+      // Intersect the known bits of all demanded elements, bailing out to
+      // the unknown state if any demanded element is undef.
+      Known.Zero.setAllBits();
+      Known.One.setAllBits();
+      for (unsigned I = 0; I != NumElts; ++I) {
+        if (!DemandedElts[I])
+          continue;
+        if (UndefElts[I]) {
+          Known.resetAll();
+          break;
+        }
+        KnownBits Known2 = KnownBits::makeConstant(EltBits[I]);
+        Known = KnownBits::commonBits(Known, Known2);
+      }
+      return;
+    }
+    break;
+  }
  }
// Handle target shuffles.
                                   APInt &UndefElts,
                                   unsigned Depth) const override;
+    bool isTargetCanonicalConstantNode(SDValue Op) const override {
+      // Peek through bitcasts/extracts/inserts to see if we have a broadcast
+      // vector from memory.
+      while (Op.getOpcode() == ISD::BITCAST ||
+             Op.getOpcode() == ISD::EXTRACT_SUBVECTOR ||
+             (Op.getOpcode() == ISD::INSERT_SUBVECTOR &&
+              Op.getOperand(0).isUndef()))
+        Op = Op.getOperand(Op.getOpcode() == ISD::INSERT_SUBVECTOR ? 1 : 0);
+
+      return Op.getOpcode() == X86ISD::VBROADCAST_LOAD ||
+             TargetLowering::isTargetCanonicalConstantNode(Op);
+    }
+
    const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const override;
    SDValue unwrapAddress(SDValue N) const override;
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $1, %xmm0, %xmm1
-; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [715827882,715827882,715827882,715827882]
; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $64, %rsp
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [7,7,7,7]
-; AVX2-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %eax
; AVX2-NEXT: vmovaps %ymm0, (%rsp)
; AVX2-NEXT: andl $3, %eax
; AVX512-NEXT: andq $-32, %rsp
; AVX512-NEXT: subq $64, %rsp
; AVX512-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
-; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm3 = [7,7,7,7]
-; AVX512-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: vmovaps %ymm0, (%rsp)
; AVX512-NEXT: andl $3, %eax
; AVX512VL-NEXT: movq %rsp, %rbp
; AVX512VL-NEXT: andq $-32, %rsp
; AVX512VL-NEXT: subq $64, %rsp
-; AVX512VL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; AVX512VL-NEXT: vmovd %xmm1, %eax
; AVX512VL-NEXT: vmovaps %ymm0, (%rsp)
; AVX512VL-NEXT: andl $3, %eax
; AVX2-NEXT: vpsrad $17, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [15,15,15,15]
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vpackusdw %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: concat_trunc_packssdw_128:
;
; X86-AVX2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
; X86-AVX2: # %bb.0:
-; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294967294,4294967294,4294967294,4294967294]
-; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpsrad $1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X86-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]