For mask ops like these, we can't assume an undef mask element gives an undef result element - the other operand's corresponding element might be zero (making the result zero) - so we must still demand all the bits of that element and keep the element itself demanded.
This appears to be what D128570 was trying to fix - both sides of the funnel shift mask for vXi64 (legalized to v2Xi32) were incorrectly simplifying the upper 32-bit halves to undef, resulting in bad folds later on.
I intend to address the test case regressions, but this close to the release branch I'd prefer to get a fix in first.
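As a rough illustration (a hypothetical reduction, not one of the tests in this patch):

define <2 x i64> @undef_mask_demand(<2 x i64> %x) {
  ; Lane 0 of the result is not undef just because the mask element is undef:
  ; if lane 0 of %x is zero, the result lane is zero whatever the undef becomes.
  %m = and <2 x i64> %x, <i64 undef, i64 42>
  ret <2 x i64> %m
}

So when all we know is that the mask element is undef, we have to keep demanding all bits of the other operand's lane instead of letting it simplify to undef.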
EltBits)) {
OpBits.clearAllBits();
OpElts.clearAllBits();
- for (int I = 0; I != NumElts; ++I)
- if (DemandedElts[I] && ((Invert && !EltBits[I].isAllOnes()) ||
- (!Invert && !EltBits[I].isZero()))) {
+ for (int I = 0; I != NumElts; ++I) {
+ if (!DemandedElts[I])
+ continue;
+ if (UndefElts[I]) {
+ // We can't assume an undef src element gives an undef dst - the
+ // other src might be zero.
+ OpBits.setAllBits();
+ OpElts.setBit(I);
+ } else if ((Invert && !EltBits[I].isAllOnes()) ||
+ (!Invert && !EltBits[I].isZero())) {
OpBits |= Invert ? ~EltBits[I] : EltBits[I];
OpElts.setBit(I);
}
+ }
}
return std::make_pair(OpBits, OpElts);
};
EltBits)) {
DemandedBits.clearAllBits();
DemandedElts.clearAllBits();
- for (int I = 0; I != NumElts; ++I)
- if (!EltBits[I].isZero()) {
+ for (int I = 0; I != NumElts; ++I) {
+ if (UndefElts[I]) {
+ // We can't assume an undef src element gives an undef dst - the
+ // other src might be zero.
+ DemandedBits.setAllBits();
+ DemandedElts.setBit(I);
+ } else if (!EltBits[I].isZero()) {
DemandedBits |= EltBits[I];
DemandedElts.setBit(I);
}
+ }
}
return std::make_pair(DemandedBits, DemandedElts);
};
EltBits)) {
DemandedBits.clearAllBits();
DemandedElts.clearAllBits();
- for (int I = 0; I != NumElts; ++I)
- if ((Invert && !EltBits[I].isAllOnes()) ||
- (!Invert && !EltBits[I].isZero())) {
+ for (int I = 0; I != NumElts; ++I) {
+ if (UndefElts[I]) {
+ // We can't assume an undef src element gives an undef dst - the
+ // other src might be zero.
+ DemandedBits.setAllBits();
+ DemandedElts.setBit(I);
+ } else if ((Invert && !EltBits[I].isAllOnes()) ||
+ (!Invert && !EltBits[I].isZero())) {
DemandedBits |= Invert ? ~EltBits[I] : EltBits[I];
DemandedElts.setBit(I);
}
+ }
}
return std::make_pair(DemandedBits, DemandedElts);
};
define <2 x half> @test_u1tofp2(<2 x i1> %arg0) {
; CHECK-LABEL: test_u1tofp2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpmovqw %xmm0, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; CHECK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vcvtuw2ph %xmm0, %xmm0
; CHECK-NEXT: retq
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm3, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; X64-SSE2-NEXT: pmuludq %xmm1, %xmm0
+; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X64-SSE2-NEXT: pmuludq %xmm3, %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm5, %zmm1, %zmm1
+; AVX512F-NEXT: vpbroadcastw %xmm2, %xmm2
; AVX512F-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT: vpsllw %xmm2, %ymm3, %ymm3
; AVX512VL-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
; AVX512VL-NEXT: vinserti64x4 $1, %ymm5, %zmm1, %zmm1
+; AVX512VL-NEXT: vpbroadcastw %xmm2, %xmm2
; AVX512VL-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-NEXT: vpsllw %xmm2, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm6
; AVX512F-NEXT: vpsrlw %xmm3, %ymm6, %ymm3
; AVX512F-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm3
+; AVX512F-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512F-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsllw %xmm1, %ymm4, %ymm2
; AVX512F-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $1, %ymm0, %ymm6
; AVX512VL-NEXT: vpsrlw %xmm3, %ymm6, %ymm3
; AVX512VL-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm3
+; AVX512VL-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512VL-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsllw %xmm1, %ymm4, %ymm2
; AVX512VL-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; AVX512F-LABEL: splatvar_funnnel_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastq {{.*#+}} xmm3 = [15,15]
-; AVX512F-NEXT: vpand %xmm3, %xmm2, %xmm4
-; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm5
-; AVX512F-NEXT: vpsrlw %xmm4, %ymm5, %ymm5
-; AVX512F-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
-; AVX512F-NEXT: vinserti64x4 $1, %ymm5, %zmm1, %zmm1
-; AVX512F-NEXT: vpandn %xmm3, %xmm2, %xmm2
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
-; AVX512F-NEXT: vpsllw $1, %ymm3, %ymm3
-; AVX512F-NEXT: vpsllw %xmm2, %ymm3, %ymm3
+; AVX512F-NEXT: vpandn %xmm3, %xmm2, %xmm4
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm5
+; AVX512F-NEXT: vpsllw $1, %ymm5, %ymm5
+; AVX512F-NEXT: vpsllw %xmm4, %ymm5, %ymm5
; AVX512F-NEXT: vpsllw $1, %ymm0, %ymm0
-; AVX512F-NEXT: vpsllw %xmm2, %ymm0, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512F-NEXT: vpsllw %xmm4, %ymm0, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm0
+; AVX512F-NEXT: vpbroadcastw %xmm2, %xmm2
+; AVX512F-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512F-NEXT: vpsrlw %xmm2, %ymm3, %ymm3
+; AVX512F-NEXT: vpsrlw %xmm2, %ymm1, %ymm1
+; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
; AVX512F-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatvar_funnnel_v32i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastq {{.*#+}} xmm3 = [15,15]
-; AVX512VL-NEXT: vpand %xmm3, %xmm2, %xmm4
-; AVX512VL-NEXT: vextracti64x4 $1, %zmm1, %ymm5
-; AVX512VL-NEXT: vpsrlw %xmm4, %ymm5, %ymm5
-; AVX512VL-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
-; AVX512VL-NEXT: vinserti64x4 $1, %ymm5, %zmm1, %zmm1
-; AVX512VL-NEXT: vpandn %xmm3, %xmm2, %xmm2
-; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm3
-; AVX512VL-NEXT: vpsllw $1, %ymm3, %ymm3
-; AVX512VL-NEXT: vpsllw %xmm2, %ymm3, %ymm3
+; AVX512VL-NEXT: vpandn %xmm3, %xmm2, %xmm4
+; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm5
+; AVX512VL-NEXT: vpsllw $1, %ymm5, %ymm5
+; AVX512VL-NEXT: vpsllw %xmm4, %ymm5, %ymm5
; AVX512VL-NEXT: vpsllw $1, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsllw %xmm2, %ymm0, %ymm0
-; AVX512VL-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512VL-NEXT: vpsllw %xmm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm0
+; AVX512VL-NEXT: vpbroadcastw %xmm2, %xmm2
+; AVX512VL-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512VL-NEXT: vpsrlw %xmm2, %ymm3, %ymm3
+; AVX512VL-NEXT: vpsrlw %xmm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
; AVX512VL-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512F-LABEL: splatvar_funnnel_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastq {{.*#+}} xmm2 = [15,15]
-; AVX512F-NEXT: vpand %xmm2, %xmm1, %xmm3
+; AVX512F-NEXT: vpandn %xmm2, %xmm1, %xmm3
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm4
-; AVX512F-NEXT: vpsrlw %xmm3, %ymm4, %ymm5
-; AVX512F-NEXT: vpsrlw %xmm3, %ymm0, %ymm3
+; AVX512F-NEXT: vpsllw $1, %ymm4, %ymm5
+; AVX512F-NEXT: vpsllw %xmm3, %ymm5, %ymm5
+; AVX512F-NEXT: vpsllw $1, %ymm0, %ymm6
+; AVX512F-NEXT: vpsllw %xmm3, %ymm6, %ymm3
; AVX512F-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm3
-; AVX512F-NEXT: vpandn %xmm2, %xmm1, %xmm1
-; AVX512F-NEXT: vpsllw $1, %ymm4, %ymm2
-; AVX512F-NEXT: vpsllw %xmm1, %ymm2, %ymm2
-; AVX512F-NEXT: vpsllw $1, %ymm0, %ymm0
-; AVX512F-NEXT: vpsllw %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpbroadcastw %xmm1, %xmm1
+; AVX512F-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT: vpsrlw %xmm1, %ymm4, %ymm2
+; AVX512F-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512F-NEXT: vporq %zmm3, %zmm0, %zmm0
+; AVX512F-NEXT: vporq %zmm0, %zmm3, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatvar_funnnel_v32i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastq {{.*#+}} xmm2 = [15,15]
-; AVX512VL-NEXT: vpand %xmm2, %xmm1, %xmm3
+; AVX512VL-NEXT: vpandn %xmm2, %xmm1, %xmm3
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm4
-; AVX512VL-NEXT: vpsrlw %xmm3, %ymm4, %ymm5
-; AVX512VL-NEXT: vpsrlw %xmm3, %ymm0, %ymm3
+; AVX512VL-NEXT: vpsllw $1, %ymm4, %ymm5
+; AVX512VL-NEXT: vpsllw %xmm3, %ymm5, %ymm5
+; AVX512VL-NEXT: vpsllw $1, %ymm0, %ymm6
+; AVX512VL-NEXT: vpsllw %xmm3, %ymm6, %ymm3
; AVX512VL-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm3
-; AVX512VL-NEXT: vpandn %xmm2, %xmm1, %xmm1
-; AVX512VL-NEXT: vpsllw $1, %ymm4, %ymm2
-; AVX512VL-NEXT: vpsllw %xmm1, %ymm2, %ymm2
-; AVX512VL-NEXT: vpsllw $1, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsllw %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpbroadcastw %xmm1, %xmm1
+; AVX512VL-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX512VL-NEXT: vpsrlw %xmm1, %ymm4, %ymm2
+; AVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512VL-NEXT: vporq %zmm3, %zmm0, %zmm0
+; AVX512VL-NEXT: vporq %zmm0, %zmm3, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatvar_funnnel_v32i16:
;
; AVX-LABEL: and_undef_elts:
; AVX: # %bb.0:
-; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,0,1,2]
+; AVX-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
%extend = shufflevector <2 x i64> %x, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%bogus_bo = and <4 x i64> %extend, <i64 undef, i64 undef, i64 42, i64 43>
; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm6
; AVX512F-NEXT: vpsrlw %xmm3, %ymm6, %ymm3
; AVX512F-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm3
+; AVX512F-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512F-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsllw %xmm1, %ymm4, %ymm2
; AVX512F-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $1, %ymm0, %ymm6
; AVX512VL-NEXT: vpsrlw %xmm3, %ymm6, %ymm3
; AVX512VL-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm3
+; AVX512VL-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512VL-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsllw %xmm1, %ymm4, %ymm2
; AVX512VL-NEXT: vpsllw %xmm1, %ymm0, %ymm0