TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDValue Cond = N->getOperand(0);
- if (N->getOpcode() != ISD::VSELECT || !DCI.isBeforeLegalizeOps() ||
- DCI.isBeforeLegalize() ||
+ if (N->getOpcode() != ISD::VSELECT ||
ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
return SDValue();
- // Don't optimize vector selects that map to mask-registers.
+ // Don't optimize before the condition has been transformed to a legal type
+ // and don't ever optimize vector selects that map to AVX512 mask-registers.
unsigned BitWidth = Cond.getScalarValueSizeInBits();
- if (BitWidth == 1)
+ if (BitWidth < 8 || BitWidth > 64)
return SDValue();
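// Before type legalization a setcc condition typically has a vXi1 type, and
// on AVX512 a legal vXi1 condition maps to a mask-register; both have an
// element width of 1 and are rejected by the range check above, which also
// rules out element widths beyond i64 (never a legal vector element on x86).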
// We can only handle the cases where VSELECT is directly legal on the
// subtarget.
if (UI->getOpcode() != ISD::VSELECT || UI.getOperandNo() != 0)
return SDValue();
- assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
APInt DemandedMask(APInt::getSignMask(BitWidth));
KnownBits Known;
TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                      !DCI.isBeforeLegalizeOps());
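// Only the sign bit of each condition element is demanded, because the
// variable blends (BLENDVPS/BLENDVPD/PBLENDVB) test exactly that bit. The
// call consuming this setup is elided from the excerpt; presumably it is
// along the lines of (TLI being DAG.getTargetLoweringInfo()):
//   if (TLI.SimplifyDemandedBits(Cond, DemandedMask, Known, TLO))
//     DCI.CommitTargetLoweringOpt(TLO);
// letting the combiner strip instructions that only define the ignored
// low bits of the condition.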
; AVX1-NEXT: vpmovsxwd %xmm2, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
-; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpcmpgtd %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vpcmpgtd %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendvps %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT: retq
; AVX2-LABEL: slt_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd (%rdi), %ymm2
-; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vpcmpgtd %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vblendvps %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
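; Note: the deleted vpxor/vpcmpgt pairs materialized "icmp slt %x, 0", but
; vblendvps already selects on the sign bit of each element, which is exactly
; what that compare computed; demanding only the sign bit lets it fold away.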
;
ret <4 x double> %sel
}
-; FIXME: The compare with 0 for AVX2 should be eliminated.
-
define <8 x float> @slt_zero_fp_select(<8 x i16>* %p, <8 x float> %x, <8 x float> %y) {
; AVX1-LABEL: slt_zero_fp_select:
; AVX1: # %bb.0:
; AVX2-LABEL: slt_zero_fp_select:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd (%rdi), %ymm2
-; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vpcmpgtd %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vblendvps %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; Test 128-bit vectors for all legal element types.
-; FIXME: Why didn't AVX-512 optimize too?
-
define <16 x i8> @signbit_sel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %mask) {
-; AVX12-LABEL: signbit_sel_v16i8:
-; AVX12: # %bb.0:
-; AVX12-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
-; AVX12-NEXT: retq
-;
-; AVX512-LABEL: signbit_sel_v16i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX512-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
-; AVX512-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: retq
+; AVX-LABEL: signbit_sel_v16i8:
+; AVX: # %bb.0:
+; AVX-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
%tr = icmp slt <16 x i8> %mask, zeroinitializer
%z = select <16 x i1> %tr, <16 x i8> %x, <16 x i8> %y
ret <16 x i8> %z
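; AVX512 now produces the same single vpblendvb as AVX1/AVX2: the blend tests
; the sign bit of each byte directly, so the vpxor/vpcmpgtb sequence is gone
; and one shared AVX check block suffices.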
;
; AVX512-LABEL: signbit_sel_v32i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX512-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm2
; AVX512-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX512-NEXT: retq
%tr = icmp slt <32 x i8> %mask, zeroinitializer