This initial version only peeks through cases where we just demand the sign bit of an ashr shift, but we could generalize this further depending on how many sign bits we already have.
The pr18014.ll case is a minor annoyance - we've failed to move the psrad/paddd after the blendvps, which would have avoided the extra move, but we have still increased the ILP.
return Vec;
break;
}
+ case X86ISD::VSRAI:
+ // iff we only need the sign bit then we can use the source directly.
+ // TODO: generalize where we only demand extended signbits.
+ if (DemandedBits.isSignMask())
+ return Op.getOperand(0);
+ break;
case X86ISD::PCMPGT:
// icmp sgt(0, R) == ashr(R, BitWidth-1).
// iff we only need the sign bit then we can use R directly.
; X64: # %bb.0: # %entry
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpslld $31, %ymm0, %ymm0
-; X64-NEXT: vpsrad $31, %ymm0, %ymm0
; X64-NEXT: vmovdqa (%rdi), %ymm2
; X64-NEXT: vmovdqa 32(%rdi), %ymm3
; X64-NEXT: vextracti128 $1, %ymm1, %xmm4
; X64: # %bb.0: # %entry
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpslld $31, %ymm0, %ymm0
-; X64-NEXT: vpsrad $31, %ymm0, %ymm0
; X64-NEXT: vmovaps (%rdi), %ymm2
; X64-NEXT: vmovaps 32(%rdi), %ymm3
; X64-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX2-LABEL: allones_v16i16_and1:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $15, %ymm0, %ymm0
-; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
; AVX2-LABEL: allzeros_v16i16_and1:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $15, %ymm0, %ymm0
-; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
; AVX2-LABEL: allones_v16i16_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $13, %ymm0, %ymm0
-; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
; AVX2-LABEL: allzeros_v16i16_and4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $13, %ymm0, %ymm0
-; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s
; Ensure PSRAD is generated as the condition is consumed by both PADD and
-; BLENDVPS. PAND requires all bits setting properly.
+; BLENDVPS. PADD requires all bits setting properly.
define <4 x i32> @foo(<4 x i32>* %p, <4 x i1> %cond, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
; CHECK-LABEL: foo:
; CHECK: # %bb.0:
; CHECK-NEXT: pslld $31, %xmm0
-; CHECK-NEXT: psrad $31, %xmm0
+; CHECK-NEXT: movdqa %xmm0, %xmm3
+; CHECK-NEXT: psrad $31, %xmm3
+; CHECK-NEXT: paddd %xmm1, %xmm3
; CHECK-NEXT: blendvps %xmm0, %xmm1, %xmm2
-; CHECK-NEXT: paddd %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm2, (%rdi)
-; CHECK-NEXT: movdqa %xmm1, %xmm0
+; CHECK-NEXT: movdqa %xmm3, %xmm0
; CHECK-NEXT: retq
%sext_cond = sext <4 x i1> %cond to <4 x i32>
%t1 = add <4 x i32> %v1, %sext_cond