ret i8 %res
}
+; PR61104 - peek through vselect allones operand
+; combineBitcastvxi1 may be called before the vselect(c,-1,x) --> or(c,x) vXi1 fold
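+; Illustrative sketch only (not checked by this test): once that fold applies, the
+; select below is equivalent to
+;   %or  = or <8 x i1> %cmp, %slt
+;   %res = bitcast <8 x i1> %or to i8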
+
+define i8 @v8i32_or_select(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) {
+; SSE2-SSSE3-LABEL: v8i32_or_select:
+; SSE2-SSSE3: # %bb.0:
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm3, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: packssdw %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: packssdw %xmm5, %xmm4
+; SSE2-SSSE3-NEXT: por %xmm0, %xmm4
+; SSE2-SSSE3-NEXT: packsswb %xmm4, %xmm4
+; SSE2-SSSE3-NEXT: pmovmskb %xmm4, %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v8i32_or_select:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm1
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm2, %xmm3, %xmm1
+; AVX1-NEXT: vpackssdw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: v8i32_or_select:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtd %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: v8i32_or_select:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
+; AVX512F-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT: vpcmpgtd %ymm2, %ymm0, %k1
+; AVX512F-NEXT: korw %k1, %k0, %k0
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: v8i32_or_select:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
+; AVX512BW-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX512BW-NEXT: vpcmpgtd %ymm2, %ymm0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+ %cmp = icmp eq <8 x i32> %a0, %a1
+ %slt = icmp slt <8 x i32> %a2, zeroinitializer
+ %sel = select <8 x i1> %cmp, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i1> %slt
+ %res = bitcast <8 x i1> %sel to i8
+ ret i8 %res
+}
+
define i8 @v8f32_and(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d) {
; SSE2-SSSE3-LABEL: v8f32_and:
; SSE2-SSSE3: # %bb.0: