ret i1 %5
}
+; FIXME: MOVMSK(ICMP_SGT(X,-1)) -> NOT(MOVMSK(X))
+define i1 @movmskps_allof_v4i32_positive(<4 x i32> %a0) {
+; SSE-LABEL: movmskps_allof_v4i32_positive:
+; SSE: # %bb.0:
+; SSE-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE-NEXT: movmskps %xmm0, %eax
+; SSE-NEXT: cmpl $15, %eax
+; SSE-NEXT: sete %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: movmskps_allof_v4i32_positive:
+; AVX: # %bb.0:
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovmskps %xmm0, %eax
+; AVX-NEXT: cmpl $15, %eax
+; AVX-NEXT: sete %al
+; AVX-NEXT: retq
+ %1 = icmp sgt <4 x i32> %a0, <i32 -1, i32 -1, i32 -1, i32 -1> ; per-lane "x > -1", i.e. lane is non-negative (sign bit clear)
+ %2 = sext <4 x i1> %1 to <4 x i32> ; widen i1 mask lanes to all-ones/all-zeros i32 lanes
+ %3 = bitcast <4 x i32> %2 to <4 x float> ; movmsk.ps intrinsic takes <4 x float>; bitcast preserves the sign bits
+ %4 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %3) ; gather the 4 lane sign bits into bits 0-3
+ %5 = icmp eq i32 %4, 15 ; allof: true iff every lane's mask bit is set (0b1111)
+ ret i1 %5
+}
+
+define i1 @pmovmskb_noneof_v16i8_positive(<16 x i8> %a0) {
+; SSE-LABEL: pmovmskb_noneof_v16i8_positive:
+; SSE: # %bb.0:
+; SSE-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE-NEXT: pcmpgtb %xmm1, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: sete %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: pmovmskb_noneof_v16i8_positive:
+; AVX: # %bb.0:
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: testl %eax, %eax
+; AVX-NEXT: sete %al
+; AVX-NEXT: retq
+ %1 = icmp sgt <16 x i8> %a0, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; per-lane "x > -1", i.e. byte is non-negative (sign bit clear)
+ %2 = sext <16 x i1> %1 to <16 x i8> ; widen i1 mask lanes to all-ones/all-zeros bytes
+ %3 = tail call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %2) ; gather the 16 byte sign bits into bits 0-15
+ %4 = icmp eq i32 %3, 0 ; noneof: true iff no mask bit is set
+ ret i1 %4
+}
+
; AND(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(AND(X,Y))
; XOR(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(XOR(X,Y))
; OR(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(OR(X,Y))