--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
+
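+; The *_invert tests pass a bitwise-inverted vector (xor with all-ones) to the
+; SSE41/AVX ptestz/ptestc/ptestnzc intrinsics with an all-ones mask and select
+; between two scalars on the result.
+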
+define i32 @ptestz_128_invert(<2 x i64> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: ptestz_128_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vptest %xmm1, %xmm0
+; CHECK-NEXT: cmovnel %esi, %eax
+; CHECK-NEXT: retq
+ %t1 = xor <2 x i64> %c, <i64 -1, i64 -1>
+ %t2 = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %t1, <2 x i64> <i64 -1, i64 -1>)
+ %t3 = icmp ne i32 %t2, 0
+ %t4 = select i1 %t3, i32 %a, i32 %b
+ ret i32 %t4
+}
+
+define i32 @ptestz_256_invert(<4 x i64> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: ptestz_256_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vptest %ymm1, %ymm0
+; CHECK-NEXT: cmovnel %esi, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+ %t1 = xor <4 x i64> %c, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %t2 = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %t1, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>)
+ %t3 = icmp ne i32 %t2, 0
+ %t4 = select i1 %t3, i32 %a, i32 %b
+ ret i32 %t4
+}
+
+define i32 @ptestc_128_invert(<2 x i64> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: ptestc_128_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vptest %xmm1, %xmm0
+; CHECK-NEXT: cmovael %esi, %eax
+; CHECK-NEXT: retq
+ %t1 = xor <2 x i64> %c, <i64 -1, i64 -1>
+ %t2 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %t1, <2 x i64> <i64 -1, i64 -1>)
+ %t3 = icmp ne i32 %t2, 0
+ %t4 = select i1 %t3, i32 %a, i32 %b
+ ret i32 %t4
+}
+
+define i32 @ptestc_256_invert(<4 x i64> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: ptestc_256_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vptest %ymm1, %ymm0
+; CHECK-NEXT: cmovael %esi, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+ %t1 = xor <4 x i64> %c, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %t2 = call i32 @llvm.x86.avx.ptestc.256(<4 x i64> %t1, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>)
+ %t3 = icmp ne i32 %t2, 0
+ %t4 = select i1 %t3, i32 %a, i32 %b
+ ret i32 %t4
+}
+
+define i32 @ptestnzc_128_invert(<2 x i64> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: ptestnzc_128_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vptest %xmm1, %xmm0
+; CHECK-NEXT: cmovbel %esi, %eax
+; CHECK-NEXT: retq
+ %t1 = xor <2 x i64> %c, <i64 -1, i64 -1>
+ %t2 = call i32 @llvm.x86.sse41.ptestnzc(<2 x i64> %t1, <2 x i64> <i64 -1, i64 -1>)
+ %t3 = icmp ne i32 %t2, 0
+ %t4 = select i1 %t3, i32 %a, i32 %b
+ ret i32 %t4
+}
+
+define i32 @ptestnzc_256_invert(<4 x i64> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: ptestnzc_256_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vptest %ymm1, %ymm0
+; CHECK-NEXT: cmovbel %esi, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+ %t1 = xor <4 x i64> %c, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %t2 = call i32 @llvm.x86.avx.ptestnzc.256(<4 x i64> %t1, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>)
+ %t3 = icmp ne i32 %t2, 0
+ %t4 = select i1 %t3, i32 %a, i32 %b
+ ret i32 %t4
+}
+
+declare i32 @llvm.x86.sse41.ptestz(<2 x i64>, <2 x i64>) nounwind readnone
+declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
+declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone
+
+declare i32 @llvm.x86.avx.ptestz.256(<4 x i64>, <4 x i64>) nounwind readnone
+declare i32 @llvm.x86.avx.ptestc.256(<4 x i64>, <4 x i64>) nounwind readnone
+declare i32 @llvm.x86.avx.ptestnzc.256(<4 x i64>, <4 x i64>) nounwind readnone
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
+
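+; The *_invert tests pass a bitwise-inverted vector (xor with all-ones) to the
+; AVX vtestz/vtestc/vtestnzc PD intrinsics with an all-ones mask and select
+; between two scalars on the result.
+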
+define i32 @testpdz_128_invert(<2 x double> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: testpdz_128_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vtestpd %xmm1, %xmm0
+; CHECK-NEXT: cmovnel %esi, %eax
+; CHECK-NEXT: retq
+ %t0 = bitcast <2 x double> %c to <2 x i64>
+ %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
+ %t2 = bitcast <2 x i64> %t1 to <2 x double>
+ %t3 = call i32 @llvm.x86.avx.vtestz.pd(<2 x double> %t2, <2 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>)
+ %t4 = icmp ne i32 %t3, 0
+ %t5 = select i1 %t4, i32 %a, i32 %b
+ ret i32 %t5
+}
+
+define i32 @testpdz_256_invert(<4 x double> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: testpdz_256_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vtestpd %ymm1, %ymm0
+; CHECK-NEXT: cmovnel %esi, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+ %t0 = bitcast <4 x double> %c to <4 x i64>
+ %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %t2 = bitcast <4 x i64> %t1 to <4 x double>
+ %t3 = call i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> %t2, <4 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>)
+ %t4 = icmp ne i32 %t3, 0
+ %t5 = select i1 %t4, i32 %a, i32 %b
+ ret i32 %t5
+}
+
+define i32 @testpdc_128_invert(<2 x double> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: testpdc_128_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vtestpd %xmm1, %xmm0
+; CHECK-NEXT: cmovael %esi, %eax
+; CHECK-NEXT: retq
+ %t0 = bitcast <2 x double> %c to <2 x i64>
+ %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
+ %t2 = bitcast <2 x i64> %t1 to <2 x double>
+ %t3 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %t2, <2 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>)
+ %t4 = icmp ne i32 %t3, 0
+ %t5 = select i1 %t4, i32 %a, i32 %b
+ ret i32 %t5
+}
+
+define i32 @testpdc_256_invert(<4 x double> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: testpdc_256_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vtestpd %ymm1, %ymm0
+; CHECK-NEXT: cmovael %esi, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+ %t0 = bitcast <4 x double> %c to <4 x i64>
+ %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %t2 = bitcast <4 x i64> %t1 to <4 x double>
+ %t3 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %t2, <4 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>)
+ %t4 = icmp ne i32 %t3, 0
+ %t5 = select i1 %t4, i32 %a, i32 %b
+ ret i32 %t5
+}
+
+define i32 @testpdnzc_128_invert(<2 x double> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: testpdnzc_128_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vtestpd %xmm1, %xmm0
+; CHECK-NEXT: cmovbel %esi, %eax
+; CHECK-NEXT: retq
+ %t0 = bitcast <2 x double> %c to <2 x i64>
+ %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
+ %t2 = bitcast <2 x i64> %t1 to <2 x double>
+ %t3 = call i32 @llvm.x86.avx.vtestnzc.pd(<2 x double> %t2, <2 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>)
+ %t4 = icmp ne i32 %t3, 0
+ %t5 = select i1 %t4, i32 %a, i32 %b
+ ret i32 %t5
+}
+
+define i32 @testpdnzc_256_invert(<4 x double> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: testpdnzc_256_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vtestpd %ymm1, %ymm0
+; CHECK-NEXT: cmovbel %esi, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+ %t0 = bitcast <4 x double> %c to <4 x i64>
+ %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %t2 = bitcast <4 x i64> %t1 to <4 x double>
+ %t3 = call i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double> %t2, <4 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>)
+ %t4 = icmp ne i32 %t3, 0
+ %t5 = select i1 %t4, i32 %a, i32 %b
+ ret i32 %t5
+}
+
+declare i32 @llvm.x86.avx.vtestz.pd(<2 x double>, <2 x double>) nounwind readnone
+declare i32 @llvm.x86.avx.vtestc.pd(<2 x double>, <2 x double>) nounwind readnone
+declare i32 @llvm.x86.avx.vtestnzc.pd(<2 x double>, <2 x double>) nounwind readnone
+
+declare i32 @llvm.x86.avx.vtestz.pd.256(<4 x double>, <4 x double>) nounwind readnone
+declare i32 @llvm.x86.avx.vtestc.pd.256(<4 x double>, <4 x double>) nounwind readnone
+declare i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double>, <4 x double>) nounwind readnone
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
+
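+; The *_invert tests pass a bitwise-inverted vector (xor with all-ones) to the
+; AVX vtestz/vtestc/vtestnzc PS intrinsics with an all-ones mask and select
+; between two scalars on the result.
+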
+define i32 @testpsz_128_invert(<4 x float> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: testpsz_128_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vtestps %xmm1, %xmm0
+; CHECK-NEXT: cmovnel %esi, %eax
+; CHECK-NEXT: retq
+ %t0 = bitcast <4 x float> %c to <2 x i64>
+ %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
+ %t2 = bitcast <2 x i64> %t1 to <4 x float>
+ %t3 = call i32 @llvm.x86.avx.vtestz.ps(<4 x float> %t2, <4 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>)
+ %t4 = icmp ne i32 %t3, 0
+ %t5 = select i1 %t4, i32 %a, i32 %b
+ ret i32 %t5
+}
+
+define i32 @testpsz_256_invert(<8 x float> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: testpsz_256_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vtestps %ymm1, %ymm0
+; CHECK-NEXT: cmovnel %esi, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+ %t0 = bitcast <8 x float> %c to <4 x i64>
+ %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %t2 = bitcast <4 x i64> %t1 to <8 x float>
+ %t3 = call i32 @llvm.x86.avx.vtestz.ps.256(<8 x float> %t2, <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>)
+ %t4 = icmp ne i32 %t3, 0
+ %t5 = select i1 %t4, i32 %a, i32 %b
+ ret i32 %t5
+}
+
+define i32 @testpsc_128_invert(<4 x float> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: testpsc_128_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vtestps %xmm1, %xmm0
+; CHECK-NEXT: cmovael %esi, %eax
+; CHECK-NEXT: retq
+ %t0 = bitcast <4 x float> %c to <2 x i64>
+ %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
+ %t2 = bitcast <2 x i64> %t1 to <4 x float>
+ %t3 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %t2, <4 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>)
+ %t4 = icmp ne i32 %t3, 0
+ %t5 = select i1 %t4, i32 %a, i32 %b
+ ret i32 %t5
+}
+
+define i32 @testpsc_256_invert(<8 x float> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: testpsc_256_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vtestps %ymm1, %ymm0
+; CHECK-NEXT: cmovael %esi, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+ %t0 = bitcast <8 x float> %c to <4 x i64>
+ %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %t2 = bitcast <4 x i64> %t1 to <8 x float>
+ %t3 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %t2, <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>)
+ %t4 = icmp ne i32 %t3, 0
+ %t5 = select i1 %t4, i32 %a, i32 %b
+ ret i32 %t5
+}
+
+define i32 @testpsnzc_128_invert(<4 x float> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: testpsnzc_128_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vtestps %xmm1, %xmm0
+; CHECK-NEXT: cmovbel %esi, %eax
+; CHECK-NEXT: retq
+ %t0 = bitcast <4 x float> %c to <2 x i64>
+ %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
+ %t2 = bitcast <2 x i64> %t1 to <4 x float>
+ %t3 = call i32 @llvm.x86.avx.vtestnzc.ps(<4 x float> %t2, <4 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>)
+ %t4 = icmp ne i32 %t3, 0
+ %t5 = select i1 %t4, i32 %a, i32 %b
+ ret i32 %t5
+}
+
+define i32 @testpsnzc_256_invert(<8 x float> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: testpsnzc_256_invert:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vtestps %ymm1, %ymm0
+; CHECK-NEXT: cmovbel %esi, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+ %t0 = bitcast <8 x float> %c to <4 x i64>
+ %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %t2 = bitcast <4 x i64> %t1 to <8 x float>
+ %t3 = call i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float> %t2, <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>)
+ %t4 = icmp ne i32 %t3, 0
+ %t5 = select i1 %t4, i32 %a, i32 %b
+ ret i32 %t5
+}
+
+declare i32 @llvm.x86.avx.vtestz.ps(<4 x float>, <4 x float>) nounwind readnone
+declare i32 @llvm.x86.avx.vtestc.ps(<4 x float>, <4 x float>) nounwind readnone
+declare i32 @llvm.x86.avx.vtestnzc.ps(<4 x float>, <4 x float>) nounwind readnone
+
+declare i32 @llvm.x86.avx.vtestz.ps.256(<8 x float>, <8 x float>) nounwind readnone
+declare i32 @llvm.x86.avx.vtestc.ps.256(<8 x float>, <8 x float>) nounwind readnone
+declare i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float>, <8 x float>) nounwind readnone