From ac7f3e24d3990962f7d1036e7ff2d29e17e9ef16 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Sat, 29 Apr 2017 14:29:06 +0000
Subject: [PATCH] [X86][SSE] Add initial <2 x half> tests for PR31088

As discussed on D32391, test X86/X64 SSE2 and X64 F16C.

llvm-svn: 301744
---
 llvm/test/CodeGen/X86/pr31088.ll | 105 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 105 insertions(+)
 create mode 100644 llvm/test/CodeGen/X86/pr31088.ll

diff --git a/llvm/test/CodeGen/X86/pr31088.ll b/llvm/test/CodeGen/X86/pr31088.ll
new file mode 100644
index 0000000..ca1e08b
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr31088.ll
@@ -0,0 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+f16c | FileCheck %s --check-prefix=F16C
+
+define <2 x half> @ir_fadd_v2f16(<2 x half> %arg0, <2 x half> %arg1) nounwind {
+; X86-LABEL: ir_fadd_v2f16:
+; X86: # BB#0:
+; X86-NEXT: subl $64, %esp
+; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: movss %xmm0, (%esp)
+; X86-NEXT: calll __gnu_f2h_ieee
+; X86-NEXT: movzwl %ax, %eax
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: calll __gnu_h2f_ieee
+; X86-NEXT: fstpt {{[0-9]+}}(%esp) # 10-byte Folded Spill
+; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: movss %xmm0, (%esp)
+; X86-NEXT: calll __gnu_f2h_ieee
+; X86-NEXT: movzwl %ax, %eax
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: calll __gnu_h2f_ieee
+; X86-NEXT: fstpt {{[0-9]+}}(%esp) # 10-byte Folded Spill
+; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: movss %xmm0, (%esp)
+; X86-NEXT: calll __gnu_f2h_ieee
+; X86-NEXT: movzwl %ax, %eax
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: calll __gnu_h2f_ieee
+; X86-NEXT: fstpt {{[0-9]+}}(%esp) # 10-byte Folded Spill
+; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: movss %xmm0, (%esp)
+; X86-NEXT: calll __gnu_f2h_ieee
+; X86-NEXT: movzwl %ax, %eax
+; X86-NEXT: movl %eax, (%esp)
+; X86-NEXT: fldt {{[0-9]+}}(%esp) # 10-byte Folded Reload
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: fldt {{[0-9]+}}(%esp) # 10-byte Folded Reload
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: fldt {{[0-9]+}}(%esp) # 10-byte Folded Reload
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: calll __gnu_h2f_ieee
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT: addss {{[0-9]+}}(%esp), %xmm1
+; X86-NEXT: addss {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; X86-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X86-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NEXT: addl $64, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: ir_fadd_v2f16:
+; X64: # BB#0:
+; X64-NEXT: subq $24, %rsp
+; X64-NEXT: movss %xmm2, {{[0-9]+}}(%rsp) # 4-byte Spill
+; X64-NEXT: movss %xmm1, {{[0-9]+}}(%rsp) # 4-byte Spill
+; X64-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
+; X64-NEXT: movaps %xmm3, %xmm0
+; X64-NEXT: callq __gnu_f2h_ieee
+; X64-NEXT: movzwl %ax, %edi
+; X64-NEXT: callq __gnu_h2f_ieee
+; X64-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
+; X64-NEXT: movss {{[0-9]+}}(%rsp), %xmm0 # 4-byte Reload
+; X64-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: callq __gnu_f2h_ieee
+; X64-NEXT: movzwl %ax, %edi
+; X64-NEXT: callq __gnu_h2f_ieee
+; X64-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
+; X64-NEXT: movss {{[0-9]+}}(%rsp), %xmm0 # 4-byte Reload
+; X64-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: callq __gnu_f2h_ieee
+; X64-NEXT: movzwl %ax, %edi
+; X64-NEXT: callq __gnu_h2f_ieee
+; X64-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
+; X64-NEXT: movss {{[0-9]+}}(%rsp), %xmm0 # 4-byte Reload
+; X64-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: callq __gnu_f2h_ieee
+; X64-NEXT: movzwl %ax, %edi
+; X64-NEXT: callq __gnu_h2f_ieee
+; X64-NEXT: addss {{[0-9]+}}(%rsp), %xmm0 # 4-byte Folded Reload
+; X64-NEXT: movss {{[0-9]+}}(%rsp), %xmm1 # 4-byte Reload
+; X64-NEXT: # xmm1 = mem[0],zero,zero,zero
+; X64-NEXT: addss {{[0-9]+}}(%rsp), %xmm1 # 4-byte Folded Reload
+; X64-NEXT: addq $24, %rsp
+; X64-NEXT: retq
+;
+; F16C-LABEL: ir_fadd_v2f16:
+; F16C: # BB#0:
+; F16C-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; F16C-NEXT: vcvtph2ps %xmm3, %xmm3
+; F16C-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; F16C-NEXT: vcvtph2ps %xmm1, %xmm1
+; F16C-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; F16C-NEXT: vcvtph2ps %xmm2, %xmm2
+; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; F16C-NEXT: vaddss %xmm3, %xmm1, %xmm1
+; F16C-NEXT: retq
+  %retval = fadd <2 x half> %arg0, %arg1
+  ret <2 x half> %retval
+}
-- 
2.7.4
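Context for reading the checks: neither SSE2 nor F16C provides native half-precision arithmetic, so each <2 x half> lane is widened to float for the add, via the __gnu_h2f_ieee libcall under SSE2 or vcvtph2ps under F16C, as the assertions above show. A minimal per-lane sketch of that pattern, written as a standalone IR function that is not part of the patch (the function and value names are hypothetical), might look like:

; Illustrative sketch only, not part of the patch: widen each half input to
; float and add, mirroring the per-lane lowering checked above.
define float @fadd_f16_lane_sketch(half %a, half %b) nounwind {
  %a.ext = fpext half %a to float   ; SSE2: __gnu_h2f_ieee libcall; F16C: vcvtph2ps
  %b.ext = fpext half %b to float
  %sum = fadd float %a.ext, %b.ext  ; addss / vaddss
  ret float %sum
}

The vector case in the test is simply two such lanes, which is why the X64 and F16C checks repeat the convert-then-add sequence per element.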