}
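+; NOTE: an unordered atomic store of a float only needs a single aligned
+; 4-byte access, so a plain integer movl suffices on every 32-bit subtarget
+; and the SSE/AVX/no-SSE cases collapse into one shared X86 check block.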
define void @store_float(float* %fptr, float %v) {
-; X86-SSE-LABEL: store_float:
-; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: movss %xmm0, (%eax)
-; X86-SSE-NEXT: retl
-;
-; X86-AVX-LABEL: store_float:
-; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT: vmovss %xmm0, (%eax)
-; X86-AVX-NEXT: retl
-;
-; X86-NOSSE-LABEL: store_float:
-; X86-NOSSE: # %bb.0:
-; X86-NOSSE-NEXT: flds {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT: fstps (%eax)
-; X86-NOSSE-NEXT: retl
+; X86-LABEL: store_float:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: retl
;
; X64-SSE-LABEL: store_float:
; X64-SSE: # %bb.0:
;
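+; NOTE: for store_double on 32-bit targets with SSE2/AVX, a single 8-byte
+; SSE store (movlps/vmovlps) is used; this relies on an aligned 8-byte SSE
+; store being atomic, which keeps the double store lock-free.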
; X86-SSE2-LABEL: store_double:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT: movsd %xmm0, (%eax)
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, (%eax)
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: store_double:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT: vmovsd %xmm0, (%eax)
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, (%eax)
; X86-AVX-NEXT: retl
;
; X86-NOSSE-LABEL: store_double:
;
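+; NOTE: with no inline 16-byte atomic store emitted here, the unordered
+; fp128 store is expanded to a __sync_lock_test_and_set_16 libcall: %xmm0
+; is spilled to the stack, its two halves are passed in %rsi:%rdx, and the
+; pointer stays in %rdi.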
; X64-SSE-LABEL: store_fp128:
; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movaps %xmm0, (%rdi)
+; X64-SSE-NEXT: subq $24, %rsp
+; X64-SSE-NEXT: .cfi_def_cfa_offset 32
+; X64-SSE-NEXT: movaps %xmm0, (%rsp)
+; X64-SSE-NEXT: movq (%rsp), %rsi
+; X64-SSE-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; X64-SSE-NEXT: callq __sync_lock_test_and_set_16
+; X64-SSE-NEXT: addq $24, %rsp
+; X64-SSE-NEXT: .cfi_def_cfa_offset 8
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: store_fp128:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovaps %xmm0, (%rdi)
+; X64-AVX-NEXT: subq $24, %rsp
+; X64-AVX-NEXT: .cfi_def_cfa_offset 32
+; X64-AVX-NEXT: vmovaps %xmm0, (%rsp)
+; X64-AVX-NEXT: movq (%rsp), %rsi
+; X64-AVX-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; X64-AVX-NEXT: callq __sync_lock_test_and_set_16
+; X64-AVX-NEXT: addq $24, %rsp
+; X64-AVX-NEXT: .cfi_def_cfa_offset 8
; X64-AVX-NEXT: retq
  store atomic fp128 %v, fp128* %fptr unordered, align 16
  ret void
}
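+; NOTE: an atomic float load must be one 4-byte access (movl, or movss with
+; SSE2/AVX); to return the value in %st(0) it is then bounced through a
+; stack slot and reloaded with flds.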
define float @load_float(float* %fptr) {
-; X86-LABEL: load_float:
-; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: flds (%eax)
-; X86-NEXT: retl
+; X86-SSE1-LABEL: load_float:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl %eax
+; X86-SSE1-NEXT: .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl (%eax), %eax
+; X86-SSE1-NEXT: movl %eax, (%esp)
+; X86-SSE1-NEXT: flds (%esp)
+; X86-SSE1-NEXT: popl %eax
+; X86-SSE1-NEXT: .cfi_def_cfa_offset 4
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: load_float:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: .cfi_def_cfa_offset 8
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: movss %xmm0, (%esp)
+; X86-SSE2-NEXT: flds (%esp)
+; X86-SSE2-NEXT: popl %eax
+; X86-SSE2-NEXT: .cfi_def_cfa_offset 4
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: load_float:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %eax
+; X86-AVX-NEXT: .cfi_def_cfa_offset 8
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX-NEXT: flds (%esp)
+; X86-AVX-NEXT: popl %eax
+; X86-AVX-NEXT: .cfi_def_cfa_offset 4
+; X86-AVX-NEXT: retl
+;
+; X86-NOSSE-LABEL: load_float:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %eax
+; X86-NOSSE-NEXT: .cfi_def_cfa_offset 8
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl (%eax), %eax
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: flds (%esp)
+; X86-NOSSE-NEXT: popl %eax
+; X86-NOSSE-NEXT: .cfi_def_cfa_offset 4
+; X86-NOSSE-NEXT: retl
;
; X64-SSE-LABEL: load_float:
; X64-SSE: # %bb.0:
}
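+; NOTE: a 64-bit atomic load on 32-bit x86 uses one 8-byte SSE load when
+; SSE2/AVX is available; otherwise x87 fildll/fistpll performs the atomic
+; 8-byte access and the bits are shuffled through the stack to the final
+; fldl.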
define double @load_double(double* %fptr) {
-; X86-LABEL: load_double:
-; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: fldl (%eax)
-; X86-NEXT: retl
+; X86-SSE1-LABEL: load_double:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: subl $20, %esp
+; X86-SSE1-NEXT: .cfi_def_cfa_offset 24
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: fildll (%eax)
+; X86-SSE1-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl %eax, (%esp)
+; X86-SSE1-NEXT: fldl (%esp)
+; X86-SSE1-NEXT: addl $20, %esp
+; X86-SSE1-NEXT: .cfi_def_cfa_offset 4
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: load_double:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: subl $12, %esp
+; X86-SSE2-NEXT: .cfi_def_cfa_offset 16
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, (%esp)
+; X86-SSE2-NEXT: fldl (%esp)
+; X86-SSE2-NEXT: addl $12, %esp
+; X86-SSE2-NEXT: .cfi_def_cfa_offset 4
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: load_double:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: subl $12, %esp
+; X86-AVX-NEXT: .cfi_def_cfa_offset 16
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, (%esp)
+; X86-AVX-NEXT: fldl (%esp)
+; X86-AVX-NEXT: addl $12, %esp
+; X86-AVX-NEXT: .cfi_def_cfa_offset 4
+; X86-AVX-NEXT: retl
+;
+; X86-NOSSE-LABEL: load_double:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $20, %esp
+; X86-NOSSE-NEXT: .cfi_def_cfa_offset 24
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: fildll (%eax)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: fldl (%esp)
+; X86-NOSSE-NEXT: addl $20, %esp
+; X86-NOSSE-NEXT: .cfi_def_cfa_offset 4
+; X86-NOSSE-NEXT: retl
;
; X64-SSE-LABEL: load_double:
; X64-SSE: # %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-SSE-NEXT: movl %edi, 12(%esi)
-; X86-SSE-NEXT: movl %edx, 8(%esi)
-; X86-SSE-NEXT: movl %ecx, 4(%esi)
+; X86-SSE-NEXT: movl %edi, 8(%esi)
+; X86-SSE-NEXT: movl %edx, 12(%esi)
; X86-SSE-NEXT: movl %eax, (%esi)
+; X86-SSE-NEXT: movl %ecx, 4(%esi)
; X86-SSE-NEXT: movl %esi, %eax
; X86-SSE-NEXT: addl $20, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 12
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NOSSE-NEXT: movl %edi, 12(%esi)
-; X86-NOSSE-NEXT: movl %edx, 8(%esi)
-; X86-NOSSE-NEXT: movl %ecx, 4(%esi)
+; X86-NOSSE-NEXT: movl %edi, 8(%esi)
+; X86-NOSSE-NEXT: movl %edx, 12(%esi)
; X86-NOSSE-NEXT: movl %eax, (%esi)
+; X86-NOSSE-NEXT: movl %ecx, 4(%esi)
; X86-NOSSE-NEXT: movl %esi, %eax
; X86-NOSSE-NEXT: addl $20, %esp
; X86-NOSSE-NEXT: .cfi_def_cfa_offset 12
;
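+; NOTE: the 16-byte atomic load is expanded to a
+; __sync_val_compare_and_swap_16 libcall with zero for both the expected
+; and desired values; a compare-and-swap of 0 against 0 leaves memory
+; unchanged and returns the current contents in %rax:%rdx, which are then
+; moved into %xmm0 via the stack.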
; X64-SSE-LABEL: load_fp128:
; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movaps (%rdi), %xmm0
+; X64-SSE-NEXT: subq $24, %rsp
+; X64-SSE-NEXT: .cfi_def_cfa_offset 32
+; X64-SSE-NEXT: xorl %esi, %esi
+; X64-SSE-NEXT: xorl %edx, %edx
+; X64-SSE-NEXT: xorl %ecx, %ecx
+; X64-SSE-NEXT: xorl %r8d, %r8d
+; X64-SSE-NEXT: callq __sync_val_compare_and_swap_16
+; X64-SSE-NEXT: movq %rdx, {{[0-9]+}}(%rsp)
+; X64-SSE-NEXT: movq %rax, (%rsp)
+; X64-SSE-NEXT: movaps (%rsp), %xmm0
+; X64-SSE-NEXT: addq $24, %rsp
+; X64-SSE-NEXT: .cfi_def_cfa_offset 8
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: load_fp128:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovaps (%rdi), %xmm0
+; X64-AVX-NEXT: subq $24, %rsp
+; X64-AVX-NEXT: .cfi_def_cfa_offset 32
+; X64-AVX-NEXT: xorl %esi, %esi
+; X64-AVX-NEXT: xorl %edx, %edx
+; X64-AVX-NEXT: xorl %ecx, %ecx
+; X64-AVX-NEXT: xorl %r8d, %r8d
+; X64-AVX-NEXT: callq __sync_val_compare_and_swap_16
+; X64-AVX-NEXT: movq %rdx, {{[0-9]+}}(%rsp)
+; X64-AVX-NEXT: movq %rax, (%rsp)
+; X64-AVX-NEXT: vmovaps (%rsp), %xmm0
+; X64-AVX-NEXT: addq $24, %rsp
+; X64-AVX-NEXT: .cfi_def_cfa_offset 8
; X64-AVX-NEXT: retq
  %v = load atomic fp128, fp128* %fptr unordered, align 16
  ret fp128 %v