ret i32 %conv
}
+; fcmp ueq (equal-or-unordered) on fp128 has no single libgcc libcall, so it
+; is lowered to two soft-float compares OR'ed together:
+;   __eqtf2(x, y) == 0   => operands compare equal        (sete %bl)
+;   __unordtf2(x, y) != 0 => at least one operand is NaN  (setne %al)
+; The xmm spills/reloads keep the original arguments live across the first call.
+define i32 @TestComp128UEQ(fp128 %d1, fp128 %d2) {
+; CHECK-LABEL: TestComp128UEQ:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: subq $32, %rsp
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: callq __eqtf2
+; CHECK-NEXT: testl %eax, %eax
+; CHECK-NEXT: sete %bl
+; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: callq __unordtf2
+; CHECK-NEXT: testl %eax, %eax
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: orb %bl, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: addq $32, %rsp
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+entry:
+ %cmp = fcmp ueq fp128 %d1, %d2
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+; fcmp one (ordered-and-not-equal) on fp128, the complement of ueq, is also
+; split into two libcalls OR'ed together:
+;   __gttf2(x, y) > 0 => x > y  (setg %bl)
+;   __lttf2(x, y) < 0 => x < y  (sets %al, sign bit of the libcall result)
+; Both libcalls return nonpositive/nonnegative values for NaN inputs, so the
+; unordered case correctly yields 0.
+define i32 @TestComp128ONE(fp128 %d1, fp128 %d2) {
+; CHECK-LABEL: TestComp128ONE:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: subq $32, %rsp
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: callq __gttf2
+; CHECK-NEXT: testl %eax, %eax
+; CHECK-NEXT: setg %bl
+; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: callq __lttf2
+; CHECK-NEXT: testl %eax, %eax
+; CHECK-NEXT: sets %al
+; CHECK-NEXT: orb %bl, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: addq $32, %rsp
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+entry:
+ %cmp = fcmp one fp128 %d1, %d2
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+; NOTE(review): the label index bumps from .LBB6_2 to .LBB8_2 because two new
+; functions are inserted before @TestMax earlier in this file, shifting the
+; autogenerated basic-block label numbering. The trailing "ret i64 %res"/"}"
+; below appear to belong to a later function (TestMax returns fp128, not i64);
+; the intervening hunk context is elided — TODO confirm against the full file.
define fp128 @TestMax(fp128 %x, fp128 %y) {
; CHECK-LABEL: TestMax:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: callq __gttf2
; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: jg .LBB6_2
+; CHECK-NEXT: jg .LBB8_2
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
-; CHECK-NEXT: .LBB6_2: # %entry
+; CHECK-NEXT: .LBB8_2: # %entry
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
ret i64 %res
}
+; Strict-FP (constrained) quiet fcmp ueq on fp128: the same two-libcall
+; lowering (__eqtf2 == 0 OR __unordtf2 != 0), but the i1 result feeds a
+; select, so the OR'ed flag drives cmov instead of being zero-extended.
+; Attribute #0 is "nounwind strictfp" (defined at the end of the file).
+; The X86 prefix presumably covers a 32-bit i686 RUN configuration whose RUN
+; line is outside this hunk — TODO confirm; there the two fp128 arguments are
+; re-pushed on the stack for each libcall, the combined flag selects between
+; the addresses of the two i64 stack arguments (cmovnel on pointers), and the
+; result is returned in edx:eax.
+define i64 @cmp_ueq_q(i64 %a, i64 %b, fp128 %x, fp128 %y) #0 {
+; CHECK-LABEL: cmp_ueq_q:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: subq $32, %rsp
+; CHECK-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: movq %rsi, %r14
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: callq __eqtf2
+; CHECK-NEXT: testl %eax, %eax
+; CHECK-NEXT: sete %bpl
+; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: callq __unordtf2
+; CHECK-NEXT: testl %eax, %eax
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: orb %bpl, %al
+; CHECK-NEXT: cmoveq %r14, %rbx
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: addq $32, %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: retq
+;
+; X86-LABEL: cmp_ueq_q:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %ebx
+; X86-NEXT: movl %ebx, %esi
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll __eqtf2
+; X86-NEXT: addl $32, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %bl
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %esi
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll __unordtf2
+; X86-NEXT: addl $32, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: orb %bl, %al
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmovnel %eax, %ecx
+; X86-NEXT: movl (%ecx), %eax
+; X86-NEXT: movl 4(%ecx), %edx
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+ %cond = call i1 @llvm.experimental.constrained.fcmp.f128(
+ fp128 %x, fp128 %y,
+ metadata !"ueq",
+ metadata !"fpexcept.strict") #0
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
+
+; Strict-FP (constrained) quiet fcmp one on fp128: ordered-and-not-equal is
+; lowered as __gttf2 > 0 (setg) OR __lttf2 < 0 (sets, sign bit of the libcall
+; result); the combined flag then drives a cmov select between %a and %b.
+; Attribute #0 is "nounwind strictfp" (defined at the end of the file).
+; The X86 prefix presumably covers a 32-bit i686 RUN configuration whose RUN
+; line is outside this hunk — TODO confirm; there each libcall re-pushes the
+; fp128 words and the i64 result is assembled from the selected stack slot
+; into edx:eax.
+define i64 @cmp_one_q(i64 %a, i64 %b, fp128 %x, fp128 %y) #0 {
+; CHECK-LABEL: cmp_one_q:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: subq $32, %rsp
+; CHECK-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: movq %rsi, %r14
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: callq __gttf2
+; CHECK-NEXT: testl %eax, %eax
+; CHECK-NEXT: setg %bpl
+; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: callq __lttf2
+; CHECK-NEXT: testl %eax, %eax
+; CHECK-NEXT: sets %al
+; CHECK-NEXT: orb %bpl, %al
+; CHECK-NEXT: cmoveq %r14, %rbx
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: addq $32, %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: retq
+;
+; X86-LABEL: cmp_one_q:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %ebx
+; X86-NEXT: movl %ebx, %esi
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll __gttf2
+; X86-NEXT: addl $32, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setg %bl
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %esi
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll __lttf2
+; X86-NEXT: addl $32, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sets %al
+; X86-NEXT: orb %bl, %al
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmovnel %eax, %ecx
+; X86-NEXT: movl (%ecx), %eax
+; X86-NEXT: movl 4(%ecx), %edx
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+ %cond = call i1 @llvm.experimental.constrained.fcmp.f128(
+ fp128 %x, fp128 %y,
+ metadata !"one",
+ metadata !"fpexcept.strict") #0
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
+
attributes #0 = { nounwind strictfp }
declare fp128 @llvm.experimental.constrained.fadd.f128(fp128, fp128, metadata, metadata)