; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=armv8-eabi -mattr=+fullfp16 | FileCheck %s
; RUN: llc < %s -mtriple thumbv7a -mattr=+fullfp16 | FileCheck %s
; Autogenerated full checks (vcmp + vselgt) replace the old "CHECK-NOT: vminnm.f16".
define half @fp16_vminnm_o(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vminnm_o:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r2
; CHECK-NEXT: vmov.f16 s2, r1
; CHECK-NEXT: vcmp.f16 s0, s2
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
; NOTE(review): IR body looks truncated (no fcmp/select/ret, no closing brace) - restore the elided lines from the original test.
; Autogenerated full checks (vcmp + vselgt) replace the old "CHECK-NOT: vminnm.f16".
define half @fp16_vminnm_o_rev(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vminnm_o_rev:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r2
; CHECK-NEXT: vmov.f16 s2, r1
; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
; NOTE(review): IR body looks truncated (no fcmp/select/ret, no closing brace) - restore the elided lines from the original test.
; Autogenerated full checks (vcmp + vselge) replace the old "CHECK-NOT: vminnm.f16".
define half @fp16_vminnm_u(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vminnm_u:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vmov.f16 s2, r2
; CHECK-NEXT: vcmp.f16 s0, s2
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
; NOTE(review): IR body looks truncated (no fcmp/select/ret, no closing brace) - restore the elided lines from the original test.
; Autogenerated full checks (vcmp + vselgt) replace the old "CHECK-NOT: vminnm.f16".
define half @fp16_vminnm_ule(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vminnm_ule:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vmov.f16 s2, r2
; CHECK-NEXT: vcmp.f16 s0, s2
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
; NOTE(review): IR body looks truncated (no fcmp/select/ret, no closing brace) - restore the elided lines from the original test.
; Autogenerated full checks (vcmp + vselge) replace the old "CHECK-NOT: vminnm.f16".
define half @fp16_vminnm_u_rev(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vminnm_u_rev:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r2
; CHECK-NEXT: vmov.f16 s2, r1
; CHECK-NEXT: vcmp.f16 s0, s2
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
; NOTE(review): IR body looks truncated (no fcmp/select/ret, no closing brace) - restore the elided lines from the original test.
; Autogenerated full checks (vcmp + vselgt) replace the old "CHECK-NOT: vmaxnm.f16".
define half @fp16_vmaxnm_o(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_o:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r2
; CHECK-NEXT: vmov.f16 s2, r1
; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
; NOTE(review): IR body looks truncated (no fcmp/select/ret, no closing brace) - restore the elided lines from the original test.
; Autogenerated full checks (vcmp + vselge) replace the old "CHECK-NOT: vmaxnm.f16".
define half @fp16_vmaxnm_oge(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_oge:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r2
; CHECK-NEXT: vmov.f16 s2, r1
; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
; NOTE(review): IR body looks truncated (no fcmp/select/ret, no closing brace) - restore the elided lines from the original test.
; Autogenerated full checks (vcmp + vselgt) replace the old "CHECK-NOT: vmaxnm.f16".
define half @fp16_vmaxnm_o_rev(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_o_rev:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vmov.f16 s2, r2
; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
; NOTE(review): IR body looks truncated (no fcmp/select/ret, no closing brace) - restore the elided lines from the original test.
; Autogenerated full checks (vcmp + vselge) replace the old "CHECK-NOT: vmaxnm.f16".
define half @fp16_vmaxnm_ole_rev(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_ole_rev:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vmov.f16 s2, r2
; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
; NOTE(review): IR body looks truncated (no fcmp/select/ret, no closing brace) - restore the elided lines from the original test.
; Autogenerated full checks (vcmp + vselge) replace the old "CHECK-NOT: vmaxnm.f16".
define half @fp16_vmaxnm_u(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_u:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vmov.f16 s2, r2
; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
; NOTE(review): IR body looks truncated (no fcmp/select/ret, no closing brace) - restore the elided lines from the original test.
; Autogenerated full checks (vcmp + vselgt) replace the old "CHECK-NOT: vmaxnm.f16".
define half @fp16_vmaxnm_uge(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_uge:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vmov.f16 s2, r2
; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
; NOTE(review): IR body looks truncated (no fcmp/select/ret, no closing brace) - restore the elided lines from the original test.
; Autogenerated full checks (vcmp + vselge) replace the old "CHECK-NOT: vmaxnm.f16".
define half @fp16_vmaxnm_u_rev(i16 signext %a, i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_u_rev:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r2
; CHECK-NEXT: vmov.f16 s2, r1
; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
; NOTE(review): IR body looks truncated (no fcmp/select/ret, no closing brace) - restore the elided lines from the original test.
; known non-NaNs
; Known-non-NaN constants: one clamp is expected to use vminnm.f16; the other
; compare still lowers to vcmp + vselgt. Old regex-style checks superseded.
define half @fp16_vminnm_NNNo(i16 signext %a) {
; CHECK-LABEL: fp16_vminnm_NNNo:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vmov.f16 s2, #1.200000e+01
; CHECK-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NEXT: vldr.16 s2, .LCPI12_0
; CHECK-NEXT: vcmp.f16 s0, s2
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI12_0:
; CHECK-NEXT: .short 0x5040 @ half 34
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp olt half %0, 12.
; NOTE(review): IR body looks truncated (no select/ret before the brace) - restore the elided lines from the original test.
}
; Known-non-NaN constants: vcmp + vselgt for the ogt compare, then vminnm.f16
; for the second clamp. Old regex-style checks superseded.
define half @fp16_vminnm_NNNo_rev(i16 signext %a) {
; CHECK-LABEL: fp16_vminnm_NNNo_rev:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr.16 s2, .LCPI13_0
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vcmp.f16 s0, s2
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NEXT: vldr.16 s2, .LCPI13_1
; CHECK-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI13_0:
; CHECK-NEXT: .short 0x5300 @ half 56
; CHECK-NEXT: .LCPI13_1:
; CHECK-NEXT: .short 0x54e0 @ half 78
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp ogt half %0, 56.
; NOTE(review): IR body looks truncated (no select/ret, no closing brace) - restore the elided lines from the original test.
; Known-non-NaN constants: vminnm.f16 for one clamp, vcmp + vselge for the
; ult compare. Old regex-style checks superseded.
define half @fp16_vminnm_NNNu(i16 signext %b) {
; CHECK-LABEL: fp16_vminnm_NNNu:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vmov.f16 s2, #1.200000e+01
; CHECK-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NEXT: vldr.16 s2, .LCPI14_0
; CHECK-NEXT: vcmp.f16 s0, s2
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI14_0:
; CHECK-NEXT: .short 0x5040 @ half 34
entry:
%0 = bitcast i16 %b to half
%cmp1 = fcmp ult half 12., %0
; NOTE(review): IR body looks truncated (no select/ret before the brace) - restore the elided lines from the original test.
}
; Known-non-NaN constants: vminnm.f16 for one clamp, vcmp + vselgt for the
; other. Old regex-style checks superseded.
define half @fp16_vminnm_NNNule(i16 signext %b) {
; CHECK-LABEL: fp16_vminnm_NNNule:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr.16 s2, .LCPI15_0
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NEXT: vldr.16 s2, .LCPI15_1
; CHECK-NEXT: vcmp.f16 s0, s2
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI15_0:
; CHECK-NEXT: .short 0x5040 @ half 34
; CHECK-NEXT: .LCPI15_1:
; CHECK-NEXT: .short 0x5300 @ half 56
entry:
%0 = bitcast i16 %b to half
; NOTE(review): IR body looks truncated (no fcmp/select/ret before the brace) - restore the elided lines from the original test.
}
; Known-non-NaN constants: vcmp + vselge for the compare, then vminnm.f16 for
; the second clamp. Old regex-style checks (misplaced after the additions in
; the pasted diff) superseded and removed.
define half @fp16_vminnm_NNNu_rev(i16 signext %b) {
; CHECK-LABEL: fp16_vminnm_NNNu_rev:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr.16 s2, .LCPI16_0
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vcmp.f16 s0, s2
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s2, s0
; CHECK-NEXT: vldr.16 s2, .LCPI16_1
; CHECK-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI16_0:
; CHECK-NEXT: .short 0x5300 @ half 56
; CHECK-NEXT: .LCPI16_1:
; CHECK-NEXT: .short 0x54e0 @ half 78
entry:
%0 = bitcast i16 %b to half
; NOTE(review): IR body looks truncated (no fcmp/select/ret before the brace) - restore the elided lines from the original test.
}
; Known-non-NaN constants: vmaxnm.f16 for one clamp, vcmp + vselgt for the
; ogt compare against 34.0. Old regex-style checks superseded.
define half @fp16_vmaxnm_NNNo(i16 signext %a) {
; CHECK-LABEL: fp16_vmaxnm_NNNo:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vmov.f16 s2, #1.200000e+01
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vldr.16 s2, .LCPI17_0
; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI17_0:
; CHECK-NEXT: .short 0x5040 @ half 34
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp ogt half %0, 12.
; NOTE(review): IR body looks truncated (no select/ret before the brace) - restore the elided lines from the original test.
}
; Known-non-NaN constants: vmaxnm.f16 for one clamp, vcmp + vselge for the
; oge compare. Old regex-style checks superseded.
define half @fp16_vmaxnm_NNNoge(i16 signext %a) {
; CHECK-LABEL: fp16_vmaxnm_NNNoge:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr.16 s2, .LCPI18_0
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vldr.16 s2, .LCPI18_1
; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI18_0:
; CHECK-NEXT: .short 0x5040 @ half 34
; CHECK-NEXT: .LCPI18_1:
; CHECK-NEXT: .short 0x5300 @ half 56
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp oge half %0, 34.
; NOTE(review): IR body looks truncated (no select/ret before the brace) - restore the elided lines from the original test.
}
; Known-non-NaN constants: vcmp + vselgt for the olt compare, then vmaxnm.f16
; for the second clamp. Old regex-style checks superseded.
define half @fp16_vmaxnm_NNNo_rev(i16 signext %a) {
; CHECK-LABEL: fp16_vmaxnm_NNNo_rev:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr.16 s2, .LCPI19_0
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NEXT: vldr.16 s2, .LCPI19_1
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI19_0:
; CHECK-NEXT: .short 0x5300 @ half 56
; CHECK-NEXT: .LCPI19_1:
; CHECK-NEXT: .short 0x54e0 @ half 78
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp olt half %0, 56.
; NOTE(review): IR body looks truncated (no select/ret before the brace) - restore the elided lines from the original test.
}
; Known-non-NaN constants: vcmp + vselge for the ole compare, then vmaxnm.f16
; for the second clamp. Old regex-style checks superseded.
define half @fp16_vmaxnm_NNNole_rev(i16 signext %a) {
; CHECK-LABEL: fp16_vmaxnm_NNNole_rev:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr.16 s2, .LCPI20_0
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s2, s0
; CHECK-NEXT: vldr.16 s2, .LCPI20_1
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI20_0:
; CHECK-NEXT: .short 0x54e0 @ half 78
; CHECK-NEXT: .LCPI20_1:
; CHECK-NEXT: .short 0x55a0 @ half 90
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp ole half %0, 78.
; NOTE(review): IR body looks truncated (no select/ret before the brace) - restore the elided lines from the original test.
}
; Known-non-NaN constants: vmaxnm.f16 for one clamp, vcmp + vselge for the
; ugt compare. Old regex-style checks superseded.
define half @fp16_vmaxnm_NNNu(i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_NNNu:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vmov.f16 s2, #1.200000e+01
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vldr.16 s2, .LCPI21_0
; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI21_0:
; CHECK-NEXT: .short 0x5040 @ half 34
entry:
%0 = bitcast i16 %b to half
%cmp1 = fcmp ugt half 12., %0
; NOTE(review): IR body looks truncated (no select/ret before the brace) - restore the elided lines from the original test.
}
; Known-non-NaN constants: vmaxnm.f16 for one clamp, vcmp + vselgt for the
; uge compare. Old regex-style checks superseded.
define half @fp16_vmaxnm_NNNuge(i16 signext %b) {
; CHECK-LABEL: fp16_vmaxnm_NNNuge:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr.16 s2, .LCPI22_0
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vldr.16 s2, .LCPI22_1
; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI22_0:
; CHECK-NEXT: .short 0x5040 @ half 34
; CHECK-NEXT: .LCPI22_1:
; CHECK-NEXT: .short 0x5300 @ half 56
entry:
%0 = bitcast i16 %b to half
%cmp1 = fcmp uge half 34., %0
; NOTE(review): IR body looks truncated (no select/ret before the brace) - restore the elided lines from the original test.
}
; Compare against -0.0: vminnm.f16 for the inner clamp, then vcmp + vselge.
; Old regex-style checks superseded.
define half @fp16_vminmaxnm_neg0(i16 signext %a) {
; CHECK-LABEL: fp16_vminmaxnm_neg0:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr.16 s0, .LCPI23_0
; CHECK-NEXT: vmov.f16 s2, r1
; CHECK-NEXT: vminnm.f16 s2, s2, s0
; CHECK-NEXT: vcmp.f16 s0, s2
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s0, s2
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI23_0:
; CHECK-NEXT: .short 0x8000 @ half -0
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp olt half %0, -0.
; NOTE(review): IR body looks truncated (no select/ret before the brace) - restore the elided lines from the original test.
}
; nsz compare against +0.0: vcmp against #0 plus vselge, then vmaxnm.f16.
; Old regex-style checks superseded.
define half @fp16_vminmaxnm_e_0(i16 signext %a) {
; CHECK-LABEL: fp16_vminmaxnm_e_0:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f16 s0, r1
; CHECK-NEXT: vldr.16 s2, .LCPI24_0
; CHECK-NEXT: vcmp.f16 s0, #0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s2, s0
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI24_0:
; CHECK-NEXT: .short 0x0000 @ half 0
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp nsz ole half 0., %0
; NOTE(review): IR body looks truncated (no select/ret before the brace) - restore the elided lines from the original test.
}
define half @fp16_vminmaxnm_e_neg0(i16 signext %a) {
-; CHECK-LABEL: fp16_vminmaxnm_e_neg0:
-; CHECK: vldr.16 [[S0:s[0-9]]], .LCPI{{.*}}
-; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK: vminnm.f16 s2, [[S2]], [[S0]]
-; CHECK: vmax.f16 d0, d1, d0
+; CHECK-LABEL: fp16_vminmaxnm_e_neg0:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vldr.16 s0, .LCPI25_0
+; CHECK-NEXT: vmov.f16 s2, r1
+; CHECK-NEXT: vminnm.f16 s2, s2, s0
+; CHECK-NEXT: vcmp.f16 s0, s2
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselge.f16 s0, s0, s2
+; CHECK-NEXT: vstr.16 s0, [r0]
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 1
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI25_0:
+; CHECK-NEXT: .short 0x8000 @ half -0
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp nsz ule half -0., %0