ret double %1
}
+; fnmadd_d_2: double fma with both the multiplicand and the addend negated,
+; i.e. fma(a, -(b + 0.0), -(c + 0.0)). The +0.0 adds presumably exist to
+; keep the fnegs from being folded away before instruction selection --
+; TODO(review): confirm against the sibling fnmadd_d test. Current codegen
+; uses an explicit fneg.d plus fmsub.d rather than a single fused negate.
+define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
+; RV32IFD-LABEL: fnmadd_d_2:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: sw a4, 8(sp)
+; RV32IFD-NEXT: sw a5, 12(sp)
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: sw a2, 8(sp)
+; RV32IFD-NEXT: sw a3, 12(sp)
+; RV32IFD-NEXT: fld ft2, 8(sp)
+; RV32IFD-NEXT: fcvt.d.w ft3, zero
+; RV32IFD-NEXT: fadd.d ft2, ft2, ft3
+; RV32IFD-NEXT: fadd.d ft1, ft1, ft3
+; RV32IFD-NEXT: fneg.d ft2, ft2
+; RV32IFD-NEXT: fmsub.d ft0, ft0, ft2, ft1
+; RV32IFD-NEXT: fsd ft0, 8(sp)
+; RV32IFD-NEXT: lw a0, 8(sp)
+; RV32IFD-NEXT: lw a1, 12(sp)
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: fnmadd_d_2:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: fmv.d.x ft1, a2
+; RV64IFD-NEXT: fmv.d.x ft2, a1
+; RV64IFD-NEXT: fmv.d.x ft3, zero
+; RV64IFD-NEXT: fadd.d ft2, ft2, ft3
+; RV64IFD-NEXT: fadd.d ft1, ft1, ft3
+; RV64IFD-NEXT: fneg.d ft2, ft2
+; RV64IFD-NEXT: fmsub.d ft0, ft0, ft2, ft1
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
+ %b_ = fadd double 0.0, %b  ; non-constant copy of %b
+ %c_ = fadd double 0.0, %c  ; non-constant copy of %c
+ %negb = fsub double -0.0, %b_  ; %negb = -%b_
+ %negc = fsub double -0.0, %c_  ; %negc = -%c_
+ %1 = call double @llvm.fma.f64(double %a, double %negb, double %negc)
+ ret double %1
+}
+
define double @fnmsub_d(double %a, double %b, double %c) nounwind {
; RV32IFD-LABEL: fnmsub_d:
; RV32IFD: # %bb.0:
%1 = call double @llvm.fma.f64(double %nega, double %b, double %c)
ret double %1
}
+
+; fnmsub_d_2: double fma with only the multiplicand negated,
+; i.e. fma(a, -(b + 0.0), c). The +0.0 add presumably exists to keep the
+; fneg from being folded away before instruction selection --
+; TODO(review): confirm against the sibling fnmsub_d test. Current codegen
+; uses an explicit fneg.d plus fmadd.d rather than a single fused negate.
+define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
+; RV32IFD-LABEL: fnmsub_d_2:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a4, 8(sp)
+; RV32IFD-NEXT: sw a5, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: sw a2, 8(sp)
+; RV32IFD-NEXT: sw a3, 12(sp)
+; RV32IFD-NEXT: fld ft2, 8(sp)
+; RV32IFD-NEXT: fcvt.d.w ft3, zero
+; RV32IFD-NEXT: fadd.d ft2, ft2, ft3
+; RV32IFD-NEXT: fneg.d ft2, ft2
+; RV32IFD-NEXT: fmadd.d ft0, ft1, ft2, ft0
+; RV32IFD-NEXT: fsd ft0, 8(sp)
+; RV32IFD-NEXT: lw a0, 8(sp)
+; RV32IFD-NEXT: lw a1, 12(sp)
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: fnmsub_d_2:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft0, a2
+; RV64IFD-NEXT: fmv.d.x ft1, a0
+; RV64IFD-NEXT: fmv.d.x ft2, a1
+; RV64IFD-NEXT: fmv.d.x ft3, zero
+; RV64IFD-NEXT: fadd.d ft2, ft2, ft3
+; RV64IFD-NEXT: fneg.d ft2, ft2
+; RV64IFD-NEXT: fmadd.d ft0, ft1, ft2, ft0
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
+ %b_ = fadd double 0.0, %b  ; non-constant copy of %b
+ %negb = fsub double -0.0, %b_  ; %negb = -%b_
+ %1 = call double @llvm.fma.f64(double %a, double %negb, double %c)
+ ret double %1
+}
ret float %1
}
+; fnmadd_s_2: float fma with both the multiplicand and the addend negated,
+; i.e. fma(a, -(b + 0.0), -(c + 0.0)). The +0.0 adds presumably exist to
+; keep the fnegs from being folded away before instruction selection --
+; TODO(review): confirm against the sibling fnmadd_s test. Current codegen
+; uses an explicit fneg.s plus fmsub.s rather than a single fused negate.
+define float @fnmadd_s_2(float %a, float %b, float %c) nounwind {
+; RV32IF-LABEL: fnmadd_s_2:
+; RV32IF: # %bb.0:
+; RV32IF-NEXT: fmv.w.x ft0, a0
+; RV32IF-NEXT: fmv.w.x ft1, a2
+; RV32IF-NEXT: fmv.w.x ft2, a1
+; RV32IF-NEXT: fmv.w.x ft3, zero
+; RV32IF-NEXT: fadd.s ft2, ft2, ft3
+; RV32IF-NEXT: fadd.s ft1, ft1, ft3
+; RV32IF-NEXT: fneg.s ft2, ft2
+; RV32IF-NEXT: fmsub.s ft0, ft0, ft2, ft1
+; RV32IF-NEXT: fmv.x.w a0, ft0
+; RV32IF-NEXT: ret
+;
+; RV64IF-LABEL: fnmadd_s_2:
+; RV64IF: # %bb.0:
+; RV64IF-NEXT: fmv.w.x ft0, a0
+; RV64IF-NEXT: fmv.w.x ft1, a2
+; RV64IF-NEXT: fmv.w.x ft2, a1
+; RV64IF-NEXT: fmv.w.x ft3, zero
+; RV64IF-NEXT: fadd.s ft2, ft2, ft3
+; RV64IF-NEXT: fadd.s ft1, ft1, ft3
+; RV64IF-NEXT: fneg.s ft2, ft2
+; RV64IF-NEXT: fmsub.s ft0, ft0, ft2, ft1
+; RV64IF-NEXT: fmv.x.w a0, ft0
+; RV64IF-NEXT: ret
+ %b_ = fadd float 0.0, %b  ; non-constant copy of %b
+ %c_ = fadd float 0.0, %c  ; non-constant copy of %c
+ %negb = fsub float -0.0, %b_  ; %negb = -%b_
+ %negc = fsub float -0.0, %c_  ; %negc = -%c_
+ %1 = call float @llvm.fma.f32(float %a, float %negb, float %negc)
+ ret float %1
+}
+
define float @fnmsub_s(float %a, float %b, float %c) nounwind {
; RV32IF-LABEL: fnmsub_s:
; RV32IF: # %bb.0:
%1 = call float @llvm.fma.f32(float %nega, float %b, float %c)
ret float %1
}
+
+; fnmsub_s_2: float fma with only the multiplicand negated,
+; i.e. fma(a, -(b + 0.0), c). The +0.0 add presumably exists to keep the
+; fneg from being folded away before instruction selection --
+; TODO(review): confirm against the sibling fnmsub_s test. Current codegen
+; uses an explicit fneg.s plus fmadd.s rather than a single fused negate.
+define float @fnmsub_s_2(float %a, float %b, float %c) nounwind {
+; RV32IF-LABEL: fnmsub_s_2:
+; RV32IF: # %bb.0:
+; RV32IF-NEXT: fmv.w.x ft0, a2
+; RV32IF-NEXT: fmv.w.x ft1, a0
+; RV32IF-NEXT: fmv.w.x ft2, a1
+; RV32IF-NEXT: fmv.w.x ft3, zero
+; RV32IF-NEXT: fadd.s ft2, ft2, ft3
+; RV32IF-NEXT: fneg.s ft2, ft2
+; RV32IF-NEXT: fmadd.s ft0, ft1, ft2, ft0
+; RV32IF-NEXT: fmv.x.w a0, ft0
+; RV32IF-NEXT: ret
+;
+; RV64IF-LABEL: fnmsub_s_2:
+; RV64IF: # %bb.0:
+; RV64IF-NEXT: fmv.w.x ft0, a2
+; RV64IF-NEXT: fmv.w.x ft1, a0
+; RV64IF-NEXT: fmv.w.x ft2, a1
+; RV64IF-NEXT: fmv.w.x ft3, zero
+; RV64IF-NEXT: fadd.s ft2, ft2, ft3
+; RV64IF-NEXT: fneg.s ft2, ft2
+; RV64IF-NEXT: fmadd.s ft0, ft1, ft2, ft0
+; RV64IF-NEXT: fmv.x.w a0, ft0
+; RV64IF-NEXT: ret
+ %b_ = fadd float 0.0, %b  ; non-constant copy of %b
+ %negb = fsub float -0.0, %b_  ; %negb = -%b_
+ %1 = call float @llvm.fma.f32(float %a, float %negb, float %c)
+ ret float %1
+}