// fnmsub: -rs1 * rs2 + rs3
def : Pat<(fma (fneg FPR64:$rs1), FPR64:$rs2, FPR64:$rs3),
(FNMSUB_D FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, 0b111)>;
+def : Pat<(fma FPR64:$rs1, (fneg FPR64:$rs2), FPR64:$rs3),
+ (FNMSUB_D FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, 0b111)>;
// fnmadd: -rs1 * rs2 - rs3
def : Pat<(fma (fneg FPR64:$rs1), FPR64:$rs2, (fneg FPR64:$rs3)),
(FNMADD_D FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, 0b111)>;
+def : Pat<(fma FPR64:$rs1, (fneg FPR64:$rs2), (fneg FPR64:$rs3)),
+ (FNMADD_D FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, 0b111)>;
// The RISC-V 2.2 user-level ISA spec defines fmin and fmax as returning the
// canonical NaN when given a signaling NaN. This doesn't match the LLVM
// fnmsub: -rs1 * rs2 + rs3
def : Pat<(fma (fneg FPR32:$rs1), FPR32:$rs2, FPR32:$rs3),
(FNMSUB_S FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, 0b111)>;
+def : Pat<(fma FPR32:$rs1, (fneg FPR32:$rs2), FPR32:$rs3),
+ (FNMSUB_S FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, 0b111)>;
// fnmadd: -rs1 * rs2 - rs3
def : Pat<(fma (fneg FPR32:$rs1), FPR32:$rs2, (fneg FPR32:$rs3)),
(FNMADD_S FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, 0b111)>;
+def : Pat<(fma FPR32:$rs1, (fneg FPR32:$rs2), (fneg FPR32:$rs3)),
+ (FNMADD_S FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, 0b111)>;
// The RISC-V 2.2 user-level ISA spec defines fmin and fmax as returning the
// canonical NaN when given a signaling NaN. This doesn't match the LLVM
; RV32IFD-NEXT: fcvt.d.w ft3, zero
; RV32IFD-NEXT: fadd.d ft2, ft2, ft3
; RV32IFD-NEXT: fadd.d ft1, ft1, ft3
-; RV32IFD-NEXT: fneg.d ft2, ft2
-; RV32IFD-NEXT: fmsub.d ft0, ft0, ft2, ft1
+; RV32IFD-NEXT: fnmadd.d ft0, ft0, ft2, ft1
; RV32IFD-NEXT: fsd ft0, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV64IFD-NEXT: fmv.d.x ft3, zero
; RV64IFD-NEXT: fadd.d ft2, ft2, ft3
; RV64IFD-NEXT: fadd.d ft1, ft1, ft3
-; RV64IFD-NEXT: fneg.d ft2, ft2
-; RV64IFD-NEXT: fmsub.d ft0, ft0, ft2, ft1
+; RV64IFD-NEXT: fnmadd.d ft0, ft0, ft2, ft1
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
%b_ = fadd double 0.0, %b
; RV32IFD-NEXT: fld ft2, 8(sp)
; RV32IFD-NEXT: fcvt.d.w ft3, zero
; RV32IFD-NEXT: fadd.d ft2, ft2, ft3
-; RV32IFD-NEXT: fneg.d ft2, ft2
-; RV32IFD-NEXT: fmadd.d ft0, ft1, ft2, ft0
+; RV32IFD-NEXT: fnmsub.d ft0, ft1, ft2, ft0
; RV32IFD-NEXT: fsd ft0, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV64IFD-NEXT: fmv.d.x ft2, a1
; RV64IFD-NEXT: fmv.d.x ft3, zero
; RV64IFD-NEXT: fadd.d ft2, ft2, ft3
-; RV64IFD-NEXT: fneg.d ft2, ft2
-; RV64IFD-NEXT: fmadd.d ft0, ft1, ft2, ft0
+; RV64IFD-NEXT: fnmsub.d ft0, ft1, ft2, ft0
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
%b_ = fadd double 0.0, %b
; RV32IF-NEXT: fmv.w.x ft3, zero
; RV32IF-NEXT: fadd.s ft2, ft2, ft3
; RV32IF-NEXT: fadd.s ft1, ft1, ft3
-; RV32IF-NEXT: fneg.s ft2, ft2
-; RV32IF-NEXT: fmsub.s ft0, ft0, ft2, ft1
+; RV32IF-NEXT: fnmadd.s ft0, ft0, ft2, ft1
; RV32IF-NEXT: fmv.x.w a0, ft0
; RV32IF-NEXT: ret
;
; RV64IF-NEXT: fmv.w.x ft3, zero
; RV64IF-NEXT: fadd.s ft2, ft2, ft3
; RV64IF-NEXT: fadd.s ft1, ft1, ft3
-; RV64IF-NEXT: fneg.s ft2, ft2
-; RV64IF-NEXT: fmsub.s ft0, ft0, ft2, ft1
+; RV64IF-NEXT: fnmadd.s ft0, ft0, ft2, ft1
; RV64IF-NEXT: fmv.x.w a0, ft0
; RV64IF-NEXT: ret
%b_ = fadd float 0.0, %b
; RV32IF-NEXT: fmv.w.x ft2, a1
; RV32IF-NEXT: fmv.w.x ft3, zero
; RV32IF-NEXT: fadd.s ft2, ft2, ft3
-; RV32IF-NEXT: fneg.s ft2, ft2
-; RV32IF-NEXT: fmadd.s ft0, ft1, ft2, ft0
+; RV32IF-NEXT: fnmsub.s ft0, ft1, ft2, ft0
; RV32IF-NEXT: fmv.x.w a0, ft0
; RV32IF-NEXT: ret
;
; RV64IF-NEXT: fmv.w.x ft2, a1
; RV64IF-NEXT: fmv.w.x ft3, zero
; RV64IF-NEXT: fadd.s ft2, ft2, ft3
-; RV64IF-NEXT: fneg.s ft2, ft2
-; RV64IF-NEXT: fmadd.s ft0, ft1, ft2, ft0
+; RV64IF-NEXT: fnmsub.s ft0, ft1, ft2, ft0
; RV64IF-NEXT: fmv.x.w a0, ft0
; RV64IF-NEXT: ret
%b_ = fadd float 0.0, %b