From 1855c0a82a2798276cafe765d825ac7875f5bf1d Mon Sep 17 00:00:00 2001
From: Yeting Kuo
Date: Mon, 17 Apr 2023 15:26:51 +0800
Subject: [PATCH] [RISCV] Support vector strict rounding operations.

The patch models custom lowering of the base rounding operations, expanding
the rounding into a conversion to integer and back to FP. The other thing the
patch does is convert sNaNs in the source to qNaNs.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D148519
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 138 +++++++-
 llvm/lib/Target/RISCV/RISCVISelLowering.h | 2 +
 llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td | 17 +-
 .../CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll | 335 ++++++++++++++++++++
 .../CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll | 335 ++++++++++++++++++++
 .../rvv/fixed-vectors-fceil-constrained-sdnode.ll | 336 ++++++++++++++++++++
 .../rvv/fixed-vectors-ffloor-constrained-sdnode.ll | 336 ++++++++++++++++++++
 .../fixed-vectors-fnearbyint-constrained-sdnode.ll | 282 +++++++++++++++++
 .../rvv/fixed-vectors-fround-costrained-sdnode.ll | 338 ++++++++++++++++++++
 .../fixed-vectors-froundeven-constrained-sdnode.ll | 338 ++++++++++++++++++++
 .../rvv/fixed-vectors-ftrunc-constrained-sdnode.ll | 306 ++++++++++++++++++
 .../RISCV/rvv/fnearbyint-constrained-sdnode.ll | 350 +++++++++++++++++++++
 .../CodeGen/RISCV/rvv/fround-costrained-sdnode.ll | 337 ++++++++++++++++++++
 .../RISCV/rvv/froundeven-constrained-sdnode.ll | 337 ++++++++++++++++++++
 .../CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll | 305 ++++++++++++++++++
 15 files changed, 4081 insertions(+), 11 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-costrained-sdnode.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fround-costrained-sdnode.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 8104f19..aae1e07 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -820,7 +820,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
                           ISD::STRICT_FDIV, ISD::STRICT_FSQRT, ISD::STRICT_FMA},
                          VT, Legal);
-      setOperationAction({ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS}, VT, Custom);
+      setOperationAction({ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS,
+                          ISD::STRICT_FTRUNC, ISD::STRICT_FCEIL,
+                          ISD::STRICT_FFLOOR, ISD::STRICT_FROUND,
+                          ISD::STRICT_FROUNDEVEN, ISD::STRICT_FNEARBYINT},
+                         VT, Custom);
     };

     // Sets common extload/truncstore actions on RVV floating-point vector
@@ -1050,12 +1054,13 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
         setOperationAction({ISD::STRICT_FP_EXTEND, ISD::STRICT_FP_ROUND}, VT,
                            Custom);
-        setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB,
-                            ISD::STRICT_FMUL, ISD::STRICT_FDIV,
-                            ISD::STRICT_FSQRT, ISD::STRICT_FMA},
-                           VT, Custom);
-        setOperationAction({ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS}, VT,
-                           Custom);
+        setOperationAction(
+            {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
+             ISD::STRICT_FDIV, ISD::STRICT_FSQRT, ISD::STRICT_FMA,
+             ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS, ISD::STRICT_FTRUNC,
+             ISD::STRICT_FCEIL, ISD::STRICT_FFLOOR, ISD::STRICT_FROUND,
+             ISD::STRICT_FROUNDEVEN, ISD::STRICT_FNEARBYINT},
+            VT, Custom);
       }

       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
@@ -2406,18 +2411,23 @@ static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
 static RISCVFPRndMode::RoundingMode matchRoundingOp(unsigned Opc) {
   switch (Opc) {
   case ISD::FROUNDEVEN:
+  case ISD::STRICT_FROUNDEVEN:
   case ISD::VP_FROUNDEVEN:
     return RISCVFPRndMode::RNE;
   case ISD::FTRUNC:
+  case ISD::STRICT_FTRUNC:
   case ISD::VP_FROUNDTOZERO:
     return RISCVFPRndMode::RTZ;
   case ISD::FFLOOR:
+  case ISD::STRICT_FFLOOR:
   case ISD::VP_FFLOOR:
     return RISCVFPRndMode::RDN;
   case ISD::FCEIL:
+  case ISD::STRICT_FCEIL:
   case ISD::VP_FCEIL:
     return RISCVFPRndMode::RUP;
   case ISD::FROUND:
+  case ISD::STRICT_FROUND:
   case ISD::VP_FROUND:
     return RISCVFPRndMode::RMM;
   case ISD::FRINT:
@@ -2534,6 +2544,110 @@ lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
   return convertFromScalableVector(VT, Truncated, DAG, Subtarget);
 }

+// Expand vector STRICT_FTRUNC, STRICT_FCEIL, STRICT_FFLOOR, STRICT_FROUND,
+// STRICT_FROUNDEVEN and STRICT_FNEARBYINT by converting sNaN of the source to
+// qNaN and converting the new source to integer and back to FP.
+static SDValue
+lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
+                                            const RISCVSubtarget &Subtarget) {
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+  SDValue Chain = Op.getOperand(0);
+  SDValue Src = Op.getOperand(1);
+
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
+    Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
+  }
+
+  auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
+
+  // Freeze the source since we are increasing the number of uses.
+  Src = DAG.getFreeze(Src);
+
+  // Convert sNaN to qNaN by executing x + x for each unordered element x in
+  // Src.
+  MVT MaskVT = Mask.getSimpleValueType();
+  SDValue Unorder = DAG.getNode(RISCVISD::STRICT_FSETCC_VL, DL,
+                                DAG.getVTList(MaskVT, MVT::Other),
+                                {Chain, Src, Src, DAG.getCondCode(ISD::SETUNE),
+                                 DAG.getUNDEF(MaskVT), Mask, VL});
+  Chain = Unorder.getValue(1);
+  Src = DAG.getNode(RISCVISD::STRICT_FADD_VL, DL,
+                    DAG.getVTList(ContainerVT, MVT::Other),
+                    {Chain, Src, Src, DAG.getUNDEF(ContainerVT), Unorder, VL});
+  Chain = Src.getValue(1);
+
+  // We do the conversion on the absolute value and fix the sign at the end.
+  SDValue Abs = DAG.getNode(RISCVISD::FABS_VL, DL, ContainerVT, Src, Mask, VL);
+
+  // Determine the largest integer that can be represented exactly. This and
+  // values larger than it don't have any fractional bits so don't need to
+  // be converted.
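+  // (This threshold is 2^(Precision-1): 1024.0 for f16, 2^23 = 8388608.0 for
+  // f32, and 2^52 for f64; the f32 value is the 0x4B000000 constant the tests
+  // below materialize with "lui a0, 307200".)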
+  const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(ContainerVT);
+  unsigned Precision = APFloat::semanticsPrecision(FltSem);
+  APFloat MaxVal = APFloat(FltSem);
+  MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
+                          /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
+  SDValue MaxValNode =
+      DAG.getConstantFP(MaxVal, DL, ContainerVT.getVectorElementType());
+  SDValue MaxValSplat = DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, ContainerVT,
+                                    DAG.getUNDEF(ContainerVT), MaxValNode, VL);
+
+  // If abs(Src) was larger than MaxVal or nan, keep it.
+  Mask = DAG.getNode(
+      RISCVISD::SETCC_VL, DL, MaskVT,
+      {Abs, MaxValSplat, DAG.getCondCode(ISD::SETOLT), Mask, Mask, VL});
+
+  // Truncate to integer and convert back to FP.
+  MVT IntVT = ContainerVT.changeVectorElementTypeToInteger();
+  MVT XLenVT = Subtarget.getXLenVT();
+  SDValue Truncated;
+
+  switch (Op.getOpcode()) {
+  default:
+    llvm_unreachable("Unexpected opcode");
+  case ISD::STRICT_FCEIL:
+  case ISD::STRICT_FFLOOR:
+  case ISD::STRICT_FROUND:
+  case ISD::STRICT_FROUNDEVEN: {
+    RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Op.getOpcode());
+    assert(FRM != RISCVFPRndMode::Invalid);
+    Truncated = DAG.getNode(
+        RISCVISD::STRICT_VFCVT_RM_X_F_VL, DL, DAG.getVTList(IntVT, MVT::Other),
+        {Chain, Src, Mask, DAG.getTargetConstant(FRM, DL, XLenVT), VL});
+    break;
+  }
+  case ISD::STRICT_FTRUNC:
+    Truncated =
+        DAG.getNode(RISCVISD::STRICT_VFCVT_RTZ_X_F_VL, DL,
+                    DAG.getVTList(IntVT, MVT::Other), Chain, Src, Mask, VL);
+    break;
+  case ISD::STRICT_FNEARBYINT:
+    Truncated = DAG.getNode(RISCVISD::STRICT_VFROUND_NOEXCEPT_VL, DL,
+                            DAG.getVTList(ContainerVT, MVT::Other), Chain, Src,
+                            Mask, VL);
+    break;
+  }
+  Chain = Truncated.getValue(1);
+
+  // VFROUND_NOEXCEPT_VL includes SINT_TO_FP_VL.
+  if (Op.getOpcode() != ISD::STRICT_FNEARBYINT) {
+    Truncated = DAG.getNode(RISCVISD::STRICT_SINT_TO_FP_VL, DL,
+                            DAG.getVTList(ContainerVT, MVT::Other), Chain,
+                            Truncated, Mask, VL);
+    Chain = Truncated.getValue(1);
+  }
+
+  // Restore the original sign so that -0.0 is preserved.
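+  // Src is also the merge operand of the FCOPYSIGN_VL below, so elements that
+  // were masked off above (|x| >= 2^(Precision-1) or NaN) keep their
+  // already-quieted value; in the tests this shows up as the masked vfsgnj.vv
+  // writing into the register that still holds Src.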
+ Truncated = DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Truncated, + Src, Src, Mask, VL); + + if (VT.isFixedLengthVector()) + Truncated = convertFromScalableVector(VT, Truncated, DAG, Subtarget); + return DAG.getMergeValues({Truncated, Chain}, DL); +} + static SDValue lowerFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget) { @@ -4832,6 +4946,14 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op, case ISD::STRICT_FSETCC: case ISD::STRICT_FSETCCS: return lowerVectorStrictFSetcc(Op, DAG); + case ISD::STRICT_FCEIL: + case ISD::STRICT_FRINT: + case ISD::STRICT_FFLOOR: + case ISD::STRICT_FTRUNC: + case ISD::STRICT_FNEARBYINT: + case ISD::STRICT_FROUND: + case ISD::STRICT_FROUNDEVEN: + return lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget); case ISD::MGATHER: case ISD::VP_GATHER: return lowerMaskedGather(Op, DAG); @@ -14745,10 +14867,12 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { NODE_NAME_CASE(STRICT_VFNCVT_ROD_VL) NODE_NAME_CASE(STRICT_SINT_TO_FP_VL) NODE_NAME_CASE(STRICT_UINT_TO_FP_VL) + NODE_NAME_CASE(STRICT_VFCVT_RM_X_F_VL) NODE_NAME_CASE(STRICT_VFCVT_RTZ_X_F_VL) NODE_NAME_CASE(STRICT_VFCVT_RTZ_XU_F_VL) NODE_NAME_CASE(STRICT_FSETCC_VL) NODE_NAME_CASE(STRICT_FSETCCS_VL) + NODE_NAME_CASE(STRICT_VFROUND_NOEXCEPT_VL) NODE_NAME_CASE(VWMUL_VL) NODE_NAME_CASE(VWMULU_VL) NODE_NAME_CASE(VWMULSU_VL) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index b48cdb2..5dada8a 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -344,10 +344,12 @@ enum NodeType : unsigned { STRICT_VFNCVT_ROD_VL, STRICT_SINT_TO_FP_VL, STRICT_UINT_TO_FP_VL, + STRICT_VFCVT_RM_X_F_VL, STRICT_VFCVT_RTZ_X_F_VL, STRICT_VFCVT_RTZ_XU_F_VL, STRICT_FSETCC_VL, STRICT_FSETCCS_VL, + STRICT_VFROUND_NOEXCEPT_VL, // WARNING: Do not add anything in the end unless you want the node to // have memop! 
In fact, starting from FIRST_TARGET_MEMORY_OPCODE all diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td index d71ef64..3887e0f 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -219,9 +219,13 @@ def riscv_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_ def riscv_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL>; def riscv_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL>; +def riscv_strict_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL, [SDNPHasChain]>; def riscv_strict_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>; def riscv_strict_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>; +def any_riscv_vfcvt_rm_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl, node:$rm), + [(riscv_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm), + (riscv_strict_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm)]>; def any_riscv_vfcvt_rtz_xu_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), [(riscv_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl), (riscv_strict_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl)]>; @@ -246,6 +250,11 @@ def any_riscv_uint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), (riscv_strict_uint_to_fp_vl node:$src, node:$mask, node:$vl)]>; def riscv_vfround_noexcept_vl: SDNode<"RISCVISD::VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL>; +def riscv_strict_vfround_noexcept_vl: SDNode<"RISCVISD::STRICT_VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>; + +def any_riscv_vfround_noexcept_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), + [(riscv_vfround_noexcept_vl node:$src, node:$mask, node:$vl), + (riscv_strict_vfround_noexcept_vl node:$src, node:$mask, node:$vl)]>; def riscv_setcc_vl : SDNode<"RISCVISD::SETCC_VL", SDT_RISCVSETCCOP_VL>; def riscv_strict_fsetcc_vl : SDNode<"RISCVISD::STRICT_FSETCC_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>; @@ -986,7 +995,7 @@ multiclass VPatConvertFP2IVL_V { } } -multiclass VPatConvertFP2I_RM_VL_V { +multiclass VPatConvertFP2I_RM_VL_V { foreach fvti = AllFloatVectors in { defvar ivti = GetIntVTypeInfo.Vti; def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), @@ -1927,8 +1936,8 @@ foreach vti = AllFloatVectors in { TAIL_AGNOSTIC)>; // Rounding without exception to implement nearbyint. 
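+  // any_riscv_vfround_noexcept_vl (defined earlier in this file) matches both
+  // the plain and the strict VFROUND_NOEXCEPT_VL nodes, so the same masked
+  // pseudo now also covers STRICT_FNEARBYINT.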
- def : Pat<(riscv_vfround_noexcept_vl (vti.Vector vti.RegClass:$rs1), - (vti.Mask V0), VLOpFrag), + def : Pat<(any_riscv_vfround_noexcept_vl (vti.Vector vti.RegClass:$rs1), + (vti.Mask V0), VLOpFrag), (!cast("PseudoVFROUND_NOEXCEPT_V_" # vti.LMul.MX #"_MASK") (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; @@ -2032,7 +2041,7 @@ foreach fvti = AllFloatVectors in { defm : VPatConvertFP2IVL_V; defm : VPatConvertFP2IVL_V; defm : VPatConvertFP2I_RM_VL_V; - defm : VPatConvertFP2I_RM_VL_V; + defm : VPatConvertFP2I_RM_VL_V; defm : VPatConvertFP2IVL_V; defm : VPatConvertFP2IVL_V; diff --git a/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll new file mode 100644 index 0000000..b6d2764 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll @@ -0,0 +1,335 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +define @ceil_nxv1f16( %x) { +; CHECK-LABEL: ceil_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI0_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.ceil.nxv1f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.ceil.nxv1f16(, metadata) + +define @ceil_nxv2f16( %x) { +; CHECK-LABEL: ceil_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI1_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.ceil.nxv2f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.ceil.nxv2f16(, metadata) + +define @ceil_nxv4f16( %x) { +; CHECK-LABEL: ceil_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI2_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.ceil.nxv4f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.ceil.nxv4f16(, metadata) + +define @ceil_nxv8f16( %x) { 
+; CHECK-LABEL: ceil_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI3_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.ceil.nxv8f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.ceil.nxv8f16(, metadata) + +define @ceil_nxv16f16( %x) { +; CHECK-LABEL: ceil_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI4_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.ceil.nxv16f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.ceil.nxv16f16(, metadata) + +define @ceil_nxv32f16( %x) { +; CHECK-LABEL: ceil_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI5_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.ceil.nxv32f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.ceil.nxv32f16(, metadata) + +define @ceil_nxv1f32( %x) { +; CHECK-LABEL: ceil_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.ceil.nxv1f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.ceil.nxv1f32(, metadata) + +define @ceil_nxv2f32( %x) { +; CHECK-LABEL: ceil_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv 
v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.ceil.nxv2f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.ceil.nxv2f32(, metadata) + +define @ceil_nxv4f32( %x) { +; CHECK-LABEL: ceil_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.ceil.nxv4f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.ceil.nxv4f32(, metadata) + +define @ceil_nxv8f32( %x) { +; CHECK-LABEL: ceil_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.ceil.nxv8f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.ceil.nxv8f32(, metadata) + +define @ceil_nxv16f32( %x) { +; CHECK-LABEL: ceil_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.ceil.nxv16f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.ceil.nxv16f32(, metadata) + +define @ceil_nxv1f64( %x) { +; CHECK-LABEL: ceil_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI11_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.ceil.nxv1f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.ceil.nxv1f64(, metadata) + +define @ceil_nxv2f64( %x) { +; CHECK-LABEL: ceil_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI12_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, 
v10, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.ceil.nxv2f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.ceil.nxv2f64(, metadata) + +define @ceil_nxv4f64( %x) { +; CHECK-LABEL: ceil_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI13_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.ceil.nxv4f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.ceil.nxv4f64(, metadata) + +define @ceil_nxv8f64( %x) { +; CHECK-LABEL: ceil_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI14_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.ceil.nxv8f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.ceil.nxv8f64(, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll new file mode 100644 index 0000000..7610d91 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll @@ -0,0 +1,335 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +define @floor_nxv1f16( %x) { +; CHECK-LABEL: floor_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI0_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.floor.nxv1f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.floor.nxv1f16(, metadata) + +define @floor_nxv2f16( %x) { +; CHECK-LABEL: floor_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI1_0) +; 
CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.floor.nxv2f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.floor.nxv2f16(, metadata) + +define @floor_nxv4f16( %x) { +; CHECK-LABEL: floor_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI2_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.floor.nxv4f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.floor.nxv4f16(, metadata) + +define @floor_nxv8f16( %x) { +; CHECK-LABEL: floor_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI3_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.floor.nxv8f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.floor.nxv8f16(, metadata) + +define @floor_nxv16f16( %x) { +; CHECK-LABEL: floor_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI4_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.floor.nxv16f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.floor.nxv16f16(, metadata) + +define @floor_nxv32f16( %x) { +; CHECK-LABEL: floor_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI5_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.floor.nxv32f16( %x, metadata 
!"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.floor.nxv32f16(, metadata) + +define @floor_nxv1f32( %x) { +; CHECK-LABEL: floor_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.floor.nxv1f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.floor.nxv1f32(, metadata) + +define @floor_nxv2f32( %x) { +; CHECK-LABEL: floor_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.floor.nxv2f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.floor.nxv2f32(, metadata) + +define @floor_nxv4f32( %x) { +; CHECK-LABEL: floor_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.floor.nxv4f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.floor.nxv4f32(, metadata) + +define @floor_nxv8f32( %x) { +; CHECK-LABEL: floor_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.floor.nxv8f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.floor.nxv8f32(, metadata) + +define @floor_nxv16f32( %x) { +; CHECK-LABEL: floor_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v16, 
v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.floor.nxv16f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.floor.nxv16f32(, metadata) + +define @floor_nxv1f64( %x) { +; CHECK-LABEL: floor_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI11_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.floor.nxv1f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.floor.nxv1f64(, metadata) + +define @floor_nxv2f64( %x) { +; CHECK-LABEL: floor_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI12_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.floor.nxv2f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.floor.nxv2f64(, metadata) + +define @floor_nxv4f64( %x) { +; CHECK-LABEL: floor_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI13_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.floor.nxv4f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.floor.nxv4f64(, metadata) + +define @floor_nxv8f64( %x) { +; CHECK-LABEL: floor_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI14_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.floor.nxv8f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.floor.nxv8f64(, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll new file mode 100644 index 
0000000..dcbca9c --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll @@ -0,0 +1,336 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +define <1 x half> @ceil_v1f16(<1 x half> %x) { +; CHECK-LABEL: ceil_v1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI0_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <1 x half> @llvm.experimental.constrained.ceil.v1f16(<1 x half> %x, metadata !"fpexcept.strict") + ret <1 x half> %a +} +declare <1 x half> @llvm.experimental.constrained.ceil.v1f16(<1 x half>, metadata) + +define <2 x half> @ceil_v2f16(<2 x half> %x) { +; CHECK-LABEL: ceil_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI1_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <2 x half> @llvm.experimental.constrained.ceil.v2f16(<2 x half> %x, metadata !"fpexcept.strict") + ret <2 x half> %a +} +declare <2 x half> @llvm.experimental.constrained.ceil.v2f16(<2 x half>, metadata) + +define <4 x half> @ceil_v4f16(<4 x half> %x) { +; CHECK-LABEL: ceil_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI2_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <4 x half> @llvm.experimental.constrained.ceil.v4f16(<4 x half> %x, metadata !"fpexcept.strict") + ret <4 x half> %a +} +declare <4 x half> @llvm.experimental.constrained.ceil.v4f16(<4 x half>, metadata) + +define <8 x half> @ceil_v8f16(<8 x half> %x) { +; CHECK-LABEL: ceil_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI3_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; 
CHECK-NEXT: ret + %a = call <8 x half> @llvm.experimental.constrained.ceil.v8f16(<8 x half> %x, metadata !"fpexcept.strict") + ret <8 x half> %a +} +declare <8 x half> @llvm.experimental.constrained.ceil.v8f16(<8 x half>, metadata) + +define <16 x half> @ceil_v16f16(<16 x half> %x) { +; CHECK-LABEL: ceil_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI4_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call <16 x half> @llvm.experimental.constrained.ceil.v16f16(<16 x half> %x, metadata !"fpexcept.strict") + ret <16 x half> %a +} +declare <16 x half> @llvm.experimental.constrained.ceil.v16f16(<16 x half>, metadata) + +define <32 x half> @ceil_v32f16(<32 x half> %x) { +; CHECK-LABEL: ceil_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI5_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call <32 x half> @llvm.experimental.constrained.ceil.v32f16(<32 x half> %x, metadata !"fpexcept.strict") + ret <32 x half> %a +} +declare <32 x half> @llvm.experimental.constrained.ceil.v32f16(<32 x half>, metadata) + +define <1 x float> @ceil_v1f32(<1 x float> %x) { +; CHECK-LABEL: ceil_v1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float> %x, metadata !"fpexcept.strict") + ret <1 x float> %a +} +declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata) + +define <2 x float> @ceil_v2f32(<2 x float> %x) { +; CHECK-LABEL: ceil_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <2 x float> @llvm.experimental.constrained.ceil.v2f32(<2 x float> %x, metadata !"fpexcept.strict") + ret <2 x float> %a +} +declare <2 x float> @llvm.experimental.constrained.ceil.v2f32(<2 
x float>, metadata) + +define <4 x float> @ceil_v4f32(<4 x float> %x) { +; CHECK-LABEL: ceil_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float> %x, metadata !"fpexcept.strict") + ret <4 x float> %a +} +declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metadata) + +define <8 x float> @ceil_v8f32(<8 x float> %x) { +; CHECK-LABEL: ceil_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call <8 x float> @llvm.experimental.constrained.ceil.v8f32(<8 x float> %x, metadata !"fpexcept.strict") + ret <8 x float> %a +} +declare <8 x float> @llvm.experimental.constrained.ceil.v8f32(<8 x float>, metadata) + +define <16 x float> @ceil_v16f32(<16 x float> %x) { +; CHECK-LABEL: ceil_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float> %x, metadata !"fpexcept.strict") + ret <16 x float> %a +} +declare <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float>, metadata) + +define <1 x double> @ceil_v1f64(<1 x double> %x) { +; CHECK-LABEL: ceil_v1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI11_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double> %x, metadata !"fpexcept.strict") + ret <1 x double> %a +} +declare <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double>, metadata) + +define <2 x double> @ceil_v2f64(<2 x double> %x) { +; CHECK-LABEL: ceil_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI12_0) +; 
CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %x, metadata !"fpexcept.strict") + ret <2 x double> %a +} +declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata) + +define <4 x double> @ceil_v4f64(<4 x double> %x) { +; CHECK-LABEL: ceil_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI13_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double> %x, metadata !"fpexcept.strict") + ret <4 x double> %a +} +declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata) + +define <8 x double> @ceil_v8f64(<8 x double> %x) { +; CHECK-LABEL: ceil_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI14_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 3 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call <8 x double> @llvm.experimental.constrained.ceil.v8f64(<8 x double> %x, metadata !"fpexcept.strict") + ret <8 x double> %a +} +declare <8 x double> @llvm.experimental.constrained.ceil.v8f64(<8 x double>, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll new file mode 100644 index 0000000..751b5dc --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll @@ -0,0 +1,336 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +define <1 x half> @floor_v1f16(<1 x half> %x) { +; CHECK-LABEL: floor_v1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI0_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, 
v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <1 x half> @llvm.experimental.constrained.floor.v1f16(<1 x half> %x, metadata !"fpexcept.strict") + ret <1 x half> %a +} +declare <1 x half> @llvm.experimental.constrained.floor.v1f16(<1 x half>, metadata) + +define <2 x half> @floor_v2f16(<2 x half> %x) { +; CHECK-LABEL: floor_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI1_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <2 x half> @llvm.experimental.constrained.floor.v2f16(<2 x half> %x, metadata !"fpexcept.strict") + ret <2 x half> %a +} +declare <2 x half> @llvm.experimental.constrained.floor.v2f16(<2 x half>, metadata) + +define <4 x half> @floor_v4f16(<4 x half> %x) { +; CHECK-LABEL: floor_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI2_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <4 x half> @llvm.experimental.constrained.floor.v4f16(<4 x half> %x, metadata !"fpexcept.strict") + ret <4 x half> %a +} +declare <4 x half> @llvm.experimental.constrained.floor.v4f16(<4 x half>, metadata) + +define <8 x half> @floor_v8f16(<8 x half> %x) { +; CHECK-LABEL: floor_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI3_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <8 x half> @llvm.experimental.constrained.floor.v8f16(<8 x half> %x, metadata !"fpexcept.strict") + ret <8 x half> %a +} +declare <8 x half> @llvm.experimental.constrained.floor.v8f16(<8 x half>, metadata) + +define <16 x half> @floor_v16f16(<16 x half> %x) { +; CHECK-LABEL: floor_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI4_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call <16 x half> @llvm.experimental.constrained.floor.v16f16(<16 x half> %x, metadata !"fpexcept.strict") + ret <16 x half> %a +} +declare <16 x half> 
@llvm.experimental.constrained.floor.v16f16(<16 x half>, metadata) + +define <32 x half> @floor_v32f16(<32 x half> %x) { +; CHECK-LABEL: floor_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI5_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call <32 x half> @llvm.experimental.constrained.floor.v32f16(<32 x half> %x, metadata !"fpexcept.strict") + ret <32 x half> %a +} +declare <32 x half> @llvm.experimental.constrained.floor.v32f16(<32 x half>, metadata) + +define <1 x float> @floor_v1f32(<1 x float> %x) { +; CHECK-LABEL: floor_v1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float> %x, metadata !"fpexcept.strict") + ret <1 x float> %a +} +declare <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float>, metadata) + +define <2 x float> @floor_v2f32(<2 x float> %x) { +; CHECK-LABEL: floor_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <2 x float> @llvm.experimental.constrained.floor.v2f32(<2 x float> %x, metadata !"fpexcept.strict") + ret <2 x float> %a +} +declare <2 x float> @llvm.experimental.constrained.floor.v2f32(<2 x float>, metadata) + +define <4 x float> @floor_v4f32(<4 x float> %x) { +; CHECK-LABEL: floor_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float> %x, metadata !"fpexcept.strict") + ret <4 x float> %a +} +declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata) + +define <8 x float> @floor_v8f32(<8 x float> %x) { +; CHECK-LABEL: floor_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; 
CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call <8 x float> @llvm.experimental.constrained.floor.v8f32(<8 x float> %x, metadata !"fpexcept.strict") + ret <8 x float> %a +} +declare <8 x float> @llvm.experimental.constrained.floor.v8f32(<8 x float>, metadata) + +define <16 x float> @floor_v16f32(<16 x float> %x) { +; CHECK-LABEL: floor_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call <16 x float> @llvm.experimental.constrained.floor.v16f32(<16 x float> %x, metadata !"fpexcept.strict") + ret <16 x float> %a +} +declare <16 x float> @llvm.experimental.constrained.floor.v16f32(<16 x float>, metadata) + +define <1 x double> @floor_v1f64(<1 x double> %x) { +; CHECK-LABEL: floor_v1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI11_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double> %x, metadata !"fpexcept.strict") + ret <1 x double> %a +} +declare <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double>, metadata) + +define <2 x double> @floor_v2f64(<2 x double> %x) { +; CHECK-LABEL: floor_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI12_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %x, metadata !"fpexcept.strict") + ret <2 x double> %a +} +declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata) + +define <4 x double> @floor_v4f64(<4 x double> %x) { +; CHECK-LABEL: floor_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI13_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: 
vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double> %x, metadata !"fpexcept.strict") + ret <4 x double> %a +} +declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata) + +define <8 x double> @floor_v8f64(<8 x double> %x) { +; CHECK-LABEL: floor_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI14_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 2 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call <8 x double> @llvm.experimental.constrained.floor.v8f64(<8 x double> %x, metadata !"fpexcept.strict") + ret <8 x double> %a +} +declare <8 x double> @llvm.experimental.constrained.floor.v8f64(<8 x double>, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll new file mode 100644 index 0000000..28da30f --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll @@ -0,0 +1,282 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <2 x half> @llvm.experimental.constrained.nearbyint.v2f16(<2 x half>, metadata, metadata) + +define <2 x half> @nearbyint_v2f16(<2 x half> %v) { +; CHECK-LABEL: nearbyint_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI0_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %r = call <2 x half> @llvm.experimental.constrained.nearbyint.v2f16(<2 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret <2 x half> %r +} + +declare <4 x half> @llvm.experimental.constrained.nearbyint.v4f16(<4 x half>, metadata, metadata) + +define <4 x half> @nearbyint_v4f16(<4 x half> %v) { +; CHECK-LABEL: nearbyint_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI1_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, 
e16, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %r = call <4 x half> @llvm.experimental.constrained.nearbyint.v4f16(<4 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret <4 x half> %r +} + +declare <8 x half> @llvm.experimental.constrained.nearbyint.v8f16(<8 x half>, metadata, metadata) + +define <8 x half> @nearbyint_v8f16(<8 x half> %v) { +; CHECK-LABEL: nearbyint_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI2_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %r = call <8 x half> @llvm.experimental.constrained.nearbyint.v8f16(<8 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret <8 x half> %r +} + +declare <16 x half> @llvm.experimental.constrained.nearbyint.v16f16(<16 x half>, metadata, metadata) + +define <16 x half> @nearbyint_v16f16(<16 x half> %v) { +; CHECK-LABEL: nearbyint_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI3_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %r = call <16 x half> @llvm.experimental.constrained.nearbyint.v16f16(<16 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret <16 x half> %r +} + +declare <32 x half> @llvm.experimental.constrained.nearbyint.v32f16(<32 x half>, metadata, metadata) + +define <32 x half> @nearbyint_v32f16(<32 x half> %v) { +; CHECK-LABEL: nearbyint_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI4_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %r = call <32 x half> @llvm.experimental.constrained.nearbyint.v32f16(<32 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret <32 x half> %r +} + +declare <2 x float> @llvm.experimental.constrained.nearbyint.v2f32(<2 x float>, metadata, metadata) + +define <2 x float> @nearbyint_v2f32(<2 x float> %v) { +; CHECK-LABEL: nearbyint_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: fsflags 
a0 +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %r = call <2 x float> @llvm.experimental.constrained.nearbyint.v2f32(<2 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret <2 x float> %r +} + +declare <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float>, metadata, metadata) + +define <4 x float> @nearbyint_v4f32(<4 x float> %v) { +; CHECK-LABEL: nearbyint_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %r = call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret <4 x float> %r +} + +declare <8 x float> @llvm.experimental.constrained.nearbyint.v8f32(<8 x float>, metadata, metadata) + +define <8 x float> @nearbyint_v8f32(<8 x float> %v) { +; CHECK-LABEL: nearbyint_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %r = call <8 x float> @llvm.experimental.constrained.nearbyint.v8f32(<8 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret <8 x float> %r +} + +declare <16 x float> @llvm.experimental.constrained.nearbyint.v16f32(<16 x float>, metadata, metadata) + +define <16 x float> @nearbyint_v16f32(<16 x float> %v) { +; CHECK-LABEL: nearbyint_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %r = call <16 x float> @llvm.experimental.constrained.nearbyint.v16f32(<16 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret <16 x float> %r +} + +declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata) + +define <2 x double> @nearbyint_v2f64(<2 x double> %v) { +; CHECK-LABEL: nearbyint_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI9_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI9_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: fsflags a0 +; 
CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT: ret
+ %r = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x double> %r
+}
+
+declare <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(<4 x double>, metadata, metadata)
+
+define <4 x double> @nearbyint_v4f64(<4 x double> %v) {
+; CHECK-LABEL: nearbyint_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vmfne.vv v0, v8, v8
+; CHECK-NEXT: lui a0, %hi(.LCPI10_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI10_0)(a0)
+; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: vmflt.vf v0, v10, fa5
+; CHECK-NEXT: frflags a0
+; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; CHECK-NEXT: fsflags a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; CHECK-NEXT: ret
+ %r = call <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(<4 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x double> %r
+}
+
+declare <8 x double> @llvm.experimental.constrained.nearbyint.v8f64(<8 x double>, metadata, metadata)
+
+define <8 x double> @nearbyint_v8f64(<8 x double> %v) {
+; CHECK-LABEL: nearbyint_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vmfne.vv v0, v8, v8
+; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: vmflt.vf v0, v12, fa5
+; CHECK-NEXT: frflags a0
+; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: fsflags a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; CHECK-NEXT: ret
+ %r = call <8 x double> @llvm.experimental.constrained.nearbyint.v8f64(<8 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x double> %r
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-costrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-costrained-sdnode.ll
new file mode 100644
index 0000000..6e8c0b7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-costrained-sdnode.ll
@@ -0,0 +1,338 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+
+; This file tests the code generation for `llvm.experimental.constrained.round.*` on fixed-length vector types.
+ +define <1 x half> @round_v1f16(<1 x half> %x) { +; CHECK-LABEL: round_v1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI0_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <1 x half> @llvm.experimental.constrained.round.v1f16(<1 x half> %x, metadata !"fpexcept.strict") + ret <1 x half> %a +} +declare <1 x half> @llvm.experimental.constrained.round.v1f16(<1 x half>, metadata) + +define <2 x half> @round_v2f16(<2 x half> %x) { +; CHECK-LABEL: round_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI1_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <2 x half> @llvm.experimental.constrained.round.v2f16(<2 x half> %x, metadata !"fpexcept.strict") + ret <2 x half> %a +} +declare <2 x half> @llvm.experimental.constrained.round.v2f16(<2 x half>, metadata) + +define <4 x half> @round_v4f16(<4 x half> %x) { +; CHECK-LABEL: round_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI2_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <4 x half> @llvm.experimental.constrained.round.v4f16(<4 x half> %x, metadata !"fpexcept.strict") + ret <4 x half> %a +} +declare <4 x half> @llvm.experimental.constrained.round.v4f16(<4 x half>, metadata) + +define <8 x half> @round_v8f16(<8 x half> %x) { +; CHECK-LABEL: round_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI3_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <8 x half> @llvm.experimental.constrained.round.v8f16(<8 x half> %x, metadata !"fpexcept.strict") + ret <8 x half> %a +} +declare <8 x half> @llvm.experimental.constrained.round.v8f16(<8 x half>, metadata) + +define <16 x half> @round_v16f16(<16 x half> %x) { +; CHECK-LABEL: round_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI4_0) +; 
CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call <16 x half> @llvm.experimental.constrained.round.v16f16(<16 x half> %x, metadata !"fpexcept.strict") + ret <16 x half> %a +} +declare <16 x half> @llvm.experimental.constrained.round.v16f16(<16 x half>, metadata) + +define <32 x half> @round_v32f16(<32 x half> %x) { +; CHECK-LABEL: round_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI5_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call <32 x half> @llvm.experimental.constrained.round.v32f16(<32 x half> %x, metadata !"fpexcept.strict") + ret <32 x half> %a +} +declare <32 x half> @llvm.experimental.constrained.round.v32f16(<32 x half>, metadata) + +define <1 x float> @round_v1f32(<1 x float> %x) { +; CHECK-LABEL: round_v1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float> %x, metadata !"fpexcept.strict") + ret <1 x float> %a +} +declare <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float>, metadata) + +define <2 x float> @round_v2f32(<2 x float> %x) { +; CHECK-LABEL: round_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <2 x float> @llvm.experimental.constrained.round.v2f32(<2 x float> %x, metadata !"fpexcept.strict") + ret <2 x float> %a +} +declare <2 x float> @llvm.experimental.constrained.round.v2f32(<2 x float>, metadata) + +define <4 x float> @round_v4f32(<4 x float> %x) { +; CHECK-LABEL: round_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; 
CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float> %x, metadata !"fpexcept.strict") + ret <4 x float> %a +} +declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata) + +define <8 x float> @round_v8f32(<8 x float> %x) { +; CHECK-LABEL: round_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call <8 x float> @llvm.experimental.constrained.round.v8f32(<8 x float> %x, metadata !"fpexcept.strict") + ret <8 x float> %a +} +declare <8 x float> @llvm.experimental.constrained.round.v8f32(<8 x float>, metadata) + +define <16 x float> @round_v16f32(<16 x float> %x) { +; CHECK-LABEL: round_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call <16 x float> @llvm.experimental.constrained.round.v16f32(<16 x float> %x, metadata !"fpexcept.strict") + ret <16 x float> %a +} +declare <16 x float> @llvm.experimental.constrained.round.v16f32(<16 x float>, metadata) + +define <1 x double> @round_v1f64(<1 x double> %x) { +; CHECK-LABEL: round_v1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI11_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double> %x, metadata !"fpexcept.strict") + ret <1 x double> %a +} +declare <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double>, metadata) + +define <2 x double> @round_v2f64(<2 x double> %x) { +; CHECK-LABEL: round_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI12_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <2 x double> 
@llvm.experimental.constrained.round.v2f64(<2 x double> %x, metadata !"fpexcept.strict")
+ ret <2 x double> %a
+}
+declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
+
+define <4 x double> @round_v4f64(<4 x double> %x) {
+; CHECK-LABEL: round_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vmfne.vv v0, v8, v8
+; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: vmflt.vf v0, v10, fa5
+; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; CHECK-NEXT: ret
+ %a = call <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double> %x, metadata !"fpexcept.strict")
+ ret <4 x double> %a
+}
+declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata)
+
+define <8 x double> @round_v8f64(<8 x double> %x) {
+; CHECK-LABEL: round_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vmfne.vv v0, v8, v8
+; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
+; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: vmflt.vf v0, v12, fa5
+; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; CHECK-NEXT: ret
+ %a = call <8 x double> @llvm.experimental.constrained.round.v8f64(<8 x double> %x, metadata !"fpexcept.strict")
+ ret <8 x double> %a
+}
+declare <8 x double> @llvm.experimental.constrained.round.v8f64(<8 x double>, metadata)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll
new file mode 100644
index 0000000..a4e6fe6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll
@@ -0,0 +1,338 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+
+; This file tests the code generation for `llvm.experimental.constrained.roundeven.*` on fixed-length vector types.
+ +define <1 x half> @roundeven_v1f16(<1 x half> %x) { +; CHECK-LABEL: roundeven_v1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI0_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <1 x half> @llvm.experimental.constrained.roundeven.v1f16(<1 x half> %x, metadata !"fpexcept.strict") + ret <1 x half> %a +} +declare <1 x half> @llvm.experimental.constrained.roundeven.v1f16(<1 x half>, metadata) + +define <2 x half> @roundeven_v2f16(<2 x half> %x) { +; CHECK-LABEL: roundeven_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI1_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <2 x half> @llvm.experimental.constrained.roundeven.v2f16(<2 x half> %x, metadata !"fpexcept.strict") + ret <2 x half> %a +} +declare <2 x half> @llvm.experimental.constrained.roundeven.v2f16(<2 x half>, metadata) + +define <4 x half> @roundeven_v4f16(<4 x half> %x) { +; CHECK-LABEL: roundeven_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI2_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <4 x half> @llvm.experimental.constrained.roundeven.v4f16(<4 x half> %x, metadata !"fpexcept.strict") + ret <4 x half> %a +} +declare <4 x half> @llvm.experimental.constrained.roundeven.v4f16(<4 x half>, metadata) + +define <8 x half> @roundeven_v8f16(<8 x half> %x) { +; CHECK-LABEL: roundeven_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI3_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <8 x half> @llvm.experimental.constrained.roundeven.v8f16(<8 x half> %x, metadata !"fpexcept.strict") + ret <8 x half> %a +} +declare <8 x half> @llvm.experimental.constrained.roundeven.v8f16(<8 x half>, metadata) + +define <16 x half> @roundeven_v16f16(<16 x half> %x) { +; CHECK-LABEL: roundeven_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; 
CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI4_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call <16 x half> @llvm.experimental.constrained.roundeven.v16f16(<16 x half> %x, metadata !"fpexcept.strict") + ret <16 x half> %a +} +declare <16 x half> @llvm.experimental.constrained.roundeven.v16f16(<16 x half>, metadata) + +define <32 x half> @roundeven_v32f16(<32 x half> %x) { +; CHECK-LABEL: roundeven_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI5_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call <32 x half> @llvm.experimental.constrained.roundeven.v32f16(<32 x half> %x, metadata !"fpexcept.strict") + ret <32 x half> %a +} +declare <32 x half> @llvm.experimental.constrained.roundeven.v32f16(<32 x half>, metadata) + +define <1 x float> @roundeven_v1f32(<1 x float> %x) { +; CHECK-LABEL: roundeven_v1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <1 x float> @llvm.experimental.constrained.roundeven.v1f32(<1 x float> %x, metadata !"fpexcept.strict") + ret <1 x float> %a +} +declare <1 x float> @llvm.experimental.constrained.roundeven.v1f32(<1 x float>, metadata) + +define <2 x float> @roundeven_v2f32(<2 x float> %x) { +; CHECK-LABEL: roundeven_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <2 x float> @llvm.experimental.constrained.roundeven.v2f32(<2 x float> %x, metadata !"fpexcept.strict") + ret <2 x float> %a +} +declare <2 x float> @llvm.experimental.constrained.roundeven.v2f32(<2 x float>, metadata) + +define <4 x float> @roundeven_v4f32(<4 x float> %x) { +; CHECK-LABEL: roundeven_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; 
CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float> %x, metadata !"fpexcept.strict") + ret <4 x float> %a +} +declare <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float>, metadata) + +define <8 x float> @roundeven_v8f32(<8 x float> %x) { +; CHECK-LABEL: roundeven_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call <8 x float> @llvm.experimental.constrained.roundeven.v8f32(<8 x float> %x, metadata !"fpexcept.strict") + ret <8 x float> %a +} +declare <8 x float> @llvm.experimental.constrained.roundeven.v8f32(<8 x float>, metadata) + +define <16 x float> @roundeven_v16f32(<16 x float> %x) { +; CHECK-LABEL: roundeven_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call <16 x float> @llvm.experimental.constrained.roundeven.v16f32(<16 x float> %x, metadata !"fpexcept.strict") + ret <16 x float> %a +} +declare <16 x float> @llvm.experimental.constrained.roundeven.v16f32(<16 x float>, metadata) + +define <1 x double> @roundeven_v1f64(<1 x double> %x) { +; CHECK-LABEL: roundeven_v1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI11_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double> %x, metadata !"fpexcept.strict") + ret <1 x double> %a +} +declare <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double>, metadata) + +define <2 x double> @roundeven_v2f64(<2 x double> %x) { +; CHECK-LABEL: roundeven_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI12_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: 
fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double> %x, metadata !"fpexcept.strict") + ret <2 x double> %a +} +declare <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double>, metadata) + +define <4 x double> @roundeven_v4f64(<4 x double> %x) { +; CHECK-LABEL: roundeven_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI13_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call <4 x double> @llvm.experimental.constrained.roundeven.v4f64(<4 x double> %x, metadata !"fpexcept.strict") + ret <4 x double> %a +} +declare <4 x double> @llvm.experimental.constrained.roundeven.v4f64(<4 x double>, metadata) + +define <8 x double> @roundeven_v8f64(<8 x double> %x) { +; CHECK-LABEL: roundeven_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI14_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call <8 x double> @llvm.experimental.constrained.roundeven.v8f64(<8 x double> %x, metadata !"fpexcept.strict") + ret <8 x double> %a +} +declare <8 x double> @llvm.experimental.constrained.roundeven.v8f64(<8 x double>, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll new file mode 100644 index 0000000..7c43e28 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll @@ -0,0 +1,306 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +define <1 x half> @trunc_v1f16(<1 x half> %x) { +; CHECK-LABEL: trunc_v1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI0_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <1 x half> @llvm.experimental.constrained.trunc.v1f16(<1 x half> %x, metadata !"fpexcept.strict") + ret <1 x half> %a +} +declare <1 x half> 
@llvm.experimental.constrained.trunc.v1f16(<1 x half>, metadata) + +define <2 x half> @trunc_v2f16(<2 x half> %x) { +; CHECK-LABEL: trunc_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI1_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <2 x half> @llvm.experimental.constrained.trunc.v2f16(<2 x half> %x, metadata !"fpexcept.strict") + ret <2 x half> %a +} +declare <2 x half> @llvm.experimental.constrained.trunc.v2f16(<2 x half>, metadata) + +define <4 x half> @trunc_v4f16(<4 x half> %x) { +; CHECK-LABEL: trunc_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI2_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <4 x half> @llvm.experimental.constrained.trunc.v4f16(<4 x half> %x, metadata !"fpexcept.strict") + ret <4 x half> %a +} +declare <4 x half> @llvm.experimental.constrained.trunc.v4f16(<4 x half>, metadata) + +define <8 x half> @trunc_v8f16(<8 x half> %x) { +; CHECK-LABEL: trunc_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI3_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <8 x half> @llvm.experimental.constrained.trunc.v8f16(<8 x half> %x, metadata !"fpexcept.strict") + ret <8 x half> %a +} +declare <8 x half> @llvm.experimental.constrained.trunc.v8f16(<8 x half>, metadata) + +define <16 x half> @trunc_v16f16(<16 x half> %x) { +; CHECK-LABEL: trunc_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI4_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half> %x, metadata !"fpexcept.strict") + ret <16 x half> %a +} +declare <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half>, metadata) + +define <32 x half> @trunc_v32f16(<32 x half> %x) { +; CHECK-LABEL: trunc_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI5_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; 
CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half> %x, metadata !"fpexcept.strict") + ret <32 x half> %a +} +declare <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half>, metadata) + +define <1 x float> @trunc_v1f32(<1 x float> %x) { +; CHECK-LABEL: trunc_v1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float> %x, metadata !"fpexcept.strict") + ret <1 x float> %a +} +declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata) + +define <2 x float> @trunc_v2f32(<2 x float> %x) { +; CHECK-LABEL: trunc_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <2 x float> @llvm.experimental.constrained.trunc.v2f32(<2 x float> %x, metadata !"fpexcept.strict") + ret <2 x float> %a +} +declare <2 x float> @llvm.experimental.constrained.trunc.v2f32(<2 x float>, metadata) + +define <4 x float> @trunc_v4f32(<4 x float> %x) { +; CHECK-LABEL: trunc_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %x, metadata !"fpexcept.strict") + ret <4 x float> %a +} +declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata) + +define <8 x float> @trunc_v8f32(<8 x float> %x) { +; CHECK-LABEL: trunc_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float> %x, metadata !"fpexcept.strict") + ret <8 x float> %a +} +declare <8 x float> 
@llvm.experimental.constrained.trunc.v8f32(<8 x float>, metadata) + +define <16 x float> @trunc_v16f32(<16 x float> %x) { +; CHECK-LABEL: trunc_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float> %x, metadata !"fpexcept.strict") + ret <16 x float> %a +} +declare <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float>, metadata) + +define <1 x double> @trunc_v1f64(<1 x double> %x) { +; CHECK-LABEL: trunc_v1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI11_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict") + ret <1 x double> %a +} +declare <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double>, metadata) + +define <2 x double> @trunc_v2f64(<2 x double> %x) { +; CHECK-LABEL: trunc_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI12_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict") + ret <2 x double> %a +} +declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata) + +define <4 x double> @trunc_v4f64(<4 x double> %x) { +; CHECK-LABEL: trunc_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI13_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double> %x, metadata !"fpexcept.strict") + ret <4 x double> %a +} +declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata) + +define <8 x double> @trunc_v8f64(<8 x double> %x) { +; CHECK-LABEL: trunc_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI14_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; CHECK-NEXT: vfadd.vv v8, 
v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double> %x, metadata !"fpexcept.strict") + ret <8 x double> %a +} +declare <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double>, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll new file mode 100644 index 0000000..32d8f9d --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll @@ -0,0 +1,350 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare @llvm.experimental.constrained.nearbyint.nxv1f16(, metadata, metadata) + +define @nearbyint_nxv1f16( %v) { +; CHECK-LABEL: nearbyint_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI0_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %r = call @llvm.experimental.constrained.nearbyint.nxv1f16( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret %r +} + +declare @llvm.experimental.constrained.nearbyint.nxv2f16(, metadata, metadata) + +define @nearbyint_nxv2f16( %v) { +; CHECK-LABEL: nearbyint_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI1_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %r = call @llvm.experimental.constrained.nearbyint.nxv2f16( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret %r +} + +declare @llvm.experimental.constrained.nearbyint.nxv4f16(, metadata, metadata) + +define @nearbyint_nxv4f16( %v) { +; CHECK-LABEL: nearbyint_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI2_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %r = call @llvm.experimental.constrained.nearbyint.nxv4f16( %v, metadata 
!"round.dynamic", metadata !"fpexcept.strict") + ret %r +} + +declare @llvm.experimental.constrained.nearbyint.nxv8f16(, metadata, metadata) + +define @nearbyint_nxv8f16( %v) { +; CHECK-LABEL: nearbyint_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI3_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %r = call @llvm.experimental.constrained.nearbyint.nxv8f16( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret %r +} + +declare @llvm.experimental.constrained.nearbyint.nxv16f16(, metadata, metadata) + +define @nearbyint_nxv16f16( %v) { +; CHECK-LABEL: nearbyint_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI4_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %r = call @llvm.experimental.constrained.nearbyint.nxv16f16( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret %r +} + +declare @llvm.experimental.constrained.nearbyint.nxv32f16(, metadata, metadata) + +define @nearbyint_nxv32f16( %v) { +; CHECK-LABEL: nearbyint_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI5_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %r = call @llvm.experimental.constrained.nearbyint.nxv32f16( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret %r +} + +declare @llvm.experimental.constrained.nearbyint.nxv1f32(, metadata, metadata) + +define @nearbyint_nxv1f32( %v) { +; CHECK-LABEL: nearbyint_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %r = call @llvm.experimental.constrained.nearbyint.nxv1f32( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret %r +} + +declare @llvm.experimental.constrained.nearbyint.nxv2f32(, metadata, metadata) + +define @nearbyint_nxv2f32( %v) { +; CHECK-LABEL: nearbyint_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, 
e32, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %r = call @llvm.experimental.constrained.nearbyint.nxv2f32( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret %r +} + +declare @llvm.experimental.constrained.nearbyint.nxv4f32(, metadata, metadata) + +define @nearbyint_nxv4f32( %v) { +; CHECK-LABEL: nearbyint_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %r = call @llvm.experimental.constrained.nearbyint.nxv4f32( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret %r +} + +declare @llvm.experimental.constrained.nearbyint.nxv8f32(, metadata, metadata) + +define @nearbyint_nxv8f32( %v) { +; CHECK-LABEL: nearbyint_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %r = call @llvm.experimental.constrained.nearbyint.nxv8f32( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret %r +} + +declare @llvm.experimental.constrained.nearbyint.nxv16f32(, metadata, metadata) + +define @nearbyint_nxv16f32( %v) { +; CHECK-LABEL: nearbyint_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %r = call @llvm.experimental.constrained.nearbyint.nxv16f32( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret %r +} + +declare @llvm.experimental.constrained.nearbyint.nxv1f64(, metadata, metadata) + +define @nearbyint_nxv1f64( %v) { +; CHECK-LABEL: nearbyint_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI11_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: 
vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %r = call @llvm.experimental.constrained.nearbyint.nxv1f64( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret %r +} + +declare @llvm.experimental.constrained.nearbyint.nxv2f64(, metadata, metadata) + +define @nearbyint_nxv2f64( %v) { +; CHECK-LABEL: nearbyint_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI12_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %r = call @llvm.experimental.constrained.nearbyint.nxv2f64( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret %r +} + +declare @llvm.experimental.constrained.nearbyint.nxv4f64(, metadata, metadata) + +define @nearbyint_nxv4f64( %v) { +; CHECK-LABEL: nearbyint_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI13_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %r = call @llvm.experimental.constrained.nearbyint.nxv4f64( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret %r +} + +declare @llvm.experimental.constrained.nearbyint.nxv8f64(, metadata, metadata) + +define @nearbyint_nxv8f64( %v) { +; CHECK-LABEL: nearbyint_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI14_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; CHECK-NEXT: frflags a0 +; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: fsflags a0 +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %r = call @llvm.experimental.constrained.nearbyint.nxv8f64( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") + ret %r +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-costrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-costrained-sdnode.ll new file mode 100644 index 0000000..543f7b3 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fround-costrained-sdnode.ll @@ -0,0 +1,337 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +; This file tests the code generation for `llvm.experimental.constrained.round.*` on scalable vector type. 
+ +define @round_nxv1f16( %x) { +; CHECK-LABEL: round_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI0_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.round.nxv1f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.round.nxv1f16(, metadata) + +define @round_nxv2f16( %x) { +; CHECK-LABEL: round_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI1_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.round.nxv2f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.round.nxv2f16(, metadata) + +define @round_nxv4f16( %x) { +; CHECK-LABEL: round_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI2_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.round.nxv4f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.round.nxv4f16(, metadata) + +define @round_nxv8f16( %x) { +; CHECK-LABEL: round_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI3_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.round.nxv8f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.round.nxv8f16(, metadata) + +define @round_nxv16f16( %x) { +; CHECK-LABEL: round_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI4_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; 
CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.round.nxv16f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.round.nxv16f16(, metadata) + +define @round_nxv32f16( %x) { +; CHECK-LABEL: round_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI5_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.round.nxv32f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.round.nxv32f16(, metadata) + +define @round_nxv1f32( %x) { +; CHECK-LABEL: round_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.round.nxv1f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.round.nxv1f32(, metadata) + +define @round_nxv2f32( %x) { +; CHECK-LABEL: round_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.round.nxv2f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.round.nxv2f32(, metadata) + +define @round_nxv4f32( %x) { +; CHECK-LABEL: round_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.round.nxv4f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.round.nxv4f32(, metadata) + +define @round_nxv8f32( %x) { +; CHECK-LABEL: round_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: lui 
a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.round.nxv8f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.round.nxv8f32(, metadata) + +define @round_nxv16f32( %x) { +; CHECK-LABEL: round_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.round.nxv16f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.round.nxv16f32(, metadata) + +define @round_nxv1f64( %x) { +; CHECK-LABEL: round_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI11_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.round.nxv1f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.round.nxv1f64(, metadata) + +define @round_nxv2f64( %x) { +; CHECK-LABEL: round_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI12_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.round.nxv2f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.round.nxv2f64(, metadata) + +define @round_nxv4f64( %x) { +; CHECK-LABEL: round_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI13_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.round.nxv4f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.round.nxv4f64(, 
metadata) + +define @round_nxv8f64( %x) { +; CHECK-LABEL: round_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI14_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; CHECK-NEXT: fsrmi a0, 4 +; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.round.nxv8f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.round.nxv8f64(, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll new file mode 100644 index 0000000..58918d1 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll @@ -0,0 +1,337 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +; This file tests the code generation for `llvm.experimental.constrained.roundeven.*` on scalable vector type. + +define @roundeven_nxv1f16( %x) { +; CHECK-LABEL: roundeven_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI0_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.roundeven.nxv1f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.roundeven.nxv1f16(, metadata) + +define @roundeven_nxv2f16( %x) { +; CHECK-LABEL: roundeven_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI1_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.roundeven.nxv2f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.roundeven.nxv2f16(, metadata) + +define @roundeven_nxv4f16( %x) { +; CHECK-LABEL: roundeven_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI2_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: 
vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.roundeven.nxv4f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.roundeven.nxv4f16(, metadata) + +define @roundeven_nxv8f16( %x) { +; CHECK-LABEL: roundeven_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI3_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.roundeven.nxv8f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.roundeven.nxv8f16(, metadata) + +define @roundeven_nxv16f16( %x) { +; CHECK-LABEL: roundeven_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI4_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.roundeven.nxv16f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.roundeven.nxv16f16(, metadata) + +define @roundeven_nxv32f16( %x) { +; CHECK-LABEL: roundeven_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI5_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.roundeven.nxv32f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.roundeven.nxv32f16(, metadata) + +define @roundeven_nxv1f32( %x) { +; CHECK-LABEL: roundeven_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.roundeven.nxv1f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.roundeven.nxv1f32(, metadata) + +define @roundeven_nxv2f32( %x) { +; CHECK-LABEL: roundeven_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli 
a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.roundeven.nxv2f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.roundeven.nxv2f32(, metadata) + +define @roundeven_nxv4f32( %x) { +; CHECK-LABEL: roundeven_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.roundeven.nxv4f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.roundeven.nxv4f32(, metadata) + +define @roundeven_nxv8f32( %x) { +; CHECK-LABEL: roundeven_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.roundeven.nxv8f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.roundeven.nxv8f32(, metadata) + +define @roundeven_nxv16f32( %x) { +; CHECK-LABEL: roundeven_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.roundeven.nxv16f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.roundeven.nxv16f32(, metadata) + +define @roundeven_nxv1f64( %x) { +; CHECK-LABEL: roundeven_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI11_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: 
ret + %a = call @llvm.experimental.constrained.roundeven.nxv1f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.roundeven.nxv1f64(, metadata) + +define @roundeven_nxv2f64( %x) { +; CHECK-LABEL: roundeven_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI12_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.roundeven.nxv2f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.roundeven.nxv2f64(, metadata) + +define @roundeven_nxv4f64( %x) { +; CHECK-LABEL: roundeven_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI13_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.roundeven.nxv4f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.roundeven.nxv4f64(, metadata) + +define @roundeven_nxv8f64( %x) { +; CHECK-LABEL: roundeven_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI14_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.roundeven.nxv8f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.roundeven.nxv8f64(, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll new file mode 100644 index 0000000..553c66a --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll @@ -0,0 +1,305 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +define @trunc_nxv1f16( %x) { +; CHECK-LABEL: trunc_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI0_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, 
v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.trunc.nxv1f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.trunc.nxv1f16(, metadata) + +define @trunc_nxv2f16( %x) { +; CHECK-LABEL: trunc_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI1_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.trunc.nxv2f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.trunc.nxv2f16(, metadata) + +define @trunc_nxv4f16( %x) { +; CHECK-LABEL: trunc_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI2_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.trunc.nxv4f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.trunc.nxv4f16(, metadata) + +define @trunc_nxv8f16( %x) { +; CHECK-LABEL: trunc_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI3_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.trunc.nxv8f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.trunc.nxv8f16(, metadata) + +define @trunc_nxv16f16( %x) { +; CHECK-LABEL: trunc_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI4_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.trunc.nxv16f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.trunc.nxv16f16(, metadata) + +define @trunc_nxv32f16( %x) { +; CHECK-LABEL: trunc_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI5_0) +; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: 
vmflt.vf v0, v16, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.trunc.nxv32f16( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.trunc.nxv32f16(, metadata) + +define @trunc_nxv1f32( %x) { +; CHECK-LABEL: trunc_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.trunc.nxv1f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.trunc.nxv1f32(, metadata) + +define @trunc_nxv2f32( %x) { +; CHECK-LABEL: trunc_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.trunc.nxv2f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.trunc.nxv2f32(, metadata) + +define @trunc_nxv4f32( %x) { +; CHECK-LABEL: trunc_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.trunc.nxv4f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.trunc.nxv4f32(, metadata) + +define @trunc_nxv8f32( %x) { +; CHECK-LABEL: trunc_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.trunc.nxv8f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.trunc.nxv8f32(, metadata) + +define @trunc_nxv16f32( %x) { +; CHECK-LABEL: trunc_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x fa5, a0 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; 
CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.trunc.nxv16f32( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.trunc.nxv16f32(, metadata) + +define @trunc_nxv1f64( %x) { +; CHECK-LABEL: trunc_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI11_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: vmflt.vf v0, v9, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.trunc.nxv1f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.trunc.nxv1f64(, metadata) + +define @trunc_nxv2f64( %x) { +; CHECK-LABEL: trunc_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI12_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: vmflt.vf v0, v10, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.trunc.nxv2f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.trunc.nxv2f64(, metadata) + +define @trunc_nxv4f64( %x) { +; CHECK-LABEL: trunc_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI13_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: vmflt.vf v0, v12, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.trunc.nxv4f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.trunc.nxv4f64(, metadata) + +define @trunc_nxv8f64( %x) { +; CHECK-LABEL: trunc_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v8 +; CHECK-NEXT: lui a0, %hi(.LCPI14_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: vmflt.vf v0, v16, fa5 +; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t +; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %a = call @llvm.experimental.constrained.trunc.nxv8f64( %x, metadata !"fpexcept.strict") + ret %a +} +declare @llvm.experimental.constrained.trunc.nxv8f64(, metadata) -- 2.7.4
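
For anyone exercising the change locally, below is a minimal standalone reproducer. It is not part of the patch; the file name example.ll and the function @example_round are made up for illustration. It calls one of the constrained rounding intrinsics covered by the new tests, on a scalable vector type, and when compiled with the same llc options as the RUN lines above it should be lowered through the masked vfcvt sequence checked in fround-costrained-sdnode.ll.

; Hypothetical reproducer, assuming the same options as the RUN lines above:
;   llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
;       -verify-machineinstrs example.ll
define <vscale x 2 x float> @example_round(<vscale x 2 x float> %x) strictfp {
  ; Constrained (strict-FP) round on a scalable vector; fpexcept.strict keeps
  ; the exception semantics, so the rounding may not raise spurious FP flags.
  %r = call <vscale x 2 x float> @llvm.experimental.constrained.round.nxv2f32(<vscale x 2 x float> %x, metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %r
}
declare <vscale x 2 x float> @llvm.experimental.constrained.round.nxv2f32(<vscale x 2 x float>, metadata)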