From: Yeting Kuo
Date: Mon, 6 Mar 2023 10:13:48 +0000 (+0800)
Subject: [RISCV] Support ISD::STRICT_FADD/FSUB/FMUL/FDIV for vector types.
X-Git-Tag: upstream/17.0.6~14788
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=9637e950cb3a623baab2156c2cb97a930fbaeec4;p=platform%2Fupstream%2Fllvm.git

[RISCV] Support ISD::STRICT_FADD/FSUB/FMUL/FDIV for vector types.

The patch handles fixed-length vector strict-fp operations by lowering them
to new RISCVISD::STRICT_-prefixed ISD nodes; the scalable-vector forms are
marked legal.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D145900
---

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index c3c63ff45e9b..7846feebfe2b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -805,6 +805,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction(FloatingPointVPOps, VT, Custom);

       setOperationAction(ISD::STRICT_FP_EXTEND, VT, Custom);
+      setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
+                          ISD::STRICT_FDIV},
+                         VT, Legal);
     };

   // Sets common extload/truncstore actions on RVV floating-point vector
@@ -1019,6 +1022,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
         setOperationAction(FloatingPointVPOps, VT, Custom);

         setOperationAction(ISD::STRICT_FP_EXTEND, VT, Custom);
+        setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB,
+                            ISD::STRICT_FMUL, ISD::STRICT_FDIV},
+                           VT, Custom);
       }

       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
@@ -4430,6 +4436,18 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
   case ISD::FCOPYSIGN:
     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
+  case ISD::STRICT_FADD:
+    return lowerToScalableOp(Op, DAG, RISCVISD::STRICT_FADD_VL,
+                             /*HasMergeOp*/ true);
+  case ISD::STRICT_FSUB:
+    return lowerToScalableOp(Op, DAG, RISCVISD::STRICT_FSUB_VL,
+                             /*HasMergeOp*/ true);
+  case ISD::STRICT_FMUL:
+    return lowerToScalableOp(Op, DAG, RISCVISD::STRICT_FMUL_VL,
+                             /*HasMergeOp*/ true);
+  case ISD::STRICT_FDIV:
+    return lowerToScalableOp(Op, DAG, RISCVISD::STRICT_FDIV_VL,
+                             /*HasMergeOp*/ true);
   case ISD::MGATHER:
   case ISD::VP_GATHER:
     return lowerMaskedGather(Op, DAG);
@@ -7345,6 +7363,16 @@ SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
     Ops.push_back(Mask);
   Ops.push_back(VL);

+  // StrictFP operations have two result values. Their lowered result should
+  // have the same result count.
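+  // The first result is the vector value and the second is the chain; the
+  // scalable node below is therefore created with a VTList of
+  // {ContainerVT, MVT::Other}, and the chain is forwarded through
+  // getMergeValues so ordering with other chained nodes is preserved.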
+ if (Op->isStrictFPOpcode()) { + SDValue ScalableRes = + DAG.getNode(NewOpc, DL, DAG.getVTList(ContainerVT, MVT::Other), Ops, + Op->getFlags()); + SDValue SubVec = convertFromScalableVector(VT, ScalableRes, DAG, Subtarget); + return DAG.getMergeValues({SubVec, ScalableRes.getValue(1)}, DL); + } + SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops, Op->getFlags()); return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget); @@ -13995,8 +14023,12 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { NODE_NAME_CASE(VFCVT_RM_F_XU_VL) NODE_NAME_CASE(VFCVT_RM_F_X_VL) NODE_NAME_CASE(FP_EXTEND_VL) - NODE_NAME_CASE(STRICT_FP_EXTEND_VL) NODE_NAME_CASE(FP_ROUND_VL) + NODE_NAME_CASE(STRICT_FADD_VL) + NODE_NAME_CASE(STRICT_FSUB_VL) + NODE_NAME_CASE(STRICT_FMUL_VL) + NODE_NAME_CASE(STRICT_FDIV_VL) + NODE_NAME_CASE(STRICT_FP_EXTEND_VL) NODE_NAME_CASE(VWMUL_VL) NODE_NAME_CASE(VWMULU_VL) NODE_NAME_CASE(VWMULSU_VL) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index 5f96e2838310..b3a202476751 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -330,6 +330,10 @@ enum NodeType : unsigned { // result being sign extended to 64 bit. These saturate out of range inputs. STRICT_FCVT_W_RV64 = ISD::FIRST_TARGET_STRICTFP_OPCODE, STRICT_FCVT_WU_RV64, + STRICT_FADD_VL, + STRICT_FSUB_VL, + STRICT_FMUL_VL, + STRICT_FDIV_VL, STRICT_FP_EXTEND_VL, // WARNING: Do not add anything in the end unless you want the node to diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td index 4af99d466946..23fb5364ab16 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td @@ -72,7 +72,7 @@ multiclass VPatUSLoadStoreMaskSDNode (store_instr VR:$rs2, GPR:$rs1, m.AVL, m.Log2SEW)>; } -class VPatBinarySDNode_VV; -class VPatBinarySDNode_XI; -multiclass VPatBinarySDNode_VV_VX { +multiclass VPatBinarySDNode_VV_VX { foreach vti = AllIntegerVectors in { def : VPatBinarySDNode_VV { } } -multiclass VPatBinarySDNode_VV_VX_VI : VPatBinarySDNode_VV_VX { foreach vti = AllIntegerVectors in { @@ -131,7 +131,7 @@ multiclass VPatBinarySDNode_VV_VX_VI; -multiclass VPatBinaryFPSDNode_VV_VF { +multiclass VPatBinaryFPSDNode_VV_VF { foreach vti = AllFloatVectors in { def : VPatBinarySDNode_VV { } } -multiclass VPatBinaryFPSDNode_R_VF { +multiclass VPatBinaryFPSDNode_R_VF { foreach fvti = AllFloatVectors in def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)), (fvti.Vector fvti.RegClass:$rs1))), @@ -826,18 +826,18 @@ foreach mti = AllMasks in { let Predicates = [HasVInstructionsAnyF] in { // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions -defm : VPatBinaryFPSDNode_VV_VF; -defm : VPatBinaryFPSDNode_VV_VF; -defm : VPatBinaryFPSDNode_R_VF; +defm : VPatBinaryFPSDNode_VV_VF; +defm : VPatBinaryFPSDNode_VV_VF; +defm : VPatBinaryFPSDNode_R_VF; // 13.3. Vector Widening Floating-Point Add/Subtract Instructions defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF; defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF; // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions -defm : VPatBinaryFPSDNode_VV_VF; -defm : VPatBinaryFPSDNode_VV_VF; -defm : VPatBinaryFPSDNode_R_VF; +defm : VPatBinaryFPSDNode_VV_VF; +defm : VPatBinaryFPSDNode_VV_VF; +defm : VPatBinaryFPSDNode_R_VF; // 13.5. 
Vector Widening Floating-Point Multiply Instructions defm : VPatWidenBinaryFPSDNode_VV_VF; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td index 0e4024111eb8..5087628749cb 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -107,6 +107,24 @@ def riscv_fcopysign_vl : SDNode<"RISCVISD::FCOPYSIGN_VL", SDT_RISCVCopySign_VL>; def riscv_fminnum_vl : SDNode<"RISCVISD::FMINNUM_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; def riscv_fmaxnum_vl : SDNode<"RISCVISD::FMAXNUM_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; +def riscv_strict_fadd_vl : SDNode<"RISCVISD::STRICT_FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>; +def riscv_strict_fsub_vl : SDNode<"RISCVISD::STRICT_FSUB_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>; +def riscv_strict_fmul_vl : SDNode<"RISCVISD::STRICT_FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>; +def riscv_strict_fdiv_vl : SDNode<"RISCVISD::STRICT_FDIV_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>; + +def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), + [(riscv_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), + (riscv_strict_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>; +def any_riscv_fsub_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), + [(riscv_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), + (riscv_strict_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>; +def any_riscv_fmul_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), + [(riscv_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), + (riscv_strict_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>; +def any_riscv_fdiv_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), + [(riscv_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl), + (riscv_strict_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>; + def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, @@ -421,7 +439,7 @@ def sew16simm5 : ComplexPattern", []>; def sew32simm5 : ComplexPattern", []>; def sew64simm5 : ComplexPattern", []>; -multiclass VPatBinaryVL_V; } -multiclass VPatBinaryVL_XI; } -multiclass VPatBinaryVL_VV_VX { +multiclass VPatBinaryVL_VV_VX { foreach vti = AllIntegerVectors in { defm : VPatBinaryVL_V { } } -multiclass VPatBinaryVL_VV_VX_VI : VPatBinaryVL_VV_VX { foreach vti = AllIntegerVectors in { @@ -531,7 +549,7 @@ multiclass VPatBinaryVL_VV_VX_VI { +multiclass VPatBinaryWVL_VV_VX { foreach VtiToWti = AllWidenableIntVectors in { defvar vti = VtiToWti.Vti; defvar wti = VtiToWti.Wti; @@ -545,7 +563,7 @@ multiclass VPatBinaryWVL_VV_VX { SplatPat, GPR>; } } -multiclass VPatBinaryWVL_VV_VX_WV_WX : VPatBinaryWVL_VV_VX { foreach VtiToWti = AllWidenableIntVectors in { @@ -565,7 +583,7 @@ multiclass VPatBinaryWVL_VV_VX_WV_WX { +multiclass VPatBinaryNVL_WV_WX_WI { foreach VtiToWti = AllWidenableIntVectors in { defvar vti = VtiToWti.Vti; defvar wti = VtiToWti.Wti; @@ -585,7 +603,7 @@ multiclass VPatBinaryNVL_WV_WX_WI { } } -multiclass VPatBinaryVL_VF; } -multiclass VPatBinaryFPVL_VV_VF { +multiclass VPatBinaryFPVL_VV_VF { foreach vti = AllFloatVectors in { defm : VPatBinaryVL_V { } } -multiclass VPatBinaryFPVL_R_VF { +multiclass VPatBinaryFPVL_R_VF { foreach fvti = AllFloatVectors in { def 
: Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), fvti.RegClass:$rs1, @@ -1612,18 +1630,18 @@ defm : VPatBinaryVL_VV_VX; let Predicates = [HasVInstructionsAnyF] in { // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions -defm : VPatBinaryFPVL_VV_VF; -defm : VPatBinaryFPVL_VV_VF; -defm : VPatBinaryFPVL_R_VF; +defm : VPatBinaryFPVL_VV_VF; +defm : VPatBinaryFPVL_VV_VF; +defm : VPatBinaryFPVL_R_VF; // 13.3. Vector Widening Floating-Point Add/Subtract Instructions defm : VPatWidenBinaryFPVL_VV_VF_WV_WF; defm : VPatWidenBinaryFPVL_VV_VF_WV_WF; // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions -defm : VPatBinaryFPVL_VV_VF; -defm : VPatBinaryFPVL_VV_VF; -defm : VPatBinaryFPVL_R_VF; +defm : VPatBinaryFPVL_VV_VF; +defm : VPatBinaryFPVL_VV_VF; +defm : VPatBinaryFPVL_R_VF; // 13.5. Vector Widening Floating-Point Multiply Instructions defm : VPatWidenBinaryFPVL_VV_VF; diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll new file mode 100644 index 000000000000..f7b51e8a7aea --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll @@ -0,0 +1,295 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128\ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128\ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half>, <2 x half>, metadata, metadata) +define <2 x half> @vfadd_vv_v2f16(<2 x half> %va, <2 x half> %vb) { +; CHECK-LABEL: vfadd_vv_v2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half> %va, <2 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <2 x half> %vc +} + +define <2 x half> @vfadd_vf_v2f16(<2 x half> %va, half %b) { +; CHECK-LABEL: vfadd_vf_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <2 x half> poison, half %b, i32 0 + %splat = shufflevector <2 x half> %head, <2 x half> poison, <2 x i32> zeroinitializer + %vc = call <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half> %va, <2 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <2 x half> %vc +} + +declare <4 x half> @llvm.experimental.constrained.fadd.v4f16(<4 x half>, <4 x half>, metadata, metadata) +define <4 x half> @vfadd_vv_v4f16(<4 x half> %va, <4 x half> %vb) { +; CHECK-LABEL: vfadd_vv_v4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <4 x half> @llvm.experimental.constrained.fadd.v4f16(<4 x half> %va, <4 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x half> %vc +} + +define <4 x half> @vfadd_vf_v4f16(<4 x half> %va, half %b) { +; CHECK-LABEL: vfadd_vf_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <4 x half> poison, half %b, i32 0 + %splat = shufflevector <4 x 
half> %head, <4 x half> poison, <4 x i32> zeroinitializer + %vc = call <4 x half> @llvm.experimental.constrained.fadd.v4f16(<4 x half> %va, <4 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x half> %vc +} + +declare <8 x half> @llvm.experimental.constrained.fadd.v8f16(<8 x half>, <8 x half>, metadata, metadata) +define <8 x half> @vfadd_vv_v8f16(<8 x half> %va, <8 x half> %vb) { +; CHECK-LABEL: vfadd_vv_v8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <8 x half> @llvm.experimental.constrained.fadd.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x half> %vc +} + +define <8 x half> @vfadd_vf_v8f16(<8 x half> %va, half %b) { +; CHECK-LABEL: vfadd_vf_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x half> poison, half %b, i32 0 + %splat = shufflevector <8 x half> %head, <8 x half> poison, <8 x i32> zeroinitializer + %vc = call <8 x half> @llvm.experimental.constrained.fadd.v8f16(<8 x half> %va, <8 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x half> %vc +} + +declare <16 x half> @llvm.experimental.constrained.fadd.v16f16(<16 x half>, <16 x half>, metadata, metadata) +define <16 x half> @vfadd_vv_v16f16(<16 x half> %va, <16 x half> %vb) { +; CHECK-LABEL: vfadd_vv_v16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call <16 x half> @llvm.experimental.constrained.fadd.v16f16(<16 x half> %va, <16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <16 x half> %vc +} + +define <16 x half> @vfadd_vf_v16f16(<16 x half> %va, half %b) { +; CHECK-LABEL: vfadd_vf_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <16 x half> poison, half %b, i32 0 + %splat = shufflevector <16 x half> %head, <16 x half> poison, <16 x i32> zeroinitializer + %vc = call <16 x half> @llvm.experimental.constrained.fadd.v16f16(<16 x half> %va, <16 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <16 x half> %vc +} + +declare <32 x half> @llvm.experimental.constrained.fadd.v32f16(<32 x half>, <32 x half>, metadata, metadata) +define <32 x half> @vfadd_vv_v32f16(<32 x half> %va, <32 x half> %vb) { +; CHECK-LABEL: vfadd_vv_v32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call <32 x half> @llvm.experimental.constrained.fadd.v32f16(<32 x half> %va, <32 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <32 x half> %vc +} + +define <32 x half> @vfadd_vf_v32f16(<32 x half> %va, half %b) { +; CHECK-LABEL: vfadd_vf_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <32 x half> poison, half %b, i32 0 + %splat = shufflevector <32 x half> %head, <32 x half> poison, <32 x i32> zeroinitializer + %vc = call <32 x half> @llvm.experimental.constrained.fadd.v32f16(<32 x half> %va, <32 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <32 x 
half> %vc +} + +declare <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float>, <2 x float>, metadata, metadata) +define <2 x float> @vfadd_vv_v2f32(<2 x float> %va, <2 x float> %vb) { +; CHECK-LABEL: vfadd_vv_v2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %va, <2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <2 x float> %vc +} + +define <2 x float> @vfadd_vf_v2f32(<2 x float> %va, float %b) { +; CHECK-LABEL: vfadd_vf_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <2 x float> poison, float %b, i32 0 + %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer + %vc = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %va, <2 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <2 x float> %vc +} + +declare <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float>, <4 x float>, metadata, metadata) +define <4 x float> @vfadd_vv_v4f32(<4 x float> %va, <4 x float> %vb) { +; CHECK-LABEL: vfadd_vv_v4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> %va, <4 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x float> %vc +} + +define <4 x float> @vfadd_vf_v4f32(<4 x float> %va, float %b) { +; CHECK-LABEL: vfadd_vf_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <4 x float> poison, float %b, i32 0 + %splat = shufflevector <4 x float> %head, <4 x float> poison, <4 x i32> zeroinitializer + %vc = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> %va, <4 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x float> %vc +} + +declare <8 x float> @llvm.experimental.constrained.fadd.v8f32(<8 x float>, <8 x float>, metadata, metadata) +define <8 x float> @vfadd_vv_v8f32(<8 x float> %va, <8 x float> %vb) { +; CHECK-LABEL: vfadd_vv_v8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call <8 x float> @llvm.experimental.constrained.fadd.v8f32(<8 x float> %va, <8 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x float> %vc +} + +define <8 x float> @vfadd_vf_v8f32(<8 x float> %va, float %b) { +; CHECK-LABEL: vfadd_vf_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x float> poison, float %b, i32 0 + %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer + %vc = call <8 x float> @llvm.experimental.constrained.fadd.v8f32(<8 x float> %va, <8 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x float> %vc +} + +declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadata, metadata) +define <16 x float> @vfadd_vv_v16f32(<16 x float> %va, <16 x float> %vb) { +; CHECK-LABEL: vfadd_vv_v16f32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <16 x float> %vc +} + +define <16 x float> @vfadd_vf_v16f32(<16 x float> %va, float %b) { +; CHECK-LABEL: vfadd_vf_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <16 x float> poison, float %b, i32 0 + %splat = shufflevector <16 x float> %head, <16 x float> poison, <16 x i32> zeroinitializer + %vc = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <16 x float> %vc +} + +declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata) +define <2 x double> @vfadd_vv_v2f64(<2 x double> %va, <2 x double> %vb) { +; CHECK-LABEL: vfadd_vv_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %va, <2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <2 x double> %vc +} + +define <2 x double> @vfadd_vf_v2f64(<2 x double> %va, double %b) { +; CHECK-LABEL: vfadd_vf_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <2 x double> poison, double %b, i32 0 + %splat = shufflevector <2 x double> %head, <2 x double> poison, <2 x i32> zeroinitializer + %vc = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %va, <2 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <2 x double> %vc +} + +declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata, metadata) +define <4 x double> @vfadd_vv_v4f64(<4 x double> %va, <4 x double> %vb) { +; CHECK-LABEL: vfadd_vv_v4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double> %va, <4 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x double> %vc +} + +define <4 x double> @vfadd_vf_v4f64(<4 x double> %va, double %b) { +; CHECK-LABEL: vfadd_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <4 x double> poison, double %b, i32 0 + %splat = shufflevector <4 x double> %head, <4 x double> poison, <4 x i32> zeroinitializer + %vc = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double> %va, <4 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x double> %vc +} + +declare <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double>, <8 x double>, metadata, metadata) +define <8 x double> @vfadd_vv_v8f64(<8 x double> %va, <8 x double> %vb) { +; CHECK-LABEL: vfadd_vv_v8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double> %va, <8 x 
double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x double> %vc +} + +define <8 x double> @vfadd_vf_v8f64(<8 x double> %va, double %b) { +; CHECK-LABEL: vfadd_vf_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x double> poison, double %b, i32 0 + %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer + %vc = call <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double> %va, <8 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x double> %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll new file mode 100644 index 000000000000..900386b621ec --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll @@ -0,0 +1,331 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <2 x half> @llvm.experimental.constrained.fdiv.v2f16(<2 x half>, <2 x half>, metadata, metadata) +define <2 x half> @vfdiv_vv_v2f16(<2 x half> %va, <2 x half> %vb) { +; CHECK-LABEL: vfdiv_vv_v2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <2 x half> @llvm.experimental.constrained.fdiv.v2f16(<2 x half> %va, <2 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <2 x half> %vc +} + +define <2 x half> @vfdiv_vf_v2f16(<2 x half> %va, half %b) { +; CHECK-LABEL: vfdiv_vf_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <2 x half> poison, half %b, i32 0 + %splat = shufflevector <2 x half> %head, <2 x half> poison, <2 x i32> zeroinitializer + %vc = call <2 x half> @llvm.experimental.constrained.fdiv.v2f16(<2 x half> %va, <2 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <2 x half> %vc +} + +declare <4 x half> @llvm.experimental.constrained.fdiv.v4f16(<4 x half>, <4 x half>, metadata, metadata) +define <4 x half> @vfdiv_vv_v4f16(<4 x half> %va, <4 x half> %vb) { +; CHECK-LABEL: vfdiv_vv_v4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <4 x half> @llvm.experimental.constrained.fdiv.v4f16(<4 x half> %va, <4 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x half> %vc +} + +define <4 x half> @vfdiv_vf_v4f16(<4 x half> %va, half %b) { +; CHECK-LABEL: vfdiv_vf_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <4 x half> poison, half %b, i32 0 + %splat = shufflevector <4 x half> %head, <4 x half> poison, <4 x i32> zeroinitializer + %vc = call <4 x half> @llvm.experimental.constrained.fdiv.v4f16(<4 x half> %va, <4 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x half> %vc +} + +declare <8 x half> @llvm.experimental.constrained.fdiv.v8f16(<8 x 
half>, <8 x half>, metadata, metadata) +define <8 x half> @vfdiv_vv_v8f16(<8 x half> %va, <8 x half> %vb) { +; CHECK-LABEL: vfdiv_vv_v8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <8 x half> @llvm.experimental.constrained.fdiv.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x half> %vc +} + +define <8 x half> @vfdiv_vf_v8f16(<8 x half> %va, half %b) { +; CHECK-LABEL: vfdiv_vf_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x half> poison, half %b, i32 0 + %splat = shufflevector <8 x half> %head, <8 x half> poison, <8 x i32> zeroinitializer + %vc = call <8 x half> @llvm.experimental.constrained.fdiv.v8f16(<8 x half> %va, <8 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x half> %vc +} + +define <8 x half> @vfdiv_fv_v8f16(<8 x half> %va, half %b) { +; CHECK-LABEL: vfdiv_fv_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x half> poison, half %b, i32 0 + %splat = shufflevector <8 x half> %head, <8 x half> poison, <8 x i32> zeroinitializer + %vc = call <8 x half> @llvm.experimental.constrained.fdiv.v8f16(<8 x half> %splat, <8 x half> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x half> %vc +} + +declare <16 x half> @llvm.experimental.constrained.fdiv.v16f16(<16 x half>, <16 x half>, metadata, metadata) +define <16 x half> @vfdiv_vv_v16f16(<16 x half> %va, <16 x half> %vb) { +; CHECK-LABEL: vfdiv_vv_v16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call <16 x half> @llvm.experimental.constrained.fdiv.v16f16(<16 x half> %va, <16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <16 x half> %vc +} + +define <16 x half> @vfdiv_vf_v16f16(<16 x half> %va, half %b) { +; CHECK-LABEL: vfdiv_vf_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <16 x half> poison, half %b, i32 0 + %splat = shufflevector <16 x half> %head, <16 x half> poison, <16 x i32> zeroinitializer + %vc = call <16 x half> @llvm.experimental.constrained.fdiv.v16f16(<16 x half> %va, <16 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <16 x half> %vc +} + +declare <32 x half> @llvm.experimental.constrained.fdiv.v32f16(<32 x half>, <32 x half>, metadata, metadata) +define <32 x half> @vfdiv_vv_v32f16(<32 x half> %va, <32 x half> %vb) { +; CHECK-LABEL: vfdiv_vv_v32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call <32 x half> @llvm.experimental.constrained.fdiv.v32f16(<32 x half> %va, <32 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <32 x half> %vc +} + +define <32 x half> @vfdiv_vf_v32f16(<32 x half> %va, half %b) { +; CHECK-LABEL: vfdiv_vf_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <32 x half> poison, half %b, i32 0 + %splat = shufflevector <32 
x half> %head, <32 x half> poison, <32 x i32> zeroinitializer + %vc = call <32 x half> @llvm.experimental.constrained.fdiv.v32f16(<32 x half> %va, <32 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <32 x half> %vc +} + +declare <2 x float> @llvm.experimental.constrained.fdiv.v2f32(<2 x float>, <2 x float>, metadata, metadata) +define <2 x float> @vfdiv_vv_v2f32(<2 x float> %va, <2 x float> %vb) { +; CHECK-LABEL: vfdiv_vv_v2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <2 x float> @llvm.experimental.constrained.fdiv.v2f32(<2 x float> %va, <2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <2 x float> %vc +} + +define <2 x float> @vfdiv_vf_v2f32(<2 x float> %va, float %b) { +; CHECK-LABEL: vfdiv_vf_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <2 x float> poison, float %b, i32 0 + %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer + %vc = call <2 x float> @llvm.experimental.constrained.fdiv.v2f32(<2 x float> %va, <2 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <2 x float> %vc +} + +declare <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float>, <4 x float>, metadata, metadata) +define <4 x float> @vfdiv_vv_v4f32(<4 x float> %va, <4 x float> %vb) { +; CHECK-LABEL: vfdiv_vv_v4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float> %va, <4 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x float> %vc +} + +define <4 x float> @vfdiv_vf_v4f32(<4 x float> %va, float %b) { +; CHECK-LABEL: vfdiv_vf_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <4 x float> poison, float %b, i32 0 + %splat = shufflevector <4 x float> %head, <4 x float> poison, <4 x i32> zeroinitializer + %vc = call <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float> %va, <4 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x float> %vc +} + +declare <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float>, <8 x float>, metadata, metadata) +define <8 x float> @vfdiv_vv_v8f32(<8 x float> %va, <8 x float> %vb) { +; CHECK-LABEL: vfdiv_vv_v8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float> %va, <8 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x float> %vc +} + +define <8 x float> @vfdiv_vf_v8f32(<8 x float> %va, float %b) { +; CHECK-LABEL: vfdiv_vf_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x float> poison, float %b, i32 0 + %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer + %vc = call <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float> %va, <8 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x float> %vc +} + +define <8 x float> 
@vfdiv_fv_v8f32(<8 x float> %va, float %b) { +; CHECK-LABEL: vfdiv_fv_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x float> poison, float %b, i32 0 + %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer + %vc = call <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float> %splat, <8 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x float> %vc +} + +declare <16 x float> @llvm.experimental.constrained.fdiv.v16f32(<16 x float>, <16 x float>, metadata, metadata) +define <16 x float> @vfdiv_vv_v16f32(<16 x float> %va, <16 x float> %vb) { +; CHECK-LABEL: vfdiv_vv_v16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call <16 x float> @llvm.experimental.constrained.fdiv.v16f32(<16 x float> %va, <16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <16 x float> %vc +} + +define <16 x float> @vfdiv_vf_v16f32(<16 x float> %va, float %b) { +; CHECK-LABEL: vfdiv_vf_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <16 x float> poison, float %b, i32 0 + %splat = shufflevector <16 x float> %head, <16 x float> poison, <16 x i32> zeroinitializer + %vc = call <16 x float> @llvm.experimental.constrained.fdiv.v16f32(<16 x float> %va, <16 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <16 x float> %vc +} + +declare <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double>, <2 x double>, metadata, metadata) +define <2 x double> @vfdiv_vv_v2f64(<2 x double> %va, <2 x double> %vb) { +; CHECK-LABEL: vfdiv_vv_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double> %va, <2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <2 x double> %vc +} + +define <2 x double> @vfdiv_vf_v2f64(<2 x double> %va, double %b) { +; CHECK-LABEL: vfdiv_vf_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <2 x double> poison, double %b, i32 0 + %splat = shufflevector <2 x double> %head, <2 x double> poison, <2 x i32> zeroinitializer + %vc = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double> %va, <2 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <2 x double> %vc +} + +declare <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double>, <4 x double>, metadata, metadata) +define <4 x double> @vfdiv_vv_v4f64(<4 x double> %va, <4 x double> %vb) { +; CHECK-LABEL: vfdiv_vv_v4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double> %va, <4 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x double> %vc +} + +define <4 x double> @vfdiv_vf_v4f64(<4 x double> %va, double %b) { +; CHECK-LABEL: vfdiv_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = 
insertelement <4 x double> poison, double %b, i32 0 + %splat = shufflevector <4 x double> %head, <4 x double> poison, <4 x i32> zeroinitializer + %vc = call <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double> %va, <4 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x double> %vc +} + +declare <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double>, <8 x double>, metadata, metadata) +define <8 x double> @vfdiv_vv_v8f64(<8 x double> %va, <8 x double> %vb) { +; CHECK-LABEL: vfdiv_vv_v8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x double> %vc +} + +define <8 x double> @vfdiv_vf_v8f64(<8 x double> %va, double %b) { +; CHECK-LABEL: vfdiv_vf_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x double> poison, double %b, i32 0 + %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer + %vc = call <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double> %va, <8 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x double> %vc +} + +define <8 x double> @vfdiv_fv_v8f64(<8 x double> %va, double %b) { +; CHECK-LABEL: vfdiv_fv_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x double> poison, double %b, i32 0 + %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer + %vc = call <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double> %splat, <8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x double> %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll new file mode 100644 index 000000000000..13ae8831391a --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll @@ -0,0 +1,367 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <1 x half> @llvm.experimental.constrained.fmul.v1f16(<1 x half>, <1 x half>, metadata, metadata) +define <1 x half> @vfmul_vv_v1f16(<1 x half> %va, <1 x half> %vb) { +; CHECK-LABEL: vfmul_vv_v1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <1 x half> @llvm.experimental.constrained.fmul.v1f16(<1 x half> %va, <1 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <1 x half> %vc +} + +define <1 x half> @vfmul_vf_v1f16(<1 x half> %va, half %b) { +; CHECK-LABEL: vfmul_vf_v1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <1 x half> poison, half %b, i32 0 + %splat = shufflevector <1 x half> %head, <1 x half> poison, 
<1 x i32> zeroinitializer + %vc = call <1 x half> @llvm.experimental.constrained.fmul.v1f16(<1 x half> %va, <1 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <1 x half> %vc +} + +declare <2 x half> @llvm.experimental.constrained.fmul.v2f16(<2 x half>, <2 x half>, metadata, metadata) +define <2 x half> @vfmul_vv_v2f16(<2 x half> %va, <2 x half> %vb) { +; CHECK-LABEL: vfmul_vv_v2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <2 x half> @llvm.experimental.constrained.fmul.v2f16(<2 x half> %va, <2 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <2 x half> %vc +} + +define <2 x half> @vfmul_vf_v2f16(<2 x half> %va, half %b) { +; CHECK-LABEL: vfmul_vf_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <2 x half> poison, half %b, i32 0 + %splat = shufflevector <2 x half> %head, <2 x half> poison, <2 x i32> zeroinitializer + %vc = call <2 x half> @llvm.experimental.constrained.fmul.v2f16(<2 x half> %va, <2 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <2 x half> %vc +} + +declare <4 x half> @llvm.experimental.constrained.fmul.v4f16(<4 x half>, <4 x half>, metadata, metadata) +define <4 x half> @vfmul_vv_v4f16(<4 x half> %va, <4 x half> %vb) { +; CHECK-LABEL: vfmul_vv_v4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <4 x half> @llvm.experimental.constrained.fmul.v4f16(<4 x half> %va, <4 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x half> %vc +} + +define <4 x half> @vfmul_vf_v4f16(<4 x half> %va, half %b) { +; CHECK-LABEL: vfmul_vf_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <4 x half> poison, half %b, i32 0 + %splat = shufflevector <4 x half> %head, <4 x half> poison, <4 x i32> zeroinitializer + %vc = call <4 x half> @llvm.experimental.constrained.fmul.v4f16(<4 x half> %va, <4 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x half> %vc +} + +declare <8 x half> @llvm.experimental.constrained.fmul.v8f16(<8 x half>, <8 x half>, metadata, metadata) +define <8 x half> @vfmul_vv_v8f16(<8 x half> %va, <8 x half> %vb) { +; CHECK-LABEL: vfmul_vv_v8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <8 x half> @llvm.experimental.constrained.fmul.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x half> %vc +} + +define <8 x half> @vfmul_vf_v8f16(<8 x half> %va, half %b) { +; CHECK-LABEL: vfmul_vf_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x half> poison, half %b, i32 0 + %splat = shufflevector <8 x half> %head, <8 x half> poison, <8 x i32> zeroinitializer + %vc = call <8 x half> @llvm.experimental.constrained.fmul.v8f16(<8 x half> %va, <8 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x half> %vc +} + +declare <16 x half> @llvm.experimental.constrained.fmul.v16f16(<16 x half>, <16 x half>, metadata, metadata) +define <16 x 
half> @vfmul_vv_v16f16(<16 x half> %va, <16 x half> %vb) { +; CHECK-LABEL: vfmul_vv_v16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call <16 x half> @llvm.experimental.constrained.fmul.v16f16(<16 x half> %va, <16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <16 x half> %vc +} + +define <16 x half> @vfmul_vf_v16f16(<16 x half> %va, half %b) { +; CHECK-LABEL: vfmul_vf_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <16 x half> poison, half %b, i32 0 + %splat = shufflevector <16 x half> %head, <16 x half> poison, <16 x i32> zeroinitializer + %vc = call <16 x half> @llvm.experimental.constrained.fmul.v16f16(<16 x half> %va, <16 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <16 x half> %vc +} + +declare <32 x half> @llvm.experimental.constrained.fmul.v32f16(<32 x half>, <32 x half>, metadata, metadata) +define <32 x half> @vfmul_vv_v32f16(<32 x half> %va, <32 x half> %vb) { +; CHECK-LABEL: vfmul_vv_v32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call <32 x half> @llvm.experimental.constrained.fmul.v32f16(<32 x half> %va, <32 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <32 x half> %vc +} + +define <32 x half> @vfmul_vf_v32f16(<32 x half> %va, half %b) { +; CHECK-LABEL: vfmul_vf_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <32 x half> poison, half %b, i32 0 + %splat = shufflevector <32 x half> %head, <32 x half> poison, <32 x i32> zeroinitializer + %vc = call <32 x half> @llvm.experimental.constrained.fmul.v32f16(<32 x half> %va, <32 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <32 x half> %vc +} + +declare <1 x float> @llvm.experimental.constrained.fmul.v1f32(<1 x float>, <1 x float>, metadata, metadata) +define <1 x float> @vfmul_vv_v1f32(<1 x float> %va, <1 x float> %vb) { +; CHECK-LABEL: vfmul_vv_v1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <1 x float> @llvm.experimental.constrained.fmul.v1f32(<1 x float> %va, <1 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <1 x float> %vc +} + +define <1 x float> @vfmul_vf_v1f32(<1 x float> %va, float %b) { +; CHECK-LABEL: vfmul_vf_v1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <1 x float> poison, float %b, i32 0 + %splat = shufflevector <1 x float> %head, <1 x float> poison, <1 x i32> zeroinitializer + %vc = call <1 x float> @llvm.experimental.constrained.fmul.v1f32(<1 x float> %va, <1 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <1 x float> %vc +} + +declare <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float>, <2 x float>, metadata, metadata) +define <2 x float> @vfmul_vv_v2f32(<2 x float> %va, <2 x float> %vb) { +; CHECK-LABEL: vfmul_vv_v2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; 
CHECK-NEXT: ret +entry: + %vc = call <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> %va, <2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <2 x float> %vc +} + +define <2 x float> @vfmul_vf_v2f32(<2 x float> %va, float %b) { +; CHECK-LABEL: vfmul_vf_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <2 x float> poison, float %b, i32 0 + %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer + %vc = call <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> %va, <2 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <2 x float> %vc +} + +declare <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float>, <4 x float>, metadata, metadata) +define <4 x float> @vfmul_vv_v4f32(<4 x float> %va, <4 x float> %vb) { +; CHECK-LABEL: vfmul_vv_v4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> %va, <4 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x float> %vc +} + +define <4 x float> @vfmul_vf_v4f32(<4 x float> %va, float %b) { +; CHECK-LABEL: vfmul_vf_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <4 x float> poison, float %b, i32 0 + %splat = shufflevector <4 x float> %head, <4 x float> poison, <4 x i32> zeroinitializer + %vc = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> %va, <4 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x float> %vc +} + +declare <8 x float> @llvm.experimental.constrained.fmul.v8f32(<8 x float>, <8 x float>, metadata, metadata) +define <8 x float> @vfmul_vv_v8f32(<8 x float> %va, <8 x float> %vb) { +; CHECK-LABEL: vfmul_vv_v8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call <8 x float> @llvm.experimental.constrained.fmul.v8f32(<8 x float> %va, <8 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x float> %vc +} + +define <8 x float> @vfmul_vf_v8f32(<8 x float> %va, float %b) { +; CHECK-LABEL: vfmul_vf_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x float> poison, float %b, i32 0 + %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer + %vc = call <8 x float> @llvm.experimental.constrained.fmul.v8f32(<8 x float> %va, <8 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x float> %vc +} + +declare <16 x float> @llvm.experimental.constrained.fmul.v16f32(<16 x float>, <16 x float>, metadata, metadata) +define <16 x float> @vfmul_vv_v16f32(<16 x float> %va, <16 x float> %vb) { +; CHECK-LABEL: vfmul_vv_v16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call <16 x float> @llvm.experimental.constrained.fmul.v16f32(<16 x float> %va, <16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <16 x float> %vc +} + +define <16 x float> @vfmul_vf_v16f32(<16 x 
float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_v16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <16 x float> poison, float %b, i32 0
+ %splat = shufflevector <16 x float> %head, <16 x float> poison, <16 x i32> zeroinitializer
+ %vc = call <16 x float> @llvm.experimental.constrained.fmul.v16f32(<16 x float> %va, <16 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <16 x float> %vc
+}
+
+declare <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double>, <1 x double>, metadata, metadata)
+define <1 x double> @vfmul_vv_v1f64(<1 x double> %va, <1 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_v1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %vc = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> %va, <1 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <1 x double> %vc
+}
+
+define <1 x double> @vfmul_vf_v1f64(<1 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_v1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <1 x double> poison, double %b, i32 0
+ %splat = shufflevector <1 x double> %head, <1 x double> poison, <1 x i32> zeroinitializer
+ %vc = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> %va, <1 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <1 x double> %vc
+}
+
+declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+define <2 x double> @vfmul_vv_v2f64(<2 x double> %va, <2 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %vc = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %va, <2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <2 x double> %vc
+}
+
+define <2 x double> @vfmul_vf_v2f64(<2 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <2 x double> poison, double %b, i32 0
+ %splat = shufflevector <2 x double> %head, <2 x double> poison, <2 x i32> zeroinitializer
+ %vc = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %va, <2 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <2 x double> %vc
+}
+
+declare <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double>, <4 x double>, metadata, metadata)
+define <4 x double> @vfmul_vv_v4f64(<4 x double> %va, <4 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %vc = call <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double> %va, <4 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <4 x double> %vc
+}
+
+define <4 x double> @vfmul_vf_v4f64(<4 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <4 x double> poison, double %b, i32 0
+ %splat = shufflevector <4 x double> %head, <4 x double> poison, <4 x i32> zeroinitializer
+ %vc = call <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double> %va, <4 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <4 x double> %vc
+}
+
+declare <8 x double> @llvm.experimental.constrained.fmul.v8f64(<8 x double>, <8 x double>, metadata, metadata)
+define <8 x double> @vfmul_vv_v8f64(<8 x double> %va, <8 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_v8f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %vc = call <8 x double> @llvm.experimental.constrained.fmul.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <8 x double> %vc
+}
+
+define <8 x double> @vfmul_vf_v8f64(<8 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <8 x double> poison, double %b, i32 0
+ %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer
+ %vc = call <8 x double> @llvm.experimental.constrained.fmul.v8f64(<8 x double> %va, <8 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <8 x double> %vc
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll
new file mode 100644
index 000000000000..50e6e2dc5115
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll
@@ -0,0 +1,331 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+
+declare <2 x half> @llvm.experimental.constrained.fsub.v2f16(<2 x half>, <2 x half>, metadata, metadata)
+define <2 x half> @vfsub_vv_v2f16(<2 x half> %va, <2 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_v2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %vc = call <2 x half> @llvm.experimental.constrained.fsub.v2f16(<2 x half> %va, <2 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <2 x half> %vc
+}
+
+define <2 x half> @vfsub_vf_v2f16(<2 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <2 x half> poison, half %b, i32 0
+ %splat = shufflevector <2 x half> %head, <2 x half> poison, <2 x i32> zeroinitializer
+ %vc = call <2 x half> @llvm.experimental.constrained.fsub.v2f16(<2 x half> %va, <2 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <2 x half> %vc
+}
+
+declare <4 x half> @llvm.experimental.constrained.fsub.v4f16(<4 x half>, <4 x half>, metadata, metadata)
+define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_v4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %vc = call <4 x half> @llvm.experimental.constrained.fsub.v4f16(<4 x half> %va, <4 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <4 x half> %vc
+}
+
+define <4 x half> @vfsub_vf_v4f16(<4 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <4 x half> poison, half %b, i32 0
+ %splat = shufflevector <4 x half> %head, <4 x half> poison, <4 x i32> zeroinitializer
+ %vc = call <4 x half> @llvm.experimental.constrained.fsub.v4f16(<4 x half> %va, <4 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <4 x half> %vc
+}
+
+declare <8 x half> @llvm.experimental.constrained.fsub.v8f16(<8 x half>, <8 x half>, metadata, metadata)
+define <8 x half> @vfsub_vv_v8f16(<8 x half> %va, <8 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_v8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %vc = call <8 x half> @llvm.experimental.constrained.fsub.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <8 x half> %vc
+}
+
+define <8 x half> @vfsub_vf_v8f16(<8 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <8 x half> poison, half %b, i32 0
+ %splat = shufflevector <8 x half> %head, <8 x half> poison, <8 x i32> zeroinitializer
+ %vc = call <8 x half> @llvm.experimental.constrained.fsub.v8f16(<8 x half> %va, <8 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <8 x half> %vc
+}
+
+define <8 x half> @vfsub_fv_v8f16(<8 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_fv_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfrsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <8 x half> poison, half %b, i32 0
+ %splat = shufflevector <8 x half> %head, <8 x half> poison, <8 x i32> zeroinitializer
+ %vc = call <8 x half> @llvm.experimental.constrained.fsub.v8f16(<8 x half> %splat, <8 x half> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <8 x half> %vc
+}
+
+declare <16 x half> @llvm.experimental.constrained.fsub.v16f16(<16 x half>, <16 x half>, metadata, metadata)
+define <16 x half> @vfsub_vv_v16f16(<16 x half> %va, <16 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_v16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %vc = call <16 x half> @llvm.experimental.constrained.fsub.v16f16(<16 x half> %va, <16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <16 x half> %vc
+}
+
+define <16 x half> @vfsub_vf_v16f16(<16 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_v16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <16 x half> poison, half %b, i32 0
+ %splat = shufflevector <16 x half> %head, <16 x half> poison, <16 x i32> zeroinitializer
+ %vc = call <16 x half> @llvm.experimental.constrained.fsub.v16f16(<16 x half> %va, <16 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <16 x half> %vc
+}
+
+declare <32 x half> @llvm.experimental.constrained.fsub.v32f16(<32 x half>, <32 x half>, metadata, metadata)
+define <32 x half> @vfsub_vv_v32f16(<32 x half> %va, <32 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_v32f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %vc = call <32 x half> @llvm.experimental.constrained.fsub.v32f16(<32 x half> %va, <32 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <32 x half> %vc
+}
+
+define <32 x half> @vfsub_vf_v32f16(<32 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_v32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <32 x half> poison, half %b, i32 0
+ %splat = shufflevector <32 x half> %head, <32 x half> poison, <32 x i32> zeroinitializer
+ %vc = call <32 x half> @llvm.experimental.constrained.fsub.v32f16(<32 x half> %va, <32 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <32 x half> %vc
+}
+
+declare <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float>, <2 x float>, metadata, metadata)
+define <2 x float> @vfsub_vv_v2f32(<2 x float> %va, <2 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_v2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %vc = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> %va, <2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <2 x float> %vc
+}
+
+define <2 x float> @vfsub_vf_v2f32(<2 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <2 x float> poison, float %b, i32 0
+ %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer
+ %vc = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> %va, <2 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <2 x float> %vc
+}
+
+declare <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+define <4 x float> @vfsub_vv_v4f32(<4 x float> %va, <4 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %vc = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float> %va, <4 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <4 x float> %vc
+}
+
+define <4 x float> @vfsub_vf_v4f32(<4 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <4 x float> poison, float %b, i32 0
+ %splat = shufflevector <4 x float> %head, <4 x float> poison, <4 x i32> zeroinitializer
+ %vc = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float> %va, <4 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <4 x float> %vc
+}
+
+declare <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float>, <8 x float>, metadata, metadata)
+define <8 x float> @vfsub_vv_v8f32(<8 x float> %va, <8 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_v8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %vc = call <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float> %va, <8 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <8 x float> %vc
+}
+
+define <8 x float> @vfsub_vf_v8f32(<8 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <8 x float> poison, float %b, i32 0
+ %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer
+ %vc = call <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float> %va, <8 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <8 x float> %vc
+}
+
+define <8 x float> @vfsub_fv_v8f32(<8 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_fv_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfrsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <8 x float> poison, float %b, i32 0
+ %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer
+ %vc = call <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float> %splat, <8 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <8 x float> %vc
+}
+
+declare <16 x float> @llvm.experimental.constrained.fsub.v16f32(<16 x float>, <16 x float>, metadata, metadata)
+define <16 x float> @vfsub_vv_v16f32(<16 x float> %va, <16 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_v16f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %vc = call <16 x float> @llvm.experimental.constrained.fsub.v16f32(<16 x float> %va, <16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <16 x float> %vc
+}
+
+define <16 x float> @vfsub_vf_v16f32(<16 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_v16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <16 x float> poison, float %b, i32 0
+ %splat = shufflevector <16 x float> %head, <16 x float> poison, <16 x i32> zeroinitializer
+ %vc = call <16 x float> @llvm.experimental.constrained.fsub.v16f32(<16 x float> %va, <16 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <16 x float> %vc
+}
+
+declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+define <2 x double> @vfsub_vv_v2f64(<2 x double> %va, <2 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %vc = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %va, <2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <2 x double> %vc
+}
+
+define <2 x double> @vfsub_vf_v2f64(<2 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vfsub.vf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <2 x double> poison, double %b, i32 0
+ %splat = shufflevector <2 x double> %head, <2 x double> poison, <2 x i32> zeroinitializer
+ %vc = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %va, <2 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret <2 x double> %vc
+}
+
<4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double>, <4 x double>, metadata, metadata) +define <4 x double> @vfsub_vv_v4f64(<4 x double> %va, <4 x double> %vb) { +; CHECK-LABEL: vfsub_vv_v4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double> %va, <4 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x double> %vc +} + +define <4 x double> @vfsub_vf_v4f64(<4 x double> %va, double %b) { +; CHECK-LABEL: vfsub_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <4 x double> poison, double %b, i32 0 + %splat = shufflevector <4 x double> %head, <4 x double> poison, <4 x i32> zeroinitializer + %vc = call <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double> %va, <4 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <4 x double> %vc +} + +declare <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double>, <8 x double>, metadata, metadata) +define <8 x double> @vfsub_vv_v8f64(<8 x double> %va, <8 x double> %vb) { +; CHECK-LABEL: vfsub_vv_v8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x double> %vc +} + +define <8 x double> @vfsub_vf_v8f64(<8 x double> %va, double %b) { +; CHECK-LABEL: vfsub_vf_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x double> poison, double %b, i32 0 + %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer + %vc = call <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double> %va, <8 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x double> %vc +} + +define <8 x double> @vfsub_fv_v8f64(<8 x double> %va, double %b) { +; CHECK-LABEL: vfsub_fv_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x double> poison, double %b, i32 0 + %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer + %vc = call <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double> %splat, <8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret <8 x double> %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll new file mode 100644 index 000000000000..313a58efe902 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll @@ -0,0 +1,365 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare @llvm.experimental.constrained.fadd.nxv1f16(, , metadata, metadata) +define @vfadd_vv_nxv1f16( %va, %vb) { +; CHECK-LABEL: 
vfadd_vv_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fadd.nxv1f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfadd_vf_nxv1f16( %va, half %b) { +; CHECK-LABEL: vfadd_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fadd.nxv1f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fadd.nxv2f16(, , metadata, metadata) +define @vfadd_vv_nxv2f16( %va, %vb) { +; CHECK-LABEL: vfadd_vv_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fadd.nxv2f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfadd_vf_nxv2f16( %va, half %b) { +; CHECK-LABEL: vfadd_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fadd.nxv2f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fadd.nxv4f16(, , metadata, metadata) +define @vfadd_vv_nxv4f16( %va, %vb) { +; CHECK-LABEL: vfadd_vv_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fadd.nxv4f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfadd_vf_nxv4f16( %va, half %b) { +; CHECK-LABEL: vfadd_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fadd.nxv4f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fadd.nxv8f16(, , metadata, metadata) +define @vfadd_vv_nxv8f16( %va, %vb) { +; CHECK-LABEL: vfadd_vv_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fadd.nxv8f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfadd_vf_nxv8f16( %va, half %b) { +; CHECK-LABEL: vfadd_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fadd.nxv8f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fadd.nxv16f16(, , metadata, metadata) +define @vfadd_vv_nxv16f16( %va, %vb) { +; CHECK-LABEL: vfadd_vv_nxv16f16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fadd.nxv16f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfadd_vf_nxv16f16( %va, half %b) { +; CHECK-LABEL: vfadd_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fadd.nxv16f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fadd.nxv32f16(, , metadata, metadata) +define @vfadd_vv_nxv32f16( %va, %vb) { +; CHECK-LABEL: vfadd_vv_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fadd.nxv32f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfadd_vf_nxv32f16( %va, half %b) { +; CHECK-LABEL: vfadd_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fadd.nxv32f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fadd.nxv1f32(, , metadata, metadata) +define @vfadd_vv_nxv1f32( %va, %vb) { +; CHECK-LABEL: vfadd_vv_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fadd.nxv1f32( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfadd_vf_nxv1f32( %va, float %b) { +; CHECK-LABEL: vfadd_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fadd.nxv1f32( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fadd.nxv2f32(, , metadata, metadata) +define @vfadd_vv_nxv2f32( %va, %vb) { +; CHECK-LABEL: vfadd_vv_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fadd.nxv2f32( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfadd_vf_nxv2f32( %va, float %b) { +; CHECK-LABEL: vfadd_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fadd.nxv2f32( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fadd.nxv4f32(, , metadata, metadata) +define @vfadd_vv_nxv4f32( %va, %vb) { +; CHECK-LABEL: vfadd_vv_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, 
ma +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fadd.nxv4f32( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfadd_vf_nxv4f32( %va, float %b) { +; CHECK-LABEL: vfadd_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fadd.nxv4f32( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fadd.nxv8f32(, , metadata, metadata) +define @vfadd_vv_nxv8f32( %va, %vb) { +; CHECK-LABEL: vfadd_vv_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fadd.nxv8f32( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfadd_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: vfadd_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fadd.nxv8f32( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fadd.nxv16f32(, , metadata, metadata) +define @vfadd_vv_nxv16f32( %va, %vb) { +; CHECK-LABEL: vfadd_vv_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fadd.nxv16f32( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfadd_vf_nxv16f32( %va, float %b) { +; CHECK-LABEL: vfadd_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fadd.nxv16f32( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fadd.nxv1f64(, , metadata, metadata) +define @vfadd_vv_nxv1f64( %va, %vb) { +; CHECK-LABEL: vfadd_vv_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fadd.nxv1f64( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfadd_vf_nxv1f64( %va, double %b) { +; CHECK-LABEL: vfadd_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fadd.nxv1f64( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fadd.nxv2f64(, , metadata, metadata) +define @vfadd_vv_nxv2f64( %va, %vb) { +; CHECK-LABEL: vfadd_vv_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; 
CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fadd.nxv2f64( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfadd_vf_nxv2f64( %va, double %b) { +; CHECK-LABEL: vfadd_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fadd.nxv2f64( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fadd.nxv4f64(, , metadata, metadata) +define @vfadd_vv_nxv4f64( %va, %vb) { +; CHECK-LABEL: vfadd_vv_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fadd.nxv4f64( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfadd_vf_nxv4f64( %va, double %b) { +; CHECK-LABEL: vfadd_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fadd.nxv4f64( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fadd.nxv8f64(, , metadata, metadata) +define @vfadd_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: vfadd_vv_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fadd.nxv8f64( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfadd_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: vfadd_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fadd.nxv8f64( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll new file mode 100644 index 000000000000..7c10ca993011 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll @@ -0,0 +1,401 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare @llvm.experimental.constrained.fdiv.nxv1f16(, , metadata, metadata) +define @vfdiv_vv_nxv1f16( %va, %vb) { +; CHECK-LABEL: vfdiv_vv_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fdiv.nxv1f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_vf_nxv1f16( %va, half %b) { +; CHECK-LABEL: vfdiv_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, 
e16, mf4, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv1f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fdiv.nxv2f16(, , metadata, metadata) +define @vfdiv_vv_nxv2f16( %va, %vb) { +; CHECK-LABEL: vfdiv_vv_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fdiv.nxv2f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_vf_nxv2f16( %va, half %b) { +; CHECK-LABEL: vfdiv_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv2f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fdiv.nxv4f16(, , metadata, metadata) +define @vfdiv_vv_nxv4f16( %va, %vb) { +; CHECK-LABEL: vfdiv_vv_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fdiv.nxv4f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_vf_nxv4f16( %va, half %b) { +; CHECK-LABEL: vfdiv_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv4f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fdiv.nxv8f16(, , metadata, metadata) +define @vfdiv_vv_nxv8f16( %va, %vb) { +; CHECK-LABEL: vfdiv_vv_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fdiv.nxv8f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_vf_nxv8f16( %va, half %b) { +; CHECK-LABEL: vfdiv_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv8f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_fv_nxv8f16( %va, half %b) { +; CHECK-LABEL: vfdiv_fv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv8f16( %splat, %va, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fdiv.nxv16f16(, , metadata, metadata) +define @vfdiv_vv_nxv16f16( %va, %vb) { +; CHECK-LABEL: vfdiv_vv_nxv16f16: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fdiv.nxv16f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_vf_nxv16f16( %va, half %b) { +; CHECK-LABEL: vfdiv_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv16f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fdiv.nxv32f16(, , metadata, metadata) +define @vfdiv_vv_nxv32f16( %va, %vb) { +; CHECK-LABEL: vfdiv_vv_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fdiv.nxv32f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_vf_nxv32f16( %va, half %b) { +; CHECK-LABEL: vfdiv_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv32f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fdiv.nxv1f32(, , metadata, metadata) +define @vfdiv_vv_nxv1f32( %va, %vb) { +; CHECK-LABEL: vfdiv_vv_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fdiv.nxv1f32( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_vf_nxv1f32( %va, float %b) { +; CHECK-LABEL: vfdiv_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv1f32( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fdiv.nxv2f32(, , metadata, metadata) +define @vfdiv_vv_nxv2f32( %va, %vb) { +; CHECK-LABEL: vfdiv_vv_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fdiv.nxv2f32( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_vf_nxv2f32( %va, float %b) { +; CHECK-LABEL: vfdiv_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv2f32( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fdiv.nxv4f32(, , metadata, metadata) +define @vfdiv_vv_nxv4f32( %va, %vb) { +; CHECK-LABEL: vfdiv_vv_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, 
zero, e32, m2, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fdiv.nxv4f32( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_vf_nxv4f32( %va, float %b) { +; CHECK-LABEL: vfdiv_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv4f32( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fdiv.nxv8f32(, , metadata, metadata) +define @vfdiv_vv_nxv8f32( %va, %vb) { +; CHECK-LABEL: vfdiv_vv_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fdiv.nxv8f32( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: vfdiv_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv8f32( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_fv_nxv8f32( %va, float %b) { +; CHECK-LABEL: vfdiv_fv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv8f32( %splat, %va, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fdiv.nxv16f32(, , metadata, metadata) +define @vfdiv_vv_nxv16f32( %va, %vb) { +; CHECK-LABEL: vfdiv_vv_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fdiv.nxv16f32( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_vf_nxv16f32( %va, float %b) { +; CHECK-LABEL: vfdiv_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv16f32( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fdiv.nxv1f64(, , metadata, metadata) +define @vfdiv_vv_nxv1f64( %va, %vb) { +; CHECK-LABEL: vfdiv_vv_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fdiv.nxv1f64( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_vf_nxv1f64( %va, double %b) { +; CHECK-LABEL: vfdiv_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 
0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv1f64( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fdiv.nxv2f64(, , metadata, metadata) +define @vfdiv_vv_nxv2f64( %va, %vb) { +; CHECK-LABEL: vfdiv_vv_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fdiv.nxv2f64( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_vf_nxv2f64( %va, double %b) { +; CHECK-LABEL: vfdiv_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv2f64( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fdiv.nxv4f64(, , metadata, metadata) +define @vfdiv_vv_nxv4f64( %va, %vb) { +; CHECK-LABEL: vfdiv_vv_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fdiv.nxv4f64( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_vf_nxv4f64( %va, double %b) { +; CHECK-LABEL: vfdiv_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv4f64( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fdiv.nxv8f64(, , metadata, metadata) +define @vfdiv_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: vfdiv_vv_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fdiv.nxv8f64( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: vfdiv_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv8f64( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfdiv_fv_nxv8f64( %va, double %b) { +; CHECK-LABEL: vfdiv_fv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fdiv.nxv8f64( %splat, %va, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll new file mode 100644 index 000000000000..1c8a34b4eff0 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll @@ -0,0 +1,365 
@@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare @llvm.experimental.constrained.fmul.nxv1f16(, , metadata, metadata) +define @vfmul_vv_nxv1f16( %va, %vb) { +; CHECK-LABEL: vfmul_vv_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fmul.nxv1f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfmul_vf_nxv1f16( %va, half %b) { +; CHECK-LABEL: vfmul_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fmul.nxv1f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fmul.nxv2f16(, , metadata, metadata) +define @vfmul_vv_nxv2f16( %va, %vb) { +; CHECK-LABEL: vfmul_vv_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fmul.nxv2f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfmul_vf_nxv2f16( %va, half %b) { +; CHECK-LABEL: vfmul_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fmul.nxv2f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fmul.nxv4f16(, , metadata, metadata) +define @vfmul_vv_nxv4f16( %va, %vb) { +; CHECK-LABEL: vfmul_vv_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fmul.nxv4f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfmul_vf_nxv4f16( %va, half %b) { +; CHECK-LABEL: vfmul_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fmul.nxv4f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fmul.nxv8f16(, , metadata, metadata) +define @vfmul_vv_nxv8f16( %va, %vb) { +; CHECK-LABEL: vfmul_vv_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fmul.nxv8f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfmul_vf_nxv8f16( %va, half %b) { +; CHECK-LABEL: vfmul_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma 
+; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fmul.nxv8f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fmul.nxv16f16(, , metadata, metadata) +define @vfmul_vv_nxv16f16( %va, %vb) { +; CHECK-LABEL: vfmul_vv_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fmul.nxv16f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfmul_vf_nxv16f16( %va, half %b) { +; CHECK-LABEL: vfmul_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fmul.nxv16f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fmul.nxv32f16(, , metadata, metadata) +define @vfmul_vv_nxv32f16( %va, %vb) { +; CHECK-LABEL: vfmul_vv_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fmul.nxv32f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfmul_vf_nxv32f16( %va, half %b) { +; CHECK-LABEL: vfmul_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fmul.nxv32f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fmul.nxv1f32(, , metadata, metadata) +define @vfmul_vv_nxv1f32( %va, %vb) { +; CHECK-LABEL: vfmul_vv_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fmul.nxv1f32( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfmul_vf_nxv1f32( %va, float %b) { +; CHECK-LABEL: vfmul_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fmul.nxv1f32( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fmul.nxv2f32(, , metadata, metadata) +define @vfmul_vv_nxv2f32( %va, %vb) { +; CHECK-LABEL: vfmul_vv_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fmul.nxv2f32( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfmul_vf_nxv2f32( %va, float %b) { +; CHECK-LABEL: vfmul_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; 
CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fmul.nxv2f32( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fmul.nxv4f32(, , metadata, metadata) +define @vfmul_vv_nxv4f32( %va, %vb) { +; CHECK-LABEL: vfmul_vv_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fmul.nxv4f32( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfmul_vf_nxv4f32( %va, float %b) { +; CHECK-LABEL: vfmul_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fmul.nxv4f32( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fmul.nxv8f32(, , metadata, metadata) +define @vfmul_vv_nxv8f32( %va, %vb) { +; CHECK-LABEL: vfmul_vv_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fmul.nxv8f32( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfmul_vf_nxv8f32( %va, float %b) { +; CHECK-LABEL: vfmul_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fmul.nxv8f32( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fmul.nxv16f32(, , metadata, metadata) +define @vfmul_vv_nxv16f32( %va, %vb) { +; CHECK-LABEL: vfmul_vv_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fmul.nxv16f32( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfmul_vf_nxv16f32( %va, float %b) { +; CHECK-LABEL: vfmul_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fmul.nxv16f32( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fmul.nxv1f64(, , metadata, metadata) +define @vfmul_vv_nxv1f64( %va, %vb) { +; CHECK-LABEL: vfmul_vv_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fmul.nxv1f64( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfmul_vf_nxv1f64( %va, double %b) { +; CHECK-LABEL: vfmul_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement 
poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fmul.nxv1f64( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fmul.nxv2f64(, , metadata, metadata) +define @vfmul_vv_nxv2f64( %va, %vb) { +; CHECK-LABEL: vfmul_vv_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fmul.nxv2f64( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfmul_vf_nxv2f64( %va, double %b) { +; CHECK-LABEL: vfmul_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fmul.nxv2f64( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fmul.nxv4f64(, , metadata, metadata) +define @vfmul_vv_nxv4f64( %va, %vb) { +; CHECK-LABEL: vfmul_vv_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fmul.nxv4f64( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfmul_vf_nxv4f64( %va, double %b) { +; CHECK-LABEL: vfmul_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fmul.nxv4f64( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fmul.nxv8f64(, , metadata, metadata) +define @vfmul_vv_nxv8f64( %va, %vb) { +; CHECK-LABEL: vfmul_vv_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fmul.nxv8f64( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfmul_vf_nxv8f64( %va, double %b) { +; CHECK-LABEL: vfmul_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fmul.nxv8f64( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll new file mode 100644 index 000000000000..f4e24a7b9442 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll @@ -0,0 +1,401 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare @llvm.experimental.constrained.fsub.nxv1f16(, , metadata, 
metadata) +define @vfsub_vv_nxv1f16( %va, %vb) { +; CHECK-LABEL: vfsub_vv_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fsub.nxv1f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfsub_vf_nxv1f16( %va, half %b) { +; CHECK-LABEL: vfsub_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fsub.nxv1f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fsub.nxv2f16(, , metadata, metadata) +define @vfsub_vv_nxv2f16( %va, %vb) { +; CHECK-LABEL: vfsub_vv_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fsub.nxv2f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfsub_vf_nxv2f16( %va, half %b) { +; CHECK-LABEL: vfsub_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fsub.nxv2f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fsub.nxv4f16(, , metadata, metadata) +define @vfsub_vv_nxv4f16( %va, %vb) { +; CHECK-LABEL: vfsub_vv_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fsub.nxv4f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfsub_vf_nxv4f16( %va, half %b) { +; CHECK-LABEL: vfsub_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fsub.nxv4f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +declare @llvm.experimental.constrained.fsub.nxv8f16(, , metadata, metadata) +define @vfsub_vv_nxv8f16( %va, %vb) { +; CHECK-LABEL: vfsub_vv_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %vc = call @llvm.experimental.constrained.fsub.nxv8f16( %va, %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfsub_vf_nxv8f16( %va, half %b) { +; CHECK-LABEL: vfsub_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %vc = call @llvm.experimental.constrained.fsub.nxv8f16( %va, %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore") + ret %vc +} + +define @vfsub_fv_nxv8f16( %va, half %b) { +; CHECK-LABEL: vfsub_fv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, 
+; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x half> @llvm.experimental.constrained.fsub.nxv8f16(<vscale x 8 x half> %splat, <vscale x 8 x half> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x half> %vc
+}
+
+declare <vscale x 16 x half> @llvm.experimental.constrained.fsub.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, metadata, metadata)
+define <vscale x 16 x half> @vfsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 16 x half> @llvm.experimental.constrained.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 16 x half> @vfsub_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
+  %vc = call <vscale x 16 x half> @llvm.experimental.constrained.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x half> %vc
+}
+
+declare <vscale x 32 x half> @llvm.experimental.constrained.fsub.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, metadata, metadata)
+define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 32 x half> @llvm.experimental.constrained.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
+  %vc = call <vscale x 32 x half> @llvm.experimental.constrained.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 32 x half> %vc
+}
+
+declare <vscale x 1 x float> @llvm.experimental.constrained.fsub.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, metadata, metadata)
+define <vscale x 1 x float> @vfsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 1 x float> @llvm.experimental.constrained.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 1 x float> @vfsub_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x float> @llvm.experimental.constrained.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x float> %vc
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.fsub.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
+define <vscale x 2 x float> @vfsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 2 x float> @llvm.experimental.constrained.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 2 x float> @vfsub_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x float> @llvm.experimental.constrained.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x float> %vc
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.fsub.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, metadata, metadata)
+define <vscale x 4 x float> @vfsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 4 x float> @llvm.experimental.constrained.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 4 x float> @vfsub_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x float> @llvm.experimental.constrained.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x float> %vc
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.fsub.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, metadata, metadata)
+define <vscale x 8 x float> @vfsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 8 x float> @llvm.experimental.constrained.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfsub_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x float> @llvm.experimental.constrained.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfsub_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x float> @llvm.experimental.constrained.fsub.nxv8f32(<vscale x 8 x float> %splat, <vscale x 8 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x float> %vc
+}
+
+declare <vscale x 16 x float> @llvm.experimental.constrained.fsub.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, metadata, metadata)
+define <vscale x 16 x float> @vfsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 16 x float> @llvm.experimental.constrained.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 16 x float> @vfsub_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
+  %vc = call <vscale x 16 x float> @llvm.experimental.constrained.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x float> %vc
+}
+
+declare <vscale x 1 x double> @llvm.experimental.constrained.fsub.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, metadata, metadata)
+define <vscale x 1 x double> @vfsub_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 1 x double> @llvm.experimental.constrained.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 1 x double> @vfsub_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x double> @llvm.experimental.constrained.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x double> %vc
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.fsub.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, metadata, metadata)
+define <vscale x 2 x double> @vfsub_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 2 x double> @llvm.experimental.constrained.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 2 x double> @vfsub_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x double> @llvm.experimental.constrained.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x double> %vc
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.fsub.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, metadata, metadata)
+define <vscale x 4 x double> @vfsub_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 4 x double> @llvm.experimental.constrained.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 4 x double> @vfsub_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x double> @llvm.experimental.constrained.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x double> %vc
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.fsub.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, metadata, metadata)
+define <vscale x 8 x double> @vfsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 8 x double> @llvm.experimental.constrained.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfsub_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x double> @llvm.experimental.constrained.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfsub_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x double> @llvm.experimental.constrained.fsub.nxv8f64(<vscale x 8 x double> %splat, <vscale x 8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x double> %vc
+}