From: LiaoChunyu
Date: Fri, 26 May 2023 06:19:45 +0000 (+0800)
Subject: [RISCV] Custom lower vector llvm.is.fpclass to vfclass.v
X-Git-Tag: upstream/17.0.6~7098
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=477d1080cb30a5959a0bd47ab84cbae2d87f1e13;p=platform%2Fupstream%2Fllvm.git

[RISCV] Custom lower vector llvm.is.fpclass to vfclass.v

Follow-up to D149063, which handled the scalar case. This patch adds
custom lowering of llvm.is.fpclass for both scalable and fixed-length
vectors: the operand is classified with vfclass.v, the result is ANDed
with the requested class-bit mask, and the final i1 vector is produced
by a compare against zero.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D151176
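As a sketch of the resulting codegen (mirroring the new
fixed-vectors-vfclass.ll test below), a vector NaN check such as

  %1 = call <2 x i1> @llvm.is.fpclass.v2f16(<2 x half> %x, i32 3) ; nan

now selects vfclass.v followed by an AND with the corresponding fclass
bits (sNaN|qNaN = 0x300 = 768) and a compare against zero; scalable
vectors take the same shape with a VLMAX vsetvli:

  vsetivli zero, 2, e16, mf4, ta, ma
  vfclass.v v8, v8
  li a0, 768
  vand.vx v8, v8, a0
  vmsne.vi v0, v8, 0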
---

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 3f8a45e..10fe699 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -780,7 +780,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, VT, Legal);
 
       setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND,
-                          ISD::FROUNDEVEN, ISD::FRINT, ISD::FNEARBYINT},
+                          ISD::FROUNDEVEN, ISD::FRINT, ISD::FNEARBYINT,
+                          ISD::IS_FPCLASS},
                          VT, Custom);
 
       setOperationAction(FloatingPointVecReduceOps, VT, Custom);
@@ -1037,7 +1038,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
 
         setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV,
                             ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FSQRT,
-                            ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM},
+                            ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM,
+                            ISD::IS_FPCLASS},
                            VT, Custom);
 
         setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
@@ -4346,8 +4348,8 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
   return Op;
 }
 
-static SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG,
-                               const RISCVSubtarget &Subtarget) {
+SDValue RISCVTargetLowering::LowerIS_FPCLASS(SDValue Op,
+                                             SelectionDAG &DAG) const {
   SDLoc DL(Op);
   MVT VT = Op.getSimpleValueType();
   MVT XLenVT = Subtarget.getXLenVT();
@@ -4376,6 +4378,50 @@ static SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG,
     TDCMask |= RISCV::FPMASK_Negative_Zero;
 
   SDValue TDCMaskV = DAG.getConstant(TDCMask, DL, XLenVT);
+
+  if (VT.isVector()) {
+    SDValue Op0 = Op.getOperand(0);
+    MVT VT0 = Op.getOperand(0).getSimpleValueType();
+    MVT DstVT = VT0.changeVectorElementTypeToInteger();
+
+    if (VT.isScalableVector()) {
+      SDValue VL = DAG.getRegister(RISCV::X0, XLenVT);
+      SDValue Mask = getAllOnesMask(DstVT, VL, DL, DAG);
+      SDValue FPCLASS = DAG.getNode(RISCVISD::FCLASS_VL, DL, DstVT,
+                                    {Op0, Mask, VL}, Op->getFlags());
+      SDValue AND = DAG.getNode(ISD::AND, DL, DstVT, FPCLASS,
+                                DAG.getConstant(TDCMask, DL, DstVT));
+      return DAG.getSetCC(DL, VT, AND, DAG.getConstant(0, DL, DstVT),
+                          ISD::SETNE);
+    }
+
+    MVT ContainerVT0 = getContainerForFixedLengthVector(VT0);
+    MVT ContainerVT = getContainerForFixedLengthVector(VT);
+    MVT ContainerDstVT = getContainerForFixedLengthVector(DstVT);
+    auto [Mask, VL] =
+        getDefaultVLOps(DstVT, ContainerDstVT, DL, DAG, Subtarget);
+
+    SDValue FPCLASS = DAG.getNode(
+        RISCVISD::FCLASS_VL, DL, DAG.getVTList(ContainerDstVT, MVT::Other),
+        {convertToScalableVector(ContainerVT0, Op0, DAG, Subtarget) /*Op0*/,
+         Mask, VL},
+        Op->getFlags());
+
+    TDCMaskV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerDstVT,
+                           DAG.getUNDEF(ContainerDstVT), TDCMaskV, VL);
+    SDValue AND = DAG.getNode(RISCVISD::AND_VL, DL, ContainerDstVT, FPCLASS,
+                              TDCMaskV, DAG.getUNDEF(ContainerDstVT), Mask, VL);
+
+    SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
+    SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerDstVT,
+                            DAG.getUNDEF(ContainerDstVT), SplatZero, VL);
+
+    SDValue VMSNE = DAG.getNode(RISCVISD::SETCC_VL, DL, ContainerVT,
+                                {AND, SplatZero, DAG.getCondCode(ISD::SETNE),
+                                 DAG.getUNDEF(ContainerVT), Mask, VL});
+    return convertFromScalableVector(VT, VMSNE, DAG, Subtarget);
+  }
+
   SDValue FPCLASS = DAG.getNode(RISCVISD::FPCLASS, DL, VT, Op.getOperand(0));
   SDValue AND = DAG.getNode(ISD::AND, DL, VT, FPCLASS, TDCMaskV);
   return DAG.getSetCC(DL, VT, AND, DAG.getConstant(0, DL, XLenVT),
@@ -4503,7 +4549,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   case ISD::INTRINSIC_VOID:
     return LowerINTRINSIC_VOID(Op, DAG);
   case ISD::IS_FPCLASS:
-    return LowerIS_FPCLASS(Op, DAG, Subtarget);
+    return LowerIS_FPCLASS(Op, DAG);
   case ISD::BITREVERSE: {
     MVT VT = Op.getSimpleValueType();
     SDLoc DL(Op);
@@ -15216,6 +15262,7 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(FNEG_VL)
   NODE_NAME_CASE(FABS_VL)
   NODE_NAME_CASE(FSQRT_VL)
+  NODE_NAME_CASE(FCLASS_VL)
   NODE_NAME_CASE(VFMADD_VL)
   NODE_NAME_CASE(VFNMADD_VL)
   NODE_NAME_CASE(VFMSUB_VL)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 5ddf94f..829ff1f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -241,6 +241,7 @@ enum NodeType : unsigned {
   FNEG_VL,
   FABS_VL,
   FSQRT_VL,
+  FCLASS_VL,
   FCOPYSIGN_VL, // Has a merge operand
   VFCVT_RTZ_X_F_VL,
   VFCVT_RTZ_XU_F_VL,
@@ -821,6 +822,7 @@ private:
                                             SelectionDAG &DAG) const;
   SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG, unsigned NewOpc,
                             bool HasMergeOp = false, bool HasMask = true) const;
+  SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG, unsigned RISCVISDOpc,
                     bool HasMergeOp = false) const;
   SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG, unsigned MaskOpc,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index a605412..6ba45a2 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -129,6 +129,14 @@ def any_riscv_fsqrt_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                   [(riscv_fsqrt_vl node:$src, node:$mask, node:$vl),
                                    (riscv_strict_fsqrt_vl node:$src, node:$mask, node:$vl)]>;
 
+def riscv_fclass_vl : SDNode<"RISCVISD::FCLASS_VL",
+                             SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisVec<0>,
+                                                  SDTCisFP<1>, SDTCisVec<1>,
+                                                  SDTCisSameSizeAs<0, 1>,
+                                                  SDTCVecEltisVT<2, i1>,
+                                                  SDTCisSameNumEltsAs<0, 2>,
+                                                  SDTCisVT<3, XLenVT>]>>;
+
 def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
                                               SDTCisSameAs<0, 3>,
@@ -1999,6 +2007,12 @@ foreach vti = AllFloatVectors in {
                 (!cast<Instruction>("PseudoVFROUND_NOEXCEPT_V_" # vti.LMul.MX #"_MASK")
                       (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
                       (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
+
+  // 14.14. Vector Floating-Point Classify Instruction
+  def : Pat<(riscv_fclass_vl (vti.Vector vti.RegClass:$rs2),
+                             (vti.Mask true_mask), VLOpFrag),
+            (!cast<Instruction>("PseudoVFCLASS_V_"# vti.LMul.MX)
+             vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
 }
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll
new file mode 100644
index 0000000..f507db7f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll
@@ -0,0 +1,170 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:   -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:   -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
+
+define <2 x i1> @isnan_v2f16(<2 x half> %x) {
+; CHECK-LABEL: isnan_v2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 768
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <2 x i1> @llvm.is.fpclass.v2f16(<2 x half> %x, i32 3) ; nan
+  ret <2 x i1> %1
+}
+
+define <2 x i1> @isnan_v2f32(<2 x float> %x) {
+; CHECK-LABEL: isnan_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 927
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <2 x i1> @llvm.is.fpclass.v2f32(<2 x float> %x, i32 639)
+  ret <2 x i1> %1
+}
+
+
+define <4 x i1> @isnan_v4f32(<4 x float> %x) {
+; CHECK-LABEL: isnan_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 768
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <4 x i1> @llvm.is.fpclass.v4f32(<4 x float> %x, i32 3) ; nan
+  ret <4 x i1> %1
+}
+
+define <8 x i1> @isnan_v8f32(<8 x float> %x) {
+; CHECK-LABEL: isnan_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 512
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <8 x i1> @llvm.is.fpclass.v8f32(<8 x float> %x, i32 2)
+  ret <8 x i1> %1
+}
+
+define <16 x i1> @isnan_v16f32(<16 x float> %x) {
+; CHECK-LABEL: isnan_v16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 256
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <16 x i1> @llvm.is.fpclass.v16f32(<16 x float> %x, i32 1)
+  ret <16 x i1> %1
+}
+
+define <2 x i1> @isnormal_v2f64(<2 x double> %x) {
+; CHECK-LABEL: isnormal_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 129
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <2 x i1> @llvm.is.fpclass.v2f64(<2 x double> %x, i32 516) ; 0x204 = "inf"
+  ret <2 x i1> %1
+}
+
+define <4 x i1> @isposinf_v4f64(<4 x double> %x) {
+; CHECK-LABEL: isposinf_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 128
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <4 x i1> @llvm.is.fpclass.v4f64(<4 x double> %x, i32 512) ; 0x200 = "+inf"
+  ret <4 x i1> %1
+}
+
+define <8 x i1> @isneginf_v8f64(<8 x double> %x) {
+; CHECK-LABEL: isneginf_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <8 x i1> @llvm.is.fpclass.v8f64(<8 x double> %x, i32 4) ; "-inf"
+  ret <8 x i1> %1
+}
+
+define <16 x i1> @isfinite_v16f64(<16 x double> %x) {
+; CHECK-LABEL: isfinite_v16f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 126
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <16 x i1> @llvm.is.fpclass.v16f64(<16 x double> %x, i32 504) ; 0x1f8 = "finite"
+  ret <16 x i1> %1
+}
+
+define <16 x i1> @isposfinite_v16f64(<16 x double> %x) {
+; CHECK-LABEL: isposfinite_v16f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 112
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <16 x i1> @llvm.is.fpclass.v16f64(<16 x double> %x, i32 448) ; 0x1c0 = "+finite"
+  ret <16 x i1> %1
+}
+
+define <16 x i1> @isnegfinite_v16f64(<16 x double> %x) {
+; CHECK-LABEL: isnegfinite_v16f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    vand.vi v8, v8, 14
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <16 x i1> @llvm.is.fpclass.v16f64(<16 x double> %x, i32 56) ; 0x38 = "-finite"
+  ret <16 x i1> %1
+}
+
+define <16 x i1> @isnotfinite_v16f64(<16 x double> %x) {
+; CHECK-LABEL: isnotfinite_v16f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 897
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <16 x i1> @llvm.is.fpclass.v16f64(<16 x double> %x, i32 519) ; 0x207 = "inf|nan"
+  ret <16 x i1> %1
+}
+
+declare <2 x i1> @llvm.is.fpclass.v2f16(<2 x half>, i32)
+declare <2 x i1> @llvm.is.fpclass.v2f32(<2 x float>, i32)
+declare <4 x i1> @llvm.is.fpclass.v4f32(<4 x float>, i32)
+declare <8 x i1> @llvm.is.fpclass.v8f32(<8 x float>, i32)
+declare <16 x i1> @llvm.is.fpclass.v16f32(<16 x float>, i32)
+declare <2 x i1> @llvm.is.fpclass.v2f64(<2 x double>, i32)
+declare <4 x i1> @llvm.is.fpclass.v4f64(<4 x double>, i32)
+declare <8 x i1> @llvm.is.fpclass.v8f64(<8 x double>, i32)
+declare <16 x i1> @llvm.is.fpclass.v16f64(<16 x double>, i32)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-sdnode.ll
new file mode 100644
index 0000000..d0fc44a9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-sdnode.ll
@@ -0,0 +1,169 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 2 x i1> @isnan_nxv2f16(<vscale x 2 x half> %x) {
+; CHECK-LABEL: isnan_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 768
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 2 x i1> @llvm.is.fpclass.nxv2f16(<vscale x 2 x half> %x, i32 3) ; nan
+  ret <vscale x 2 x i1> %1
+}
+
+define <vscale x 2 x i1> @isnan_nxv2f32(<vscale x 2 x float> %x) {
+; CHECK-LABEL: isnan_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 927
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 2 x i1> @llvm.is.fpclass.nxv2f32(<vscale x 2 x float> %x, i32 639)
+  ret <vscale x 2 x i1> %1
+}
+
+
+define <vscale x 4 x i1> @isnan_nxv4f32(<vscale x 4 x float> %x) {
+; CHECK-LABEL: isnan_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 768
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 4 x i1> @llvm.is.fpclass.nxv4f32(<vscale x 4 x float> %x, i32 3) ; nan
+  ret <vscale x 4 x i1> %1
+}
+
+define <vscale x 8 x i1> @isnan_nxv8f32(<vscale x 8 x float> %x) {
+; CHECK-LABEL: isnan_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 512
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 8 x i1> @llvm.is.fpclass.nxv8f32(<vscale x 8 x float> %x, i32 2)
+  ret <vscale x 8 x i1> %1
+}
+
+define <vscale x 16 x i1> @isnan_nxv16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: isnan_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 256
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 16 x i1> @llvm.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 1)
+  ret <vscale x 16 x i1> %1
+}
+
+define <vscale x 2 x i1> @isnormal_nxv2f64(<vscale x 2 x double> %x) {
+; CHECK-LABEL: isnormal_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 129
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 2 x i1> @llvm.is.fpclass.nxv2f64(<vscale x 2 x double> %x, i32 516) ; 0x204 = "inf"
+  ret <vscale x 2 x i1> %1
+}
+
+define <vscale x 4 x i1> @isposinf_nxv4f64(<vscale x 4 x double> %x) {
+; CHECK-LABEL: isposinf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 128
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 4 x i1> @llvm.is.fpclass.nxv4f64(<vscale x 4 x double> %x, i32 512) ; 0x200 = "+inf"
+  ret <vscale x 4 x i1> %1
+}
+
+define <vscale x 8 x i1> @isneginf_nxv8f64(<vscale x 8 x double> %x) {
+; CHECK-LABEL: isneginf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 8 x i1> @llvm.is.fpclass.nxv8f64(<vscale x 8 x double> %x, i32 4) ; "-inf"
+  ret <vscale x 8 x i1> %1
+}
+
+define <vscale x 16 x i1> @isfinite_nxv16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: isfinite_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 126
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 16 x i1> @llvm.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 504) ; 0x1f8 = "finite"
+  ret <vscale x 16 x i1> %1
+}
+
+define <vscale x 16 x i1> @isposfinite_nxv16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: isposfinite_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 112
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 16 x i1> @llvm.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 448) ; 0x1c0 = "+finite"
+  ret <vscale x 16 x i1> %1
+}
+
+define <vscale x 16 x i1> @isnegfinite_nxv16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: isnegfinite_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    vand.vi v8, v8, 14
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 16 x i1> @llvm.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 56) ; 0x38 = "-finite"
+  ret <vscale x 16 x i1> %1
+}
+
+define <vscale x 16 x i1> @isnotfinite_nxv16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: isnotfinite_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 897
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 16 x i1> @llvm.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 519) ; 0x207 = "inf|nan"
+  ret <vscale x 16 x i1> %1
+}
+
+declare <vscale x 2 x i1> @llvm.is.fpclass.nxv2f16(<vscale x 2 x half>, i32)
+declare <vscale x 2 x i1> @llvm.is.fpclass.nxv2f32(<vscale x 2 x float>, i32)
+declare <vscale x 4 x i1> @llvm.is.fpclass.nxv4f32(<vscale x 4 x float>, i32)
+declare <vscale x 8 x i1> @llvm.is.fpclass.nxv8f32(<vscale x 8 x float>, i32)
+declare <vscale x 16 x i1> @llvm.is.fpclass.nxv16f32(<vscale x 16 x float>, i32)
+declare <vscale x 2 x i1> @llvm.is.fpclass.nxv2f64(<vscale x 2 x double>, i32)
+declare <vscale x 4 x i1> @llvm.is.fpclass.nxv4f64(<vscale x 4 x double>, i32)
+declare <vscale x 8 x i1> @llvm.is.fpclass.nxv8f64(<vscale x 8 x double>, i32)