From 7121bed210d9f568485c9b5b6854b629353fa0ea Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Thu, 16 Aug 2018 17:07:52 +0000
Subject: [PATCH] AMDGPU: Custom lower fexp

This will allow the library to use __builtin_expf directly without
having to expand it itself. Note f64 still won't work, because there
is no exp instruction for it.

llvm-svn: 339902
---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp |  32 +++
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h   |   3 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |   2 +
 llvm/test/CodeGen/AMDGPU/fexp.ll              | 307 ++++++++++++++++++++++++++
 4 files changed, 343 insertions(+), 1 deletion(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/fexp.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 5301626..7067bf1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -318,6 +318,7 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
 
   setOperationAction(ISD::FLOG, MVT::f32, Custom);
   setOperationAction(ISD::FLOG10, MVT::f32, Custom);
+  setOperationAction(ISD::FEXP, MVT::f32, Custom);
 
   setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
 
@@ -450,6 +451,7 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::FCOS, VT, Expand);
     setOperationAction(ISD::FDIV, VT, Expand);
     setOperationAction(ISD::FEXP2, VT, Expand);
+    setOperationAction(ISD::FEXP, VT, Expand);
     setOperationAction(ISD::FLOG2, VT, Expand);
     setOperationAction(ISD::FREM, VT, Expand);
     setOperationAction(ISD::FLOG, VT, Expand);
@@ -1141,6 +1143,8 @@ SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
     return LowerFLOG(Op, DAG, 1 / AMDGPU_LOG2E_F);
   case ISD::FLOG10:
     return LowerFLOG(Op, DAG, AMDGPU_LN2_F / AMDGPU_LN10_F);
+  case ISD::FEXP:
+    return lowerFEXP(Op, DAG);
   case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
   case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
   case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
@@ -2214,6 +2218,34 @@ SDValue AMDGPUTargetLowering::LowerFLOG(SDValue Op, SelectionDAG &DAG,
   return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand);
 }
 
+// Return M_LOG2E of appropriate type
+static SDValue getLog2EVal(SelectionDAG &DAG, const SDLoc &SL, EVT VT) {
+  switch (VT.getScalarType().getSimpleVT().SimpleTy) {
+  case MVT::f32:
+    return DAG.getConstantFP(1.44269504088896340735992468100189214f, SL, VT);
+  case MVT::f16:
+    return DAG.getConstantFP(
+      APFloat(APFloat::IEEEhalf(), "1.44269504088896340735992468100189214"),
+      SL, VT);
+  case MVT::f64:
+    return DAG.getConstantFP(
+      APFloat(APFloat::IEEEdouble(), "0x1.71547652b82fep+0"), SL, VT);
+  default:
+    llvm_unreachable("unsupported fp type");
+  }
+}
+
+// exp2(M_LOG2E_F * f);
+SDValue AMDGPUTargetLowering::lowerFEXP(SDValue Op, SelectionDAG &DAG) const {
+  EVT VT = Op.getValueType();
+  SDLoc SL(Op);
+  SDValue Src = Op.getOperand(0);
+
+  const SDValue K = getLog2EVal(DAG, SL, VT);
+  SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Src, K, Op->getFlags());
+  return DAG.getNode(ISD::FEXP2, SL, VT, Mul, Op->getFlags());
+}
+
 static bool isCtlzOpc(unsigned Opc) {
   return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
 }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index 4e8b7a4..d56f9cd 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -58,8 +58,9 @@ protected:
   SDValue LowerFROUND64(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerFLOG(SDValue Op, SelectionDAG &Dag, + SDValue LowerFLOG(SDValue Op, SelectionDAG &DAG, double Log2BaseInverted) const; + SDValue lowerFEXP(SDValue Op, SelectionDAG &DAG) const; SDValue LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 0ae1514..3e7ca62 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -345,6 +345,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM, if (Subtarget->has16BitInsts()) { setOperationAction(ISD::FLOG, MVT::f16, Custom); + setOperationAction(ISD::FEXP, MVT::f16, Custom); setOperationAction(ISD::FLOG10, MVT::f16, Custom); } @@ -597,6 +598,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM, setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom); setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom); + setOperationAction(ISD::FEXP, MVT::v2f16, Custom); setOperationAction(ISD::SELECT, MVT::v4i16, Custom); setOperationAction(ISD::SELECT, MVT::v4f16, Custom); } diff --git a/llvm/test/CodeGen/AMDGPU/fexp.ll b/llvm/test/CodeGen/AMDGPU/fexp.ll new file mode 100644 index 0000000..dd69335 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/fexp.ll @@ -0,0 +1,307 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +;RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI %s +;RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI %s +;RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefix=GFX9 %s + +define float @v_exp_f32(float %arg0) { +; SI-LABEL: v_exp_f32: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0 +; SI-NEXT: v_exp_f32_e32 v0, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_exp_f32: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0 +; VI-NEXT: v_exp_f32_e32 v0, v0 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_exp_f32: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0 +; GFX9-NEXT: v_exp_f32_e32 v0, v0 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %result = call float @llvm.exp.f32(float %arg0) + ret float %result +} + +define <2 x float> @v_exp_v2f32(<2 x float> %arg0) { +; SI-LABEL: v_exp_v2f32: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, 0x3fb8aa3b +; SI-NEXT: v_mul_f32_e32 v0, v0, v2 +; SI-NEXT: v_mul_f32_e32 v1, v1, v2 +; SI-NEXT: v_exp_f32_e32 v0, v0 +; SI-NEXT: v_exp_f32_e32 v1, v1 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_exp_v2f32: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, 0x3fb8aa3b +; VI-NEXT: v_mul_f32_e32 v0, v0, v2 +; VI-NEXT: v_mul_f32_e32 v1, v1, v2 +; VI-NEXT: v_exp_f32_e32 v0, v0 +; VI-NEXT: v_exp_f32_e32 v1, v1 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_exp_v2f32: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v2, 0x3fb8aa3b +; GFX9-NEXT: v_mul_f32_e32 v0, v0, v2 +; GFX9-NEXT: v_mul_f32_e32 v1, v1, v2 +; GFX9-NEXT: v_exp_f32_e32 v0, v0 +; GFX9-NEXT: v_exp_f32_e32 v1, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %result = call <2 x float> 
@llvm.exp.v2f32(<2 x float> %arg0) + ret <2 x float> %result +} + +define <3 x float> @v_exp_v3f32(<3 x float> %arg0) { +; SI-LABEL: v_exp_v3f32: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, 0x3fb8aa3b +; SI-NEXT: v_mul_f32_e32 v0, v0, v3 +; SI-NEXT: v_mul_f32_e32 v1, v1, v3 +; SI-NEXT: v_mul_f32_e32 v2, v2, v3 +; SI-NEXT: v_exp_f32_e32 v0, v0 +; SI-NEXT: v_exp_f32_e32 v1, v1 +; SI-NEXT: v_exp_f32_e32 v2, v2 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_exp_v3f32: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v3, 0x3fb8aa3b +; VI-NEXT: v_mul_f32_e32 v0, v0, v3 +; VI-NEXT: v_mul_f32_e32 v1, v1, v3 +; VI-NEXT: v_mul_f32_e32 v2, v2, v3 +; VI-NEXT: v_exp_f32_e32 v0, v0 +; VI-NEXT: v_exp_f32_e32 v1, v1 +; VI-NEXT: v_exp_f32_e32 v2, v2 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_exp_v3f32: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v3, 0x3fb8aa3b +; GFX9-NEXT: v_mul_f32_e32 v0, v0, v3 +; GFX9-NEXT: v_mul_f32_e32 v1, v1, v3 +; GFX9-NEXT: v_mul_f32_e32 v2, v2, v3 +; GFX9-NEXT: v_exp_f32_e32 v0, v0 +; GFX9-NEXT: v_exp_f32_e32 v1, v1 +; GFX9-NEXT: v_exp_f32_e32 v2, v2 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %result = call <3 x float> @llvm.exp.v3f32(<3 x float> %arg0) + ret <3 x float> %result +} + +define <4 x float> @v_exp_v4f32(<4 x float> %arg0) { +; SI-LABEL: v_exp_v4f32: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v4, 0x3fb8aa3b +; SI-NEXT: v_mul_f32_e32 v0, v0, v4 +; SI-NEXT: v_mul_f32_e32 v1, v1, v4 +; SI-NEXT: v_mul_f32_e32 v2, v2, v4 +; SI-NEXT: v_mul_f32_e32 v3, v3, v4 +; SI-NEXT: v_exp_f32_e32 v0, v0 +; SI-NEXT: v_exp_f32_e32 v1, v1 +; SI-NEXT: v_exp_f32_e32 v2, v2 +; SI-NEXT: v_exp_f32_e32 v3, v3 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_exp_v4f32: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v4, 0x3fb8aa3b +; VI-NEXT: v_mul_f32_e32 v0, v0, v4 +; VI-NEXT: v_mul_f32_e32 v1, v1, v4 +; VI-NEXT: v_mul_f32_e32 v2, v2, v4 +; VI-NEXT: v_mul_f32_e32 v3, v3, v4 +; VI-NEXT: v_exp_f32_e32 v0, v0 +; VI-NEXT: v_exp_f32_e32 v1, v1 +; VI-NEXT: v_exp_f32_e32 v2, v2 +; VI-NEXT: v_exp_f32_e32 v3, v3 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_exp_v4f32: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v4, 0x3fb8aa3b +; GFX9-NEXT: v_mul_f32_e32 v0, v0, v4 +; GFX9-NEXT: v_mul_f32_e32 v1, v1, v4 +; GFX9-NEXT: v_mul_f32_e32 v2, v2, v4 +; GFX9-NEXT: v_mul_f32_e32 v3, v3, v4 +; GFX9-NEXT: v_exp_f32_e32 v0, v0 +; GFX9-NEXT: v_exp_f32_e32 v1, v1 +; GFX9-NEXT: v_exp_f32_e32 v2, v2 +; GFX9-NEXT: v_exp_f32_e32 v3, v3 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %result = call <4 x float> @llvm.exp.v4f32(<4 x float> %arg0) + ret <4 x float> %result +} + +define half @v_exp_f16(half %arg0) { +; SI-LABEL: v_exp_f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0 +; SI-NEXT: v_exp_f32_e32 v0, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_exp_f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_mul_f16_e32 v0, 0x3dc5, v0 +; VI-NEXT: v_exp_f16_e32 v0, v0 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_exp_f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_mul_f16_e32 v0, 
0x3dc5, v0 +; GFX9-NEXT: v_exp_f16_e32 v0, v0 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %result = call half @llvm.exp.f16(half %arg0) + ret half %result +} + +define <2 x half> @v_exp_v2f16(<2 x half> %arg0) { +; SI-LABEL: v_exp_v2f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_mov_b32_e32 v2, 0x3fb8aa3b +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_mul_f32_e32 v0, v0, v2 +; SI-NEXT: v_mul_f32_e32 v1, v1, v2 +; SI-NEXT: v_exp_f32_e32 v0, v0 +; SI-NEXT: v_exp_f32_e32 v1, v1 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_exp_v2f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, 0x3dc5 +; VI-NEXT: v_mul_f16_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_mul_f16_e32 v0, v0, v1 +; VI-NEXT: v_exp_f16_sdwa v2, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD +; VI-NEXT: v_exp_f16_e32 v0, v0 +; VI-NEXT: v_or_b32_e32 v0, v0, v2 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_exp_v2f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v1, 0x3dc5 +; GFX9-NEXT: v_pk_mul_f16 v0, v0, v1 op_sel_hi:[1,0] +; GFX9-NEXT: v_exp_f16_e32 v1, v0 +; GFX9-NEXT: v_exp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 +; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %result = call <2 x half> @llvm.exp.v2f16(<2 x half> %arg0) + ret <2 x half> %result +} + +; define <3 x half> @v_exp_v3f16(<3 x half> %arg0) { +; %result = call <3 x half> @llvm.exp.v3f16(<3 x half> %arg0) +; ret <3 x half> %result +; } + +define <4 x half> @v_exp_v4f16(<4 x half> %arg0) { +; SI-LABEL: v_exp_v4f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_mov_b32_e32 v4, 0x3fb8aa3b +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_mul_f32_e32 v3, v3, v4 +; SI-NEXT: v_mul_f32_e32 v2, v2, v4 +; SI-NEXT: v_mul_f32_e32 v1, v1, v4 +; SI-NEXT: v_mul_f32_e32 v0, v0, v4 +; SI-NEXT: v_exp_f32_e32 v3, v3 +; SI-NEXT: v_exp_f32_e32 v2, v2 +; SI-NEXT: v_exp_f32_e32 v1, v1 +; SI-NEXT: v_exp_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v3, v2, v3 +; SI-NEXT: v_or_b32_e32 v1, v0, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v3 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_exp_v4f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, 0x3dc5 +; VI-NEXT: v_mul_f16_e32 v3, v1, v2 +; VI-NEXT: v_mul_f16_e32 v4, v0, v2 +; VI-NEXT: v_mul_f16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_mul_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD 
+; VI-NEXT: v_exp_f16_e32 v3, v3 +; VI-NEXT: v_exp_f16_sdwa v1, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD +; VI-NEXT: v_exp_f16_e32 v4, v4 +; VI-NEXT: v_exp_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD +; VI-NEXT: v_or_b32_e32 v1, v3, v1 +; VI-NEXT: v_or_b32_e32 v0, v4, v0 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_exp_v4f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v2, 0x3dc5 +; GFX9-NEXT: v_mul_f16_e32 v3, v1, v2 +; GFX9-NEXT: v_mul_f16_e32 v4, v0, v2 +; GFX9-NEXT: v_mul_f16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_exp_f16_e32 v3, v3 +; GFX9-NEXT: v_exp_f16_e32 v4, v4 +; GFX9-NEXT: v_exp_f16_e32 v0, v0 +; GFX9-NEXT: v_exp_f16_e32 v1, v1 +; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff +; GFX9-NEXT: v_and_b32_e32 v4, v2, v4 +; GFX9-NEXT: v_and_b32_e32 v2, v2, v3 +; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v4 +; GFX9-NEXT: v_lshl_or_b32 v1, v1, 16, v2 +; GFX9-NEXT: s_setpc_b64 s[30:31] + %result = call <4 x half> @llvm.exp.v4f16(<4 x half> %arg0) + ret <4 x half> %result +} + +declare float @llvm.exp.f32(float) +declare <2 x float> @llvm.exp.v2f32(<2 x float>) +declare <3 x float> @llvm.exp.v3f32(<3 x float>) +declare <4 x float> @llvm.exp.v4f32(<4 x float>) + +declare half @llvm.exp.f16(half) +declare <2 x half> @llvm.exp.v2f16(<2 x half>) +declare <3 x half> @llvm.exp.v3f16(<3 x half>) +declare <4 x half> @llvm.exp.v4f16(<4 x half>) + -- 2.7.4
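
The lowering relies on the identity exp(x) = exp2(x * log2(e)): the
hardware has native exp2 instructions (v_exp_f32, v_exp_f16) but no
base-e exp, so lowerFEXP multiplies the input by log2(e) and emits
ISD::FEXP2. In the checks above, 0x3fb8aa3b is the f32 encoding of
log2(e) ~ 1.4426950, and 0x3dc5 is the f16 encoding (~1.4424). The
standalone C++ sketch below demonstrates the same rewrite outside of
LLVM; it is illustrative only, and the file and identifier names in it
are made up for this example:

// exp_via_exp2.cpp - illustrative sketch, not part of the patch.
// Demonstrates the rewrite lowerFEXP performs for f32:
//   exp(x)  ==>  exp2(x * log2(e))
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // Same literal getLog2EVal uses; it rounds to the f32 bit pattern
  // 0x3fb8aa3b seen in the CHECK lines above.
  const float Log2E = 1.44269504088896340735992468100189214f;

  std::uint32_t Bits;
  std::memcpy(&Bits, &Log2E, sizeof(Bits));
  std::printf("log2(e) as f32 bits: 0x%08x\n", (unsigned)Bits);

  for (float X : {-10.0f, -1.0f, 0.0f, 0.5f, 1.0f, 10.0f}) {
    float Ref = std::exp(X);              // libm base-e exp
    float Lowered = std::exp2(X * Log2E); // what the lowered code computes
    std::printf("x=%6.2f  exp=%.9g  exp2(x*log2e)=%.9g\n", X, Ref, Lowered);
  }
  return 0;
}

Because log2(e) is rounded to the target format, exp2(x * log2(e)) can
differ from a correctly rounded exp(x) by a few ULPs, which is the
usual accuracy trade-off for this kind of lowering.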