From d9333e360a7c52587ab6e4328e7493b357fb2cf3 Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Fri, 16 Jun 2023 18:13:07 -0400
Subject: [PATCH] Revert "AMDGPU: Drop and auto-upgrade llvm.amdgcn.ldexp to
 llvm.ldexp"

This reverts commit 1159c670d40e3ef302264c681fe7e0268a550874.

Accidentally pushed the wrong patch.
---
 llvm/include/llvm/IR/IntrinsicsAMDGPU.td          |   6 +
 llvm/lib/IR/AutoUpgrade.cpp                       |   7 -
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp     |   3 +-
 .../Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp  |  44 ++++
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp         |   4 +
 llvm/test/Bitcode/amdgcn-ldexp.ll                 |  30 ---
 llvm/test/CodeGen/AMDGPU/known-never-snan.ll      |   4 +-
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll | 230 +++++++++++++++++++++
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.ll     |  31 +++
 9 files changed, 319 insertions(+), 40 deletions(-)
 delete mode 100644 llvm/test/Bitcode/amdgcn-ldexp.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.ll

diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 969956f..8c0f25b 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -362,6 +362,12 @@ def int_amdgcn_rsq_legacy : ClangBuiltin<"__builtin_amdgcn_rsq_legacy">,
 def int_amdgcn_rsq_clamp : DefaultAttrsIntrinsic<
   [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]>;
 
+// For int_amdgcn_ldexp_f16, only the low 16 bits of the i32 src1 operand will be used.
+def int_amdgcn_ldexp : DefaultAttrsIntrinsic<
+  [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
+
 def int_amdgcn_frexp_mant : DefaultAttrsIntrinsic<
   [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
 >;
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 4dc578b..19e69ef 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -836,13 +836,6 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
                                         {F->getReturnType()});
       return true;
     }
-    if (Name.startswith("amdgcn.ldexp")) {
-      // Target specific intrinsic became redundant
-      NewFn = Intrinsic::getDeclaration(
-          F->getParent(), Intrinsic::ldexp,
-          {F->getReturnType(), F->getArg(1)->getType()});
-      return true;
-    }
     break;
   }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index d298f5b..a09604f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -3220,7 +3220,8 @@ SDValue AMDGPUTargetLowering::performIntrinsicWOChainCombine(
   case Intrinsic::amdgcn_rsq:
   case Intrinsic::amdgcn_rcp_legacy:
   case Intrinsic::amdgcn_rsq_legacy:
-  case Intrinsic::amdgcn_rsq_clamp: {
+  case Intrinsic::amdgcn_rsq_clamp:
+  case Intrinsic::amdgcn_ldexp: {
     // FIXME: This is probably wrong. If src is an sNaN, it won't be quieted
     SDValue Src = N->getOperand(1);
     return Src.isUndef() ? Src : SDValue();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index a7f5d61..b3bb61d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -996,6 +996,50 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
     break;
   }
+  case Intrinsic::amdgcn_ldexp: {
+    // FIXME: This doesn't introduce new instructions and belongs in
+    // InstructionSimplify.
+    Type *Ty = II.getType();
+    Value *Op0 = II.getArgOperand(0);
+    Value *Op1 = II.getArgOperand(1);
+
+    // Folding undef to qnan is safe regardless of the FP mode.
+    if (isa<UndefValue>(Op0)) {
+      auto *QNaN = ConstantFP::get(Ty, APFloat::getQNaN(Ty->getFltSemantics()));
+      return IC.replaceInstUsesWith(II, QNaN);
+    }
+
+    const APFloat *C = nullptr;
+    match(Op0, PatternMatch::m_APFloat(C));
+
+    // FIXME: Should flush denorms depending on FP mode, but that's ignored
+    // everywhere else.
+    //
+    // These cases should be safe, even with strictfp.
+    // ldexp(0.0, x) -> 0.0
+    // ldexp(-0.0, x) -> -0.0
+    // ldexp(inf, x) -> inf
+    // ldexp(-inf, x) -> -inf
+    if (C && (C->isZero() || C->isInfinity())) {
+      return IC.replaceInstUsesWith(II, Op0);
+    }
+
+    // With strictfp, be more careful about possibly needing to flush denormals
+    // or not, and snan behavior depends on ieee_mode.
+    if (II.isStrictFP())
+      break;
+
+    if (C && C->isNaN())
+      return IC.replaceInstUsesWith(II, ConstantFP::get(Ty, C->makeQuiet()));
+
+    // ldexp(x, 0) -> x
+    // ldexp(x, undef) -> x
+    if (isa<UndefValue>(Op1) || match(Op1, PatternMatch::m_ZeroInt())) {
+      return IC.replaceInstUsesWith(II, Op0);
+    }
+
+    break;
+  }
   case Intrinsic::amdgcn_fmul_legacy: {
     Value *Op0 = II.getArgOperand(0);
     Value *Op1 = II.getArgOperand(1);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index a79d72f..ab7a543 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -7223,6 +7223,9 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     return emitRemovedIntrinsicError(DAG, DL, VT);
   }
+  case Intrinsic::amdgcn_ldexp:
+    return DAG.getNode(ISD::FLDEXP, DL, VT, Op.getOperand(1), Op.getOperand(2));
+
   case Intrinsic::amdgcn_fract:
     return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
@@ -10669,6 +10672,7 @@ bool SITargetLowering::isCanonicalized(Register Reg, MachineFunction &MF,
   case Intrinsic::amdgcn_div_fmas:
   case Intrinsic::amdgcn_div_fixup:
   case Intrinsic::amdgcn_fract:
+  case Intrinsic::amdgcn_ldexp:
   case Intrinsic::amdgcn_cvt_pkrtz:
   case Intrinsic::amdgcn_cubeid:
   case Intrinsic::amdgcn_cubema:
diff --git a/llvm/test/Bitcode/amdgcn-ldexp.ll b/llvm/test/Bitcode/amdgcn-ldexp.ll
deleted file mode 100644
index 0ba901a..0000000
--- a/llvm/test/Bitcode/amdgcn-ldexp.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis | FileCheck %s
-
-define float @f32(float %a, i32 %b) {
-  ; CHECK: %call = call float @llvm.ldexp.f32.i32(float %a, i32 %b)
-  ; CHECK-NOT: amdgcn.ldexp
-  %call = call float @llvm.amdgcn.ldexp.f32(float %a, i32 %b)
-  ret float %call
-}
-
-define double @f64(double %a, i32 %b) {
-  ; CHECK: %call = call double @llvm.ldexp.f64.i32(double %a, i32 %b)
-  ; CHECK-NOT: amdgcn.ldexp
-  %call = call double @llvm.amdgcn.ldexp.f64(double %a, i32 %b)
-  ret double %call
-}
-
-define half @f16(half %a, i32 %b) {
-  ; CHECK: %call = call half @llvm.ldexp.f16.i32(half %a, i32 %b)
-  ; CHECK-NOT: amdgcn.ldexp
-  %call = call half @llvm.amdgcn.ldexp.f16(half %a, i32 %b)
-  ret half %call
-}
-
-declare half @llvm.amdgcn.ldexp.f16(half, i32)
-declare float @llvm.amdgcn.ldexp.f32(float, i32)
-declare double @llvm.amdgcn.ldexp.f64(double, i32)
-; CHECK: declare half @llvm.ldexp.f16.i32(half, i32)
-; CHECK: declare float @llvm.ldexp.f32.i32(float, i32)
-; CHECK: declare double @llvm.ldexp.f64.i32(double, i32)
-; CHECK-NOT: amdgcn.ldexp
diff --git a/llvm/test/CodeGen/AMDGPU/known-never-snan.ll b/llvm/test/CodeGen/AMDGPU/known-never-snan.ll
index cb54473..5ee6eda 100644
--- a/llvm/test/CodeGen/AMDGPU/known-never-snan.ll
+++ b/llvm/test/CodeGen/AMDGPU/known-never-snan.ll
@@ -516,7 +516,7 @@ define float @v_test_known_not_snan_ldexp_input_fmed3_r_i_i_f32(float %a, i32 %b
 ; GCN-NEXT:    v_ldexp_f32 v0, v0, v1
 ; GCN-NEXT:    v_med3_f32 v0, v0, 2.0, 4.0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %known.not.snan = call float @llvm.ldexp.f32.i32(float %a, i32 %b)
+  %known.not.snan = call float @llvm.amdgcn.ldexp.f32(float %a, i32 %b)
   %max = call float @llvm.maxnum.f32(float %known.not.snan, float 2.0)
   %med = call float @llvm.minnum.f32(float %max, float 4.0)
   ret float %med
@@ -658,7 +658,7 @@ declare float @llvm.maxnum.f32(float, float) #1
 declare float @llvm.copysign.f32(float, float) #1
 declare float @llvm.fma.f32(float, float, float) #1
 declare float @llvm.fmuladd.f32(float, float, float) #1
-declare float @llvm.ldexp.f32.i32(float, i32) #1
+declare float @llvm.amdgcn.ldexp.f32(float, i32) #1
 declare float @llvm.amdgcn.fmul.legacy(float, float) #1
 declare float @llvm.amdgcn.fmed3.f32(float, float, float) #1
 declare float @llvm.amdgcn.frexp.mant.f32(float) #1
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll
new file mode 100644
index 0000000..595dffe
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll
@@ -0,0 +1,230 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx1010 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+
+declare half @llvm.amdgcn.ldexp.f16(half %a, i32 %b)
+
+define amdgpu_kernel void @ldexp_f16(
+; VI-LABEL: ldexp_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x34
+; VI-NEXT:    s_mov_b32 s3, 0xf000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_mov_b32 s10, s2
+; VI-NEXT:    s_mov_b32 s11, s3
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 s12, s6
+; VI-NEXT:    s_mov_b32 s13, s7
+; VI-NEXT:    s_mov_b32 s14, s2
+; VI-NEXT:    s_mov_b32 s15, s3
+; VI-NEXT:    buffer_load_dword v0, off, s[8:11], 0
+; VI-NEXT:    buffer_load_ushort v1, off, s[12:15], 0
+; VI-NEXT:    s_mov_b32 s0, s4
+; VI-NEXT:    s_movk_i32 s4, 0x8000
+; VI-NEXT:    v_mov_b32_e32 v2, 0x7fff
+; VI-NEXT:    s_mov_b32 s1, s5
+; VI-NEXT:    s_waitcnt vmcnt(1)
+; VI-NEXT:    v_med3_i32 v0, v0, s4, v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_ldexp_f16_e32 v0, v1, v0
+; VI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; VI-NEXT:    s_endpgm
+;
+; GFX10-LABEL: ldexp_f16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_clause 0x1
+; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX10-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x34
+; GFX10-NEXT:    s_mov_b32 s2, -1
+; GFX10-NEXT:    s_mov_b32 s3, 0x31016000
+; GFX10-NEXT:    s_mov_b32 s10, s2
+; GFX10-NEXT:    s_mov_b32 s11, s3
+; GFX10-NEXT:    s_mov_b32 s14, s2
+; GFX10-NEXT:    s_mov_b32 s15, s3
+; GFX10-NEXT:    s_movk_i32 s0, 0x8000
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    s_mov_b32 s12, s6
+; GFX10-NEXT:    buffer_load_dword v0, off, s[8:11], 0
+; GFX10-NEXT:    s_mov_b32 s13, s7
+; GFX10-NEXT:    s_mov_b32 s1, s5
+; GFX10-NEXT:    buffer_load_ushort v1, off, s[12:15], 0
+; GFX10-NEXT:    s_waitcnt vmcnt(1)
+; GFX10-NEXT:    v_med3_i32 v0, v0, s0, 0x7fff
+; GFX10-NEXT:    s_mov_b32 s0, s4
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    v_ldexp_f16_e32 v0, v1, v0
+; GFX10-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; GFX10-NEXT:    s_endpgm
+;
+; GFX11-LABEL: ldexp_f16:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_clause 0x1
+; GFX11-NEXT:    s_load_b128 s[4:7], s[0:1], 0x24
+; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x34
+; GFX11-NEXT:    s_mov_b32 s10, -1
+; GFX11-NEXT:    s_mov_b32 s11, 0x31016000
+; GFX11-NEXT:    s_mov_b32 s2, s10
+; GFX11-NEXT:    s_mov_b32 s3, s11
+; GFX11-NEXT:    s_mov_b32 s14, s10
+; GFX11-NEXT:    s_mov_b32 s15, s11
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    s_mov_b32 s12, s6
+; GFX11-NEXT:    buffer_load_b32 v0, off, s[0:3], 0
+; GFX11-NEXT:    s_mov_b32 s13, s7
+; GFX11-NEXT:    s_movk_i32 s0, 0x8000
+; GFX11-NEXT:    buffer_load_u16 v1, off, s[12:15], 0
+; GFX11-NEXT:    s_mov_b32 s8, s4
+; GFX11-NEXT:    s_mov_b32 s9, s5
+; GFX11-NEXT:    s_waitcnt vmcnt(1)
+; GFX11-NEXT:    v_med3_i32 v0, v0, s0, 0x7fff
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_ldexp_f16_e32 v0, v1, v0
+; GFX11-NEXT:    buffer_store_b16 v0, off, s[8:11], 0
+; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT:    s_endpgm
+    ptr addrspace(1) %r,
+    ptr addrspace(1) %a,
+    ptr addrspace(1) %b) {
+  %a.val = load half, ptr addrspace(1) %a
+  %b.val = load i32, ptr addrspace(1) %b
+  %r.val = call half @llvm.amdgcn.ldexp.f16(half %a.val, i32 %b.val)
+  store half %r.val, ptr addrspace(1) %r
+  ret void
+}
+
+define amdgpu_kernel void @ldexp_f16_imm_a(
+; VI-LABEL: ldexp_f16_imm_a:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_mov_b32 s7, 0xf000
+; VI-NEXT:    s_mov_b32 s6, -1
+; VI-NEXT:    s_mov_b32 s10, s6
+; VI-NEXT:    s_mov_b32 s11, s7
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 s8, s2
+; VI-NEXT:    s_mov_b32 s9, s3
+; VI-NEXT:    buffer_load_dword v0, off, s[8:11], 0
+; VI-NEXT:    s_mov_b32 s4, s0
+; VI-NEXT:    s_movk_i32 s0, 0x8000
+; VI-NEXT:    v_mov_b32_e32 v1, 0x7fff
+; VI-NEXT:    s_mov_b32 s5, s1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_med3_i32 v0, v0, s0, v1
+; VI-NEXT:    v_ldexp_f16_e32 v0, 2.0, v0
+; VI-NEXT:    buffer_store_short v0, off, s[4:7], 0
+; VI-NEXT:    s_endpgm
+;
+; GFX10-LABEL: ldexp_f16_imm_a:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX10-NEXT:    s_mov_b32 s6, -1
+; GFX10-NEXT:    s_mov_b32 s7, 0x31016000
+; GFX10-NEXT:    s_mov_b32 s10, s6
+; GFX10-NEXT:    s_mov_b32 s11, s7
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    s_mov_b32 s8, s2
+; GFX10-NEXT:    s_mov_b32 s9, s3
+; GFX10-NEXT:    s_movk_i32 s2, 0x8000
+; GFX10-NEXT:    buffer_load_dword v0, off, s[8:11], 0
+; GFX10-NEXT:    s_mov_b32 s4, s0
+; GFX10-NEXT:    s_mov_b32 s5, s1
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    v_med3_i32 v0, v0, s2, 0x7fff
+; GFX10-NEXT:    v_ldexp_f16_e32 v0, 2.0, v0
+; GFX10-NEXT:    buffer_store_short v0, off, s[4:7], 0
+; GFX10-NEXT:    s_endpgm
+;
+; GFX11-LABEL: ldexp_f16_imm_a:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; GFX11-NEXT:    s_mov_b32 s6, -1
+; GFX11-NEXT:    s_mov_b32 s7, 0x31016000
+; GFX11-NEXT:    s_mov_b32 s10, s6
+; GFX11-NEXT:    s_mov_b32 s11, s7
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    s_mov_b32 s8, s2
+; GFX11-NEXT:    s_mov_b32 s9, s3
+; GFX11-NEXT:    s_movk_i32 s2, 0x8000
+; GFX11-NEXT:    buffer_load_b32 v0, off, s[8:11], 0
+; GFX11-NEXT:    s_mov_b32 s4, s0
+; GFX11-NEXT:    s_mov_b32 s5, s1
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    v_med3_i32 v0, v0, s2, 0x7fff
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_ldexp_f16_e32 v0, 2.0, v0
+; GFX11-NEXT:    buffer_store_b16 v0, off, s[4:7], 0
+; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT:    s_endpgm
+    ptr addrspace(1) %r,
+    ptr addrspace(1) %b) {
+  %b.val = load i32, ptr addrspace(1) %b
+  %r.val = call half @llvm.amdgcn.ldexp.f16(half 2.0, i32 %b.val)
+  store half %r.val, ptr addrspace(1) %r
+  ret void
+}
+
+define amdgpu_kernel void @ldexp_f16_imm_b(
+; VI-LABEL: ldexp_f16_imm_b:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_mov_b32 s7, 0xf000
+; VI-NEXT:    s_mov_b32 s6, -1
+; VI-NEXT:    s_mov_b32 s10, s6
+; VI-NEXT:    s_mov_b32 s11, s7
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 s8, s2
+; VI-NEXT:    s_mov_b32 s9, s3
+; VI-NEXT:    buffer_load_ushort v0, off, s[8:11], 0
+; VI-NEXT:    s_mov_b32 s4, s0
+; VI-NEXT:    s_mov_b32 s5, s1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_ldexp_f16_e64 v0, v0, 2
+; VI-NEXT:    buffer_store_short v0, off, s[4:7], 0
+; VI-NEXT:    s_endpgm
+;
+; GFX10-LABEL: ldexp_f16_imm_b:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX10-NEXT:    s_mov_b32 s6, -1
+; GFX10-NEXT:    s_mov_b32 s7, 0x31016000
+; GFX10-NEXT:    s_mov_b32 s10, s6
+; GFX10-NEXT:    s_mov_b32 s11, s7
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    s_mov_b32 s8, s2
+; GFX10-NEXT:    s_mov_b32 s9, s3
+; GFX10-NEXT:    s_mov_b32 s4, s0
+; GFX10-NEXT:    buffer_load_ushort v0, off, s[8:11], 0
+; GFX10-NEXT:    s_mov_b32 s5, s1
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    v_ldexp_f16_e64 v0, v0, 2
+; GFX10-NEXT:    buffer_store_short v0, off, s[4:7], 0
+; GFX10-NEXT:    s_endpgm
+;
+; GFX11-LABEL: ldexp_f16_imm_b:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; GFX11-NEXT:    s_mov_b32 s6, -1
+; GFX11-NEXT:    s_mov_b32 s7, 0x31016000
+; GFX11-NEXT:    s_mov_b32 s10, s6
+; GFX11-NEXT:    s_mov_b32 s11, s7
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    s_mov_b32 s8, s2
+; GFX11-NEXT:    s_mov_b32 s9, s3
+; GFX11-NEXT:    s_mov_b32 s4, s0
+; GFX11-NEXT:    buffer_load_u16 v0, off, s[8:11], 0
+; GFX11-NEXT:    s_mov_b32 s5, s1
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    v_ldexp_f16_e64 v0, v0, 2
+; GFX11-NEXT:    buffer_store_b16 v0, off, s[4:7], 0
+; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT:    s_endpgm
+    ptr addrspace(1) %r,
+    ptr addrspace(1) %a) {
+  %a.val = load half, ptr addrspace(1) %a
+  %r.val = call half @llvm.amdgcn.ldexp.f16(half %a.val, i32 2)
+  store half %r.val, ptr addrspace(1) %r
+  ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.ll
new file mode 100644
index 0000000..d428206
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.ll
@@ -0,0 +1,31 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare float @llvm.amdgcn.ldexp.f32(float, i32) nounwind readnone
+declare double @llvm.amdgcn.ldexp.f64(double, i32) nounwind readnone
+
+; SI-LABEL: {{^}}test_ldexp_f32:
+; SI: v_ldexp_f32
+; SI: s_endpgm
+define amdgpu_kernel void @test_ldexp_f32(ptr addrspace(1) %out, float %a, i32 %b) nounwind {
+  %result = call float @llvm.amdgcn.ldexp.f32(float %a, i32 %b) nounwind readnone
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}test_ldexp_f64:
+; SI: v_ldexp_f64
+; SI: s_endpgm
+define amdgpu_kernel void @test_ldexp_f64(ptr addrspace(1) %out, double %a, i32 %b) nounwind {
+  %result = call double @llvm.amdgcn.ldexp.f64(double %a, i32 %b) nounwind readnone
+  store double %result, ptr addrspace(1) %out, align 8
+  ret void
+}
+
+; SI-LABEL: {{^}}test_ldexp_undef_f32:
+; SI-NOT: v_ldexp_f32
+define amdgpu_kernel void @test_ldexp_undef_f32(ptr addrspace(1) %out, i32 %b) nounwind {
+  %result = call float @llvm.amdgcn.ldexp.f32(float undef, i32 %b) nounwind readnone
+  store float %result, ptr addrspace(1) %out, align 4
+  ret void
+}
-- 
2.7.4
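
For reference, a minimal LLVM IR sketch of the constant folds that the
AMDGPUInstCombineIntrinsic.cpp hunk above restores. This is illustrative only
and not part of the patch: the function names are made up, and it assumes an
AMDGPU-enabled build of opt, run as
`opt -mtriple=amdgcn-- -passes=instcombine -S`.

declare float @llvm.amdgcn.ldexp.f32(float, i32)

define float @fold_exp_zero(float %x) {
  ; ldexp(x, 0) -> x: the call folds away and %x is returned directly.
  %r = call float @llvm.amdgcn.ldexp.f32(float %x, i32 0)
  ret float %r
}

define float @fold_undef_src(i32 %n) {
  ; ldexp(undef, n) -> qnan; folding undef to qnan is safe in any FP mode.
  %r = call float @llvm.amdgcn.ldexp.f32(float undef, i32 %n)
  ret float %r
}

define float @fold_neg_zero(i32 %n) {
  ; ldexp(-0.0, n) -> -0.0; zeroes and infinities pass through unchanged.
  %r = call float @llvm.amdgcn.ldexp.f32(float -0.0, i32 %n)
  ret float %r
}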