From e6c364a62456409f22470e163ffd7f68772d4198 Mon Sep 17 00:00:00 2001 From: Jay Foad Date: Thu, 5 Aug 2021 09:58:29 +0100 Subject: [PATCH] [AMDGPU][SDag] Better lowering for 64-bit ctlz/cttz Differential Revision: https://reviews.llvm.org/D107546 --- llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 75 +-- llvm/test/CodeGen/AMDGPU/ctlz.ll | 166 +++--- llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll | 62 +-- llvm/test/CodeGen/AMDGPU/cttz.ll | 166 +++--- llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll | 25 +- llvm/test/CodeGen/AMDGPU/sdiv64.ll | 488 +++++++++--------- llvm/test/CodeGen/AMDGPU/srem64.ll | 698 ++++++++++++-------------- llvm/test/CodeGen/AMDGPU/udiv64.ll | 630 +++++++++++------------ llvm/test/CodeGen/AMDGPU/urem64.ll | 448 ++++++++--------- 9 files changed, 1243 insertions(+), 1515 deletions(-) diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp index 9b65ed1..5e30e1c 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp @@ -2377,69 +2377,42 @@ static bool isCttzOpc(unsigned Opc) { SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const { SDLoc SL(Op); SDValue Src = Op.getOperand(0); - bool ZeroUndef = Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF || - Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF; - unsigned ISDOpc, NewOpc; - if (isCtlzOpc(Op.getOpcode())) { - ISDOpc = ISD::CTLZ_ZERO_UNDEF; - NewOpc = AMDGPUISD::FFBH_U32; - } else if (isCttzOpc(Op.getOpcode())) { - ISDOpc = ISD::CTTZ_ZERO_UNDEF; - NewOpc = AMDGPUISD::FFBL_B32; - } else - llvm_unreachable("Unexpected OPCode!!!"); + assert(isCtlzOpc(Op.getOpcode()) || isCttzOpc(Op.getOpcode())); + bool Ctlz = isCtlzOpc(Op.getOpcode()); + unsigned NewOpc = Ctlz ? AMDGPUISD::FFBH_U32 : AMDGPUISD::FFBL_B32; + bool ZeroUndef = Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF || + Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF; - if (ZeroUndef && Src.getValueType() == MVT::i32) + if (Src.getValueType() == MVT::i32) { + assert(ZeroUndef); return DAG.getNode(NewOpc, SL, MVT::i32, Src); - - const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); + } SDValue Lo, Hi; std::tie(Lo, Hi) = split64BitValue(Src, DAG); - EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), - *DAG.getContext(), MVT::i32); + SDValue OprLo = DAG.getNode(NewOpc, SL, MVT::i32, Lo); + SDValue OprHi = DAG.getNode(NewOpc, SL, MVT::i32, Hi); - SDValue HiOrLo = isCtlzOpc(Op.getOpcode()) ? Hi : Lo; - SDValue Hi0orLo0 = DAG.getSetCC(SL, SetCCVT, HiOrLo, Zero, ISD::SETEQ); + // (ctlz hi:lo) -> (umin3 (ffbh hi), (uaddsat (ffbh lo), 32), 64) + // (cttz hi:lo) -> (umin3 (uaddsat (ffbl hi), 32), (ffbl lo), 64) + // (ctlz_zero_undef hi:lo) -> (umin (ffbh hi), (add (ffbh lo), 32)) + // (cttz_zero_undef hi:lo) -> (umin (add (ffbl hi), 32), (ffbl lo)) - SDValue OprLo = DAG.getNode(ISDOpc, SL, MVT::i32, Lo); - SDValue OprHi = DAG.getNode(ISDOpc, SL, MVT::i32, Hi); - - const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32); - SDValue Add, NewOpr; - if (isCtlzOpc(Op.getOpcode())) { - Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprLo, Bits32); - // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x)) - NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprHi); - } else { - Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprHi, Bits32); - // cttz(x) = lo_32(x) == 0 ? cttz(hi_32(x)) + 32 : cttz(lo_32(x)) - NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprLo); - } + unsigned AddOpc = ZeroUndef ? 
ISD::ADD : ISD::UADDSAT; + const SDValue Const32 = DAG.getConstant(32, SL, MVT::i32); + if (Ctlz) + OprLo = DAG.getNode(AddOpc, SL, MVT::i32, OprLo, Const32); + else + OprHi = DAG.getNode(AddOpc, SL, MVT::i32, OprHi, Const32); + SDValue NewOpr; + NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, OprLo, OprHi); if (!ZeroUndef) { - // Test if the full 64-bit input is zero. - - // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32, - // which we probably don't want. - SDValue LoOrHi = isCtlzOpc(Op.getOpcode()) ? Lo : Hi; - SDValue Lo0OrHi0 = DAG.getSetCC(SL, SetCCVT, LoOrHi, Zero, ISD::SETEQ); - SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0OrHi0, Hi0orLo0); - - // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction - // with the same cycles, otherwise it is slower. - // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src, - // DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ); - - const SDValue Bits32 = DAG.getConstant(64, SL, MVT::i32); - - // The instruction returns -1 for 0 input, but the defined intrinsic - // behavior is to return the number of bits. - NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, - SrcIsZero, Bits32, NewOpr); + const SDValue Const64 = DAG.getConstant(64, SL, MVT::i32); + NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, NewOpr, Const64); } return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr); diff --git a/llvm/test/CodeGen/AMDGPU/ctlz.ll b/llvm/test/CodeGen/AMDGPU/ctlz.ll index e28817f9..c51e125 100644 --- a/llvm/test/CodeGen/AMDGPU/ctlz.ll +++ b/llvm/test/CodeGen/AMDGPU/ctlz.ll @@ -557,16 +557,12 @@ define amdgpu_kernel void @s_ctlz_i64(i64 addrspace(1)* noalias %out, [8 x i32], ; SI-NEXT: s_mov_b32 s3, 0xf000 ; SI-NEXT: s_mov_b32 s2, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_flbit_i32_b32 s6, s4 -; SI-NEXT: s_flbit_i32_b32 s7, s5 -; SI-NEXT: s_add_i32 s6, s6, 32 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v0, s7 -; SI-NEXT: v_mov_b32_e32 v1, s6 -; SI-NEXT: v_cmp_eq_u32_e64 vcc, s5, 0 -; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; SI-NEXT: v_cmp_ne_u32_e64 vcc, s4, 0 -; SI-NEXT: v_cndmask_b32_e32 v0, 64, v0, vcc +; SI-NEXT: s_flbit_i32_b32 s4, s4 +; SI-NEXT: s_flbit_i32_b32 s5, s5 +; SI-NEXT: s_min_u32 s4, s4, 0xffffffdf +; SI-NEXT: v_mov_b32_e32 v0, s5 +; SI-NEXT: s_add_i32 s4, s4, 32 +; SI-NEXT: v_min3_u32 v0, s4, v0, 64 ; SI-NEXT: v_mov_b32_e32 v1, 0 ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-NEXT: s_endpgm @@ -579,15 +575,10 @@ define amdgpu_kernel void @s_ctlz_i64(i64 addrspace(1)* noalias %out, [8 x i32], ; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: v_mov_b32_e32 v1, 0 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_flbit_i32_b32 s2, s0 -; VI-NEXT: s_add_i32 s2, s2, 32 -; VI-NEXT: s_flbit_i32_b32 s3, s1 -; VI-NEXT: s_cmp_eq_u32 s1, 0 -; VI-NEXT: s_cselect_b32 s2, s2, s3 -; VI-NEXT: s_or_b32 s0, s0, s1 -; VI-NEXT: s_cmp_lg_u32 s0, 0 -; VI-NEXT: s_cselect_b32 s0, s2, 64 -; VI-NEXT: v_mov_b32_e32 v0, s0 +; VI-NEXT: s_flbit_i32_b32 s0, s0 +; VI-NEXT: v_add_u32_e64 v0, s[2:3], s0, 32 clamp +; VI-NEXT: s_flbit_i32_b32 s0, s1 +; VI-NEXT: v_min3_u32 v0, v0, s0, 64 ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; VI-NEXT: s_endpgm ; @@ -617,14 +608,9 @@ define amdgpu_kernel void @s_ctlz_i64(i64 addrspace(1)* noalias %out, [8 x i32], ; GFX10-NEXT: v_mov_b32_e32 v1, 0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_flbit_i32_b32 s0, s2 -; GFX10-NEXT: s_flbit_i32_b32 s1, s3 -; GFX10-NEXT: s_add_i32 s0, s0, 32 -; GFX10-NEXT: s_cmp_eq_u32 s3, 0 -; GFX10-NEXT: s_cselect_b32 s0, s0, s1 -; 
GFX10-NEXT: s_or_b32 s1, s2, s3 -; GFX10-NEXT: s_cmp_lg_u32 s1, 0 -; GFX10-NEXT: s_cselect_b32 s0, s0, 64 -; GFX10-NEXT: v_mov_b32_e32 v0, s0 +; GFX10-NEXT: v_add_nc_u32_e64 v0, s0, 32 clamp +; GFX10-NEXT: s_flbit_i32_b32 s0, s3 +; GFX10-NEXT: v_min3_u32 v0, v0, s0, 64 ; GFX10-NEXT: global_store_dwordx2 v1, v[0:1], s[4:5] ; GFX10-NEXT: s_endpgm ; @@ -656,16 +642,12 @@ define amdgpu_kernel void @s_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64 ; SI-NEXT: s_mov_b32 s3, 0xf000 ; SI-NEXT: s_mov_b32 s2, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_flbit_i32_b32 s6, s4 -; SI-NEXT: s_flbit_i32_b32 s7, s5 -; SI-NEXT: s_add_i32 s6, s6, 32 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v0, s7 -; SI-NEXT: v_mov_b32_e32 v1, s6 -; SI-NEXT: v_cmp_eq_u32_e64 vcc, s5, 0 -; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; SI-NEXT: v_cmp_ne_u32_e64 vcc, s4, 0 -; SI-NEXT: v_cndmask_b32_e32 v0, 64, v0, vcc +; SI-NEXT: s_flbit_i32_b32 s4, s4 +; SI-NEXT: s_min_u32 s4, s4, 0xffffffdf +; SI-NEXT: s_flbit_i32_b32 s5, s5 +; SI-NEXT: s_add_i32 s4, s4, 32 +; SI-NEXT: v_mov_b32_e32 v0, s5 +; SI-NEXT: v_min3_u32 v0, s4, v0, 64 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; @@ -676,15 +658,10 @@ define amdgpu_kernel void @s_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64 ; VI-NEXT: s_mov_b32 s7, 0xf000 ; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_flbit_i32_b32 s2, s0 -; VI-NEXT: s_add_i32 s2, s2, 32 -; VI-NEXT: s_flbit_i32_b32 s3, s1 -; VI-NEXT: s_cmp_eq_u32 s1, 0 -; VI-NEXT: s_cselect_b32 s2, s2, s3 -; VI-NEXT: s_or_b32 s0, s0, s1 -; VI-NEXT: s_cmp_lg_u32 s0, 0 -; VI-NEXT: s_cselect_b32 s0, s2, 64 -; VI-NEXT: v_mov_b32_e32 v0, s0 +; VI-NEXT: s_flbit_i32_b32 s0, s0 +; VI-NEXT: v_add_u32_e64 v0, s[2:3], s0, 32 clamp +; VI-NEXT: s_flbit_i32_b32 s0, s1 +; VI-NEXT: v_min3_u32 v0, v0, s0, 64 ; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0 ; VI-NEXT: s_endpgm ; @@ -710,18 +687,13 @@ define amdgpu_kernel void @s_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64 ; GFX10-NEXT: s_clause 0x1 ; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x2c ; GFX10-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 -; GFX10-NEXT: v_mov_b32_e32 v0, 0 +; GFX10-NEXT: v_mov_b32_e32 v1, 0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_flbit_i32_b32 s0, s2 -; GFX10-NEXT: s_flbit_i32_b32 s1, s3 -; GFX10-NEXT: s_add_i32 s0, s0, 32 -; GFX10-NEXT: s_cmp_eq_u32 s3, 0 -; GFX10-NEXT: s_cselect_b32 s0, s0, s1 -; GFX10-NEXT: s_or_b32 s1, s2, s3 -; GFX10-NEXT: s_cmp_lg_u32 s1, 0 -; GFX10-NEXT: s_cselect_b32 s0, s0, 64 -; GFX10-NEXT: v_mov_b32_e32 v1, s0 -; GFX10-NEXT: global_store_dword v0, v1, s[4:5] +; GFX10-NEXT: v_add_nc_u32_e64 v0, s0, 32 clamp +; GFX10-NEXT: s_flbit_i32_b32 s0, s3 +; GFX10-NEXT: v_min3_u32 v0, v0, s0, 64 +; GFX10-NEXT: global_store_dword v1, v0, s[4:5] ; GFX10-NEXT: s_endpgm ; ; GFX10-GISEL-LABEL: s_ctlz_i64_trunc: @@ -755,14 +727,11 @@ define amdgpu_kernel void @v_ctlz_i64(i64 addrspace(1)* noalias %out, i64 addrsp ; SI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_ffbh_u32_e32 v4, v2 -; SI-NEXT: v_ffbh_u32_e32 v5, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v4, vcc, 32, v4 -; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3 -; SI-NEXT: v_cndmask_b32_e32 v3, v5, v4, vcc -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 -; SI-NEXT: v_cndmask_b32_e32 v2, 64, v3, vcc +; SI-NEXT: v_ffbh_u32_e32 v2, v2 +; SI-NEXT: v_min_u32_e32 v2, 0xffffffdf, v2 +; SI-NEXT: v_add_i32_e32 v2, 
vcc, 32, v2 +; SI-NEXT: v_ffbh_u32_e32 v3, v3 +; SI-NEXT: v_min3_u32 v2, v2, v3, 64 ; SI-NEXT: v_mov_b32_e32 v3, v1 ; SI-NEXT: s_waitcnt lgkmcnt(0) ; SI-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64 @@ -773,25 +742,20 @@ define amdgpu_kernel void @v_ctlz_i64(i64 addrspace(1)* noalias %out, i64 addrsp ; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 ; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c ; VI-NEXT: v_lshlrev_b32_e32 v3, 3, v0 -; VI-NEXT: v_mov_b32_e32 v4, 0 ; VI-NEXT: v_mov_b32_e32 v2, 0 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: v_mov_b32_e32 v5, s3 +; VI-NEXT: v_mov_b32_e32 v4, s3 ; VI-NEXT: v_mov_b32_e32 v1, s1 ; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v3 -; VI-NEXT: v_addc_u32_e32 v1, vcc, v1, v4, vcc +; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc ; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1] ; VI-NEXT: v_add_u32_e32 v3, vcc, s2, v3 -; VI-NEXT: v_addc_u32_e32 v4, vcc, v5, v4, vcc +; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_ffbh_u32_e32 v5, v0 -; VI-NEXT: v_add_u32_e32 v5, vcc, 32, v5 -; VI-NEXT: v_ffbh_u32_e32 v6, v1 -; VI-NEXT: v_or_b32_e32 v0, v0, v1 -; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; VI-NEXT: v_cndmask_b32_e32 v1, v6, v5, vcc -; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; VI-NEXT: v_cndmask_b32_e32 v1, 64, v1, vcc +; VI-NEXT: v_ffbh_u32_e32 v0, v0 +; VI-NEXT: v_add_u32_e64 v0, s[0:1], v0, 32 clamp +; VI-NEXT: v_ffbh_u32_e32 v1, v1 +; VI-NEXT: v_min3_u32 v1, v0, v1, 64 ; VI-NEXT: flat_store_dwordx2 v[3:4], v[1:2] ; VI-NEXT: s_endpgm ; @@ -830,14 +794,10 @@ define amdgpu_kernel void @v_ctlz_i64(i64 addrspace(1)* noalias %out, i64 addrsp ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: global_load_dwordx2 v[0:1], v2, s[2:3] ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_ffbh_u32_e32 v3, v0 -; GFX10-NEXT: v_ffbh_u32_e32 v4, v1 -; GFX10-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 -; GFX10-NEXT: v_add_nc_u32_e32 v3, 32, v3 -; GFX10-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc_lo -; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0 -; GFX10-NEXT: v_cndmask_b32_e32 v0, 64, v1, vcc_lo +; GFX10-NEXT: v_ffbh_u32_e32 v0, v0 +; GFX10-NEXT: v_ffbh_u32_e32 v1, v1 +; GFX10-NEXT: v_add_nc_u32_e64 v0, v0, 32 clamp +; GFX10-NEXT: v_min3_u32 v0, v0, v1, 64 ; GFX10-NEXT: v_mov_b32_e32 v1, 0 ; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] ; GFX10-NEXT: s_endpgm @@ -883,13 +843,10 @@ define amdgpu_kernel void @v_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64 ; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_ffbh_u32_e32 v0, v3 -; SI-NEXT: v_ffbh_u32_e32 v5, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 +; SI-NEXT: v_min_u32_e32 v0, 0xffffffdf, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 32, v0 -; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 -; SI-NEXT: v_cndmask_b32_e32 v0, v5, v0, vcc -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; SI-NEXT: v_cndmask_b32_e32 v0, 64, v0, vcc +; SI-NEXT: v_ffbh_u32_e32 v3, v4 +; SI-NEXT: v_min3_u32 v0, v0, v3, 64 ; SI-NEXT: s_waitcnt lgkmcnt(0) ; SI-NEXT: buffer_store_dword v0, v[1:2], s[4:7], 0 addr64 ; SI-NEXT: s_endpgm @@ -899,25 +856,20 @@ define amdgpu_kernel void @v_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64 ; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 ; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c ; VI-NEXT: v_lshlrev_b32_e32 v1, 3, v0 -; VI-NEXT: v_mov_b32_e32 v4, 0 ; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: v_mov_b32_e32 v5, s3 +; VI-NEXT: v_mov_b32_e32 v4, s3 ; VI-NEXT: v_mov_b32_e32 v2, s1 ; VI-NEXT: 
v_add_u32_e32 v1, vcc, s0, v1 -; VI-NEXT: v_addc_u32_e32 v2, vcc, v2, v4, vcc +; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc ; VI-NEXT: flat_load_dwordx2 v[1:2], v[1:2] ; VI-NEXT: v_add_u32_e32 v3, vcc, s2, v0 -; VI-NEXT: v_addc_u32_e32 v4, vcc, v5, v4, vcc +; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_ffbh_u32_e32 v0, v1 -; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0 -; VI-NEXT: v_ffbh_u32_e32 v5, v2 -; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 -; VI-NEXT: v_or_b32_e32 v1, v1, v2 -; VI-NEXT: v_cndmask_b32_e32 v0, v5, v0, vcc -; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1 -; VI-NEXT: v_cndmask_b32_e32 v0, 64, v0, vcc +; VI-NEXT: v_add_u32_e64 v0, s[0:1], v0, 32 clamp +; VI-NEXT: v_ffbh_u32_e32 v1, v2 +; VI-NEXT: v_min3_u32 v0, v0, v1, 64 ; VI-NEXT: flat_store_dword v[3:4], v0 ; VI-NEXT: s_endpgm ; @@ -957,14 +909,10 @@ define amdgpu_kernel void @v_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: global_load_dwordx2 v[1:2], v1, s[2:3] ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_ffbh_u32_e32 v3, v1 -; GFX10-NEXT: v_ffbh_u32_e32 v4, v2 -; GFX10-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2 -; GFX10-NEXT: v_add_nc_u32_e32 v3, 32, v3 -; GFX10-NEXT: v_cndmask_b32_e32 v2, v4, v3, vcc_lo -; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1 -; GFX10-NEXT: v_cndmask_b32_e32 v1, 64, v2, vcc_lo +; GFX10-NEXT: v_ffbh_u32_e32 v1, v1 +; GFX10-NEXT: v_ffbh_u32_e32 v2, v2 +; GFX10-NEXT: v_add_nc_u32_e64 v1, v1, 32 clamp +; GFX10-NEXT: v_min3_u32 v1, v1, v2, 64 ; GFX10-NEXT: global_store_dword v0, v1, s[0:1] ; GFX10-NEXT: s_endpgm ; diff --git a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll index 2b064f2..0f83cae 100644 --- a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll +++ b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll @@ -405,16 +405,14 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i64(i64 addrspace(1)* noalias %out, ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x13 ; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9 ; SI-NEXT: s_mov_b32 s3, 0xf000 -; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_flbit_i32_b32 s2, s4 -; SI-NEXT: s_flbit_i32_b32 s4, s5 -; SI-NEXT: s_add_i32 s6, s2, 32 ; SI-NEXT: s_mov_b32 s2, -1 -; SI-NEXT: v_mov_b32_e32 v0, s4 -; SI-NEXT: v_mov_b32_e32 v1, s6 -; SI-NEXT: v_cmp_eq_u32_e64 vcc, s5, 0 -; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_flbit_i32_b32 s4, s4 +; SI-NEXT: s_flbit_i32_b32 s5, s5 +; SI-NEXT: s_add_i32 s4, s4, 32 +; SI-NEXT: s_min_u32 s4, s4, s5 ; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-NEXT: s_endpgm ; @@ -426,10 +424,9 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i64(i64 addrspace(1)* noalias %out, ; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: v_mov_b32_e32 v2, s2 ; VI-NEXT: s_flbit_i32_b32 s0, s0 +; VI-NEXT: s_flbit_i32_b32 s1, s1 ; VI-NEXT: s_add_i32 s0, s0, 32 -; VI-NEXT: s_flbit_i32_b32 s4, s1 -; VI-NEXT: s_cmp_eq_u32 s1, 0 -; VI-NEXT: s_cselect_b32 s0, s0, s4 +; VI-NEXT: s_min_u32 s0, s0, s1 ; VI-NEXT: v_mov_b32_e32 v0, s0 ; VI-NEXT: v_mov_b32_e32 v3, s3 ; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1] @@ -477,12 +474,10 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i64_trunc(i32 addrspace(1)* noalias ; SI-NEXT: s_waitcnt lgkmcnt(0) ; SI-NEXT: s_flbit_i32_b32 s2, s4 ; SI-NEXT: s_flbit_i32_b32 s4, s5 -; SI-NEXT: s_add_i32 s6, s2, 32 +; SI-NEXT: s_add_i32 s2, s2, 32 +; SI-NEXT: s_min_u32 s4, s2, s4 ; 
SI-NEXT: s_mov_b32 s2, -1 ; SI-NEXT: v_mov_b32_e32 v0, s4 -; SI-NEXT: v_mov_b32_e32 v1, s6 -; SI-NEXT: v_cmp_eq_u32_e64 vcc, s5, 0 -; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc ; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; @@ -493,10 +488,9 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i64_trunc(i32 addrspace(1)* noalias ; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: v_mov_b32_e32 v0, s2 ; VI-NEXT: s_flbit_i32_b32 s0, s0 +; VI-NEXT: s_flbit_i32_b32 s1, s1 ; VI-NEXT: s_add_i32 s0, s0, 32 -; VI-NEXT: s_flbit_i32_b32 s4, s1 -; VI-NEXT: s_cmp_eq_u32 s1, 0 -; VI-NEXT: s_cselect_b32 s0, s0, s4 +; VI-NEXT: s_min_u32 s0, s0, s1 ; VI-NEXT: v_mov_b32_e32 v1, s3 ; VI-NEXT: v_mov_b32_e32 v2, s0 ; VI-NEXT: flat_store_dword v[0:1], v2 @@ -546,10 +540,9 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i64(i64 addrspace(1)* noalias %out, ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_ffbh_u32_e32 v2, v2 -; SI-NEXT: v_ffbh_u32_e32 v4, v3 ; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v2 -; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3 -; SI-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc +; SI-NEXT: v_ffbh_u32_e32 v3, v3 +; SI-NEXT: v_min_u32_e32 v2, v2, v3 ; SI-NEXT: v_mov_b32_e32 v3, v1 ; SI-NEXT: s_waitcnt lgkmcnt(0) ; SI-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64 @@ -560,22 +553,20 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i64(i64 addrspace(1)* noalias %out, ; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 ; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c ; VI-NEXT: v_lshlrev_b32_e32 v3, 3, v0 -; VI-NEXT: v_mov_b32_e32 v4, 0 ; VI-NEXT: v_mov_b32_e32 v2, 0 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: v_mov_b32_e32 v5, s3 +; VI-NEXT: v_mov_b32_e32 v4, s3 ; VI-NEXT: v_mov_b32_e32 v1, s1 ; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v3 -; VI-NEXT: v_addc_u32_e32 v1, vcc, v1, v4, vcc +; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc ; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1] ; VI-NEXT: v_add_u32_e32 v3, vcc, s2, v3 -; VI-NEXT: v_addc_u32_e32 v4, vcc, v5, v4, vcc +; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_ffbh_u32_e32 v0, v0 ; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0 -; VI-NEXT: v_ffbh_u32_e32 v5, v1 -; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; VI-NEXT: v_cndmask_b32_e32 v1, v5, v0, vcc +; VI-NEXT: v_ffbh_u32_e32 v1, v1 +; VI-NEXT: v_min_u32_e32 v1, v0, v1 ; VI-NEXT: flat_store_dwordx2 v[3:4], v[1:2] ; VI-NEXT: s_endpgm ; @@ -643,10 +634,9 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i64_trunc(i32 addrspace(1)* noalias ; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_ffbh_u32_e32 v0, v3 -; SI-NEXT: v_ffbh_u32_e32 v3, v4 ; SI-NEXT: v_add_i32_e32 v0, vcc, 32, v0 -; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 -; SI-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc +; SI-NEXT: v_ffbh_u32_e32 v3, v4 +; SI-NEXT: v_min_u32_e32 v0, v0, v3 ; SI-NEXT: s_waitcnt lgkmcnt(0) ; SI-NEXT: buffer_store_dword v0, v[1:2], s[4:7], 0 addr64 ; SI-NEXT: s_endpgm @@ -656,22 +646,20 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i64_trunc(i32 addrspace(1)* noalias ; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 ; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c ; VI-NEXT: v_lshlrev_b32_e32 v1, 3, v0 -; VI-NEXT: v_mov_b32_e32 v4, 0 ; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: v_mov_b32_e32 v5, s3 +; VI-NEXT: v_mov_b32_e32 v4, s3 ; VI-NEXT: v_mov_b32_e32 v2, s1 ; VI-NEXT: v_add_u32_e32 v1, vcc, s0, v1 -; VI-NEXT: v_addc_u32_e32 v2, vcc, v2, v4, vcc +; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc ; 
VI-NEXT: flat_load_dwordx2 v[1:2], v[1:2] ; VI-NEXT: v_add_u32_e32 v3, vcc, s2, v0 -; VI-NEXT: v_addc_u32_e32 v4, vcc, v5, v4, vcc +; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_ffbh_u32_e32 v0, v1 ; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0 ; VI-NEXT: v_ffbh_u32_e32 v1, v2 -; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 -; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; VI-NEXT: v_min_u32_e32 v0, v0, v1 ; VI-NEXT: flat_store_dword v[3:4], v0 ; VI-NEXT: s_endpgm ; diff --git a/llvm/test/CodeGen/AMDGPU/cttz.ll b/llvm/test/CodeGen/AMDGPU/cttz.ll index 8921616..7172260 100644 --- a/llvm/test/CodeGen/AMDGPU/cttz.ll +++ b/llvm/test/CodeGen/AMDGPU/cttz.ll @@ -546,16 +546,12 @@ define amdgpu_kernel void @s_cttz_i64(i64 addrspace(1)* noalias %out, [8 x i32], ; SI-NEXT: s_mov_b32 s3, 0xf000 ; SI-NEXT: s_mov_b32 s2, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_ff1_i32_b32 s6, s5 -; SI-NEXT: s_ff1_i32_b32 s7, s4 -; SI-NEXT: s_add_i32 s6, s6, 32 -; SI-NEXT: s_or_b32 s5, s5, s4 -; SI-NEXT: v_mov_b32_e32 v0, s7 -; SI-NEXT: v_mov_b32_e32 v1, s6 -; SI-NEXT: v_cmp_eq_u32_e64 vcc, s4, 0 -; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; SI-NEXT: v_cmp_ne_u32_e64 vcc, s5, 0 -; SI-NEXT: v_cndmask_b32_e32 v0, 64, v0, vcc +; SI-NEXT: s_ff1_i32_b32 s5, s5 +; SI-NEXT: s_min_u32 s5, s5, 0xffffffdf +; SI-NEXT: s_add_i32 s5, s5, 32 +; SI-NEXT: s_ff1_i32_b32 s4, s4 +; SI-NEXT: v_mov_b32_e32 v0, s5 +; SI-NEXT: v_min3_u32 v0, s4, v0, 64 ; SI-NEXT: v_mov_b32_e32 v1, 0 ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-NEXT: s_endpgm @@ -568,15 +564,10 @@ define amdgpu_kernel void @s_cttz_i64(i64 addrspace(1)* noalias %out, [8 x i32], ; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: v_mov_b32_e32 v1, 0 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_ff1_i32_b32 s2, s1 -; VI-NEXT: s_add_i32 s2, s2, 32 -; VI-NEXT: s_ff1_i32_b32 s3, s0 -; VI-NEXT: s_cmp_eq_u32 s0, 0 -; VI-NEXT: s_cselect_b32 s2, s2, s3 -; VI-NEXT: s_or_b32 s0, s1, s0 -; VI-NEXT: s_cmp_lg_u32 s0, 0 -; VI-NEXT: s_cselect_b32 s0, s2, 64 -; VI-NEXT: v_mov_b32_e32 v0, s0 +; VI-NEXT: s_ff1_i32_b32 s1, s1 +; VI-NEXT: v_add_u32_e64 v0, s[2:3], s1, 32 clamp +; VI-NEXT: s_ff1_i32_b32 s0, s0 +; VI-NEXT: v_min3_u32 v0, s0, v0, 64 ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; VI-NEXT: s_endpgm ; @@ -606,14 +597,9 @@ define amdgpu_kernel void @s_cttz_i64(i64 addrspace(1)* noalias %out, [8 x i32], ; GFX10-NEXT: v_mov_b32_e32 v1, 0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_ff1_i32_b32 s0, s3 -; GFX10-NEXT: s_ff1_i32_b32 s1, s2 -; GFX10-NEXT: s_add_i32 s0, s0, 32 -; GFX10-NEXT: s_cmp_eq_u32 s2, 0 -; GFX10-NEXT: s_cselect_b32 s0, s0, s1 -; GFX10-NEXT: s_or_b32 s1, s3, s2 -; GFX10-NEXT: s_cmp_lg_u32 s1, 0 -; GFX10-NEXT: s_cselect_b32 s0, s0, 64 -; GFX10-NEXT: v_mov_b32_e32 v0, s0 +; GFX10-NEXT: v_add_nc_u32_e64 v0, s0, 32 clamp +; GFX10-NEXT: s_ff1_i32_b32 s0, s2 +; GFX10-NEXT: v_min3_u32 v0, s0, v0, 64 ; GFX10-NEXT: global_store_dwordx2 v1, v[0:1], s[4:5] ; GFX10-NEXT: s_endpgm ; @@ -645,16 +631,12 @@ define amdgpu_kernel void @s_cttz_i64_trunc(i32 addrspace(1)* noalias %out, i64 ; SI-NEXT: s_mov_b32 s3, 0xf000 ; SI-NEXT: s_mov_b32 s2, -1 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_ff1_i32_b32 s6, s5 -; SI-NEXT: s_ff1_i32_b32 s7, s4 -; SI-NEXT: s_add_i32 s6, s6, 32 -; SI-NEXT: s_or_b32 s5, s5, s4 -; SI-NEXT: v_mov_b32_e32 v0, s7 -; SI-NEXT: v_mov_b32_e32 v1, s6 -; SI-NEXT: v_cmp_eq_u32_e64 vcc, s4, 0 -; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; SI-NEXT: v_cmp_ne_u32_e64 vcc, s5, 0 -; SI-NEXT: v_cndmask_b32_e32 v0, 64, 
v0, vcc +; SI-NEXT: s_ff1_i32_b32 s5, s5 +; SI-NEXT: s_min_u32 s5, s5, 0xffffffdf +; SI-NEXT: s_add_i32 s5, s5, 32 +; SI-NEXT: s_ff1_i32_b32 s4, s4 +; SI-NEXT: v_mov_b32_e32 v0, s5 +; SI-NEXT: v_min3_u32 v0, s4, v0, 64 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; @@ -665,15 +647,10 @@ define amdgpu_kernel void @s_cttz_i64_trunc(i32 addrspace(1)* noalias %out, i64 ; VI-NEXT: s_mov_b32 s7, 0xf000 ; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_ff1_i32_b32 s2, s1 -; VI-NEXT: s_add_i32 s2, s2, 32 -; VI-NEXT: s_ff1_i32_b32 s3, s0 -; VI-NEXT: s_cmp_eq_u32 s0, 0 -; VI-NEXT: s_cselect_b32 s2, s2, s3 -; VI-NEXT: s_or_b32 s0, s1, s0 -; VI-NEXT: s_cmp_lg_u32 s0, 0 -; VI-NEXT: s_cselect_b32 s0, s2, 64 -; VI-NEXT: v_mov_b32_e32 v0, s0 +; VI-NEXT: s_ff1_i32_b32 s1, s1 +; VI-NEXT: v_add_u32_e64 v0, s[2:3], s1, 32 clamp +; VI-NEXT: s_ff1_i32_b32 s0, s0 +; VI-NEXT: v_min3_u32 v0, s0, v0, 64 ; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0 ; VI-NEXT: s_endpgm ; @@ -699,18 +676,13 @@ define amdgpu_kernel void @s_cttz_i64_trunc(i32 addrspace(1)* noalias %out, i64 ; GFX10-NEXT: s_clause 0x1 ; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x2c ; GFX10-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 -; GFX10-NEXT: v_mov_b32_e32 v0, 0 +; GFX10-NEXT: v_mov_b32_e32 v1, 0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_ff1_i32_b32 s0, s3 -; GFX10-NEXT: s_ff1_i32_b32 s1, s2 -; GFX10-NEXT: s_add_i32 s0, s0, 32 -; GFX10-NEXT: s_cmp_eq_u32 s2, 0 -; GFX10-NEXT: s_cselect_b32 s0, s0, s1 -; GFX10-NEXT: s_or_b32 s1, s3, s2 -; GFX10-NEXT: s_cmp_lg_u32 s1, 0 -; GFX10-NEXT: s_cselect_b32 s0, s0, 64 -; GFX10-NEXT: v_mov_b32_e32 v1, s0 -; GFX10-NEXT: global_store_dword v0, v1, s[4:5] +; GFX10-NEXT: v_add_nc_u32_e64 v0, s0, 32 clamp +; GFX10-NEXT: s_ff1_i32_b32 s0, s2 +; GFX10-NEXT: v_min3_u32 v0, s0, v0, 64 +; GFX10-NEXT: global_store_dword v1, v0, s[4:5] ; GFX10-NEXT: s_endpgm ; ; GFX10-GISEL-LABEL: s_cttz_i64_trunc: @@ -744,14 +716,11 @@ define amdgpu_kernel void @v_cttz_i64(i64 addrspace(1)* noalias %out, i64 addrsp ; SI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_ffbl_b32_e32 v4, v3 -; SI-NEXT: v_ffbl_b32_e32 v5, v2 -; SI-NEXT: v_or_b32_e32 v3, v3, v2 -; SI-NEXT: v_add_i32_e32 v4, vcc, 32, v4 -; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 -; SI-NEXT: v_cndmask_b32_e32 v2, v5, v4, vcc -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; SI-NEXT: v_cndmask_b32_e32 v2, 64, v2, vcc +; SI-NEXT: v_ffbl_b32_e32 v3, v3 +; SI-NEXT: v_min_u32_e32 v3, 0xffffffdf, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v3 +; SI-NEXT: v_ffbl_b32_e32 v2, v2 +; SI-NEXT: v_min3_u32 v2, v2, v3, 64 ; SI-NEXT: v_mov_b32_e32 v3, v1 ; SI-NEXT: s_waitcnt lgkmcnt(0) ; SI-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64 @@ -762,25 +731,20 @@ define amdgpu_kernel void @v_cttz_i64(i64 addrspace(1)* noalias %out, i64 addrsp ; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 ; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c ; VI-NEXT: v_lshlrev_b32_e32 v3, 3, v0 -; VI-NEXT: v_mov_b32_e32 v4, 0 ; VI-NEXT: v_mov_b32_e32 v2, 0 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: v_mov_b32_e32 v5, s3 +; VI-NEXT: v_mov_b32_e32 v4, s3 ; VI-NEXT: v_mov_b32_e32 v1, s1 ; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v3 -; VI-NEXT: v_addc_u32_e32 v1, vcc, v1, v4, vcc +; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc ; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1] ; VI-NEXT: v_add_u32_e32 v3, vcc, s2, v3 -; VI-NEXT: v_addc_u32_e32 v4, vcc, v5, v4, vcc +; VI-NEXT: 
v_addc_u32_e32 v4, vcc, 0, v4, vcc ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_ffbl_b32_e32 v5, v1 -; VI-NEXT: v_add_u32_e32 v5, vcc, 32, v5 -; VI-NEXT: v_ffbl_b32_e32 v6, v0 -; VI-NEXT: v_or_b32_e32 v1, v1, v0 -; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; VI-NEXT: v_cndmask_b32_e32 v0, v6, v5, vcc -; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1 -; VI-NEXT: v_cndmask_b32_e32 v1, 64, v0, vcc +; VI-NEXT: v_ffbl_b32_e32 v1, v1 +; VI-NEXT: v_add_u32_e64 v1, s[0:1], v1, 32 clamp +; VI-NEXT: v_ffbl_b32_e32 v0, v0 +; VI-NEXT: v_min3_u32 v1, v0, v1, 64 ; VI-NEXT: flat_store_dwordx2 v[3:4], v[1:2] ; VI-NEXT: s_endpgm ; @@ -819,15 +783,11 @@ define amdgpu_kernel void @v_cttz_i64(i64 addrspace(1)* noalias %out, i64 addrsp ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: global_load_dwordx2 v[0:1], v2, s[2:3] ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_ffbl_b32_e32 v3, v1 -; GFX10-NEXT: v_ffbl_b32_e32 v4, v0 -; GFX10-NEXT: v_or_b32_e32 v1, v1, v0 -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 -; GFX10-NEXT: v_add_nc_u32_e32 v3, 32, v3 -; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc_lo -; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1 +; GFX10-NEXT: v_ffbl_b32_e32 v1, v1 +; GFX10-NEXT: v_ffbl_b32_e32 v0, v0 +; GFX10-NEXT: v_add_nc_u32_e64 v1, v1, 32 clamp +; GFX10-NEXT: v_min3_u32 v0, v0, v1, 64 ; GFX10-NEXT: v_mov_b32_e32 v1, 0 -; GFX10-NEXT: v_cndmask_b32_e32 v0, 64, v0, vcc_lo ; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] ; GFX10-NEXT: s_endpgm ; @@ -872,13 +832,10 @@ define amdgpu_kernel void @v_cttz_i64_trunc(i32 addrspace(1)* noalias %out, i64 ; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_ffbl_b32_e32 v0, v4 -; SI-NEXT: v_ffbl_b32_e32 v5, v3 -; SI-NEXT: v_or_b32_e32 v4, v4, v3 +; SI-NEXT: v_min_u32_e32 v0, 0xffffffdf, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 32, v0 -; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3 -; SI-NEXT: v_cndmask_b32_e32 v0, v5, v0, vcc -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4 -; SI-NEXT: v_cndmask_b32_e32 v0, 64, v0, vcc +; SI-NEXT: v_ffbl_b32_e32 v3, v3 +; SI-NEXT: v_min3_u32 v0, v3, v0, 64 ; SI-NEXT: s_waitcnt lgkmcnt(0) ; SI-NEXT: buffer_store_dword v0, v[1:2], s[4:7], 0 addr64 ; SI-NEXT: s_endpgm @@ -888,25 +845,20 @@ define amdgpu_kernel void @v_cttz_i64_trunc(i32 addrspace(1)* noalias %out, i64 ; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 ; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c ; VI-NEXT: v_lshlrev_b32_e32 v1, 3, v0 -; VI-NEXT: v_mov_b32_e32 v4, 0 ; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: v_mov_b32_e32 v5, s3 +; VI-NEXT: v_mov_b32_e32 v4, s3 ; VI-NEXT: v_mov_b32_e32 v2, s1 ; VI-NEXT: v_add_u32_e32 v1, vcc, s0, v1 -; VI-NEXT: v_addc_u32_e32 v2, vcc, v2, v4, vcc +; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc ; VI-NEXT: flat_load_dwordx2 v[1:2], v[1:2] ; VI-NEXT: v_add_u32_e32 v3, vcc, s2, v0 -; VI-NEXT: v_addc_u32_e32 v4, vcc, v5, v4, vcc +; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_ffbl_b32_e32 v0, v2 -; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0 -; VI-NEXT: v_ffbl_b32_e32 v5, v1 -; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; VI-NEXT: v_or_b32_e32 v2, v2, v1 -; VI-NEXT: v_cndmask_b32_e32 v0, v5, v0, vcc -; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 -; VI-NEXT: v_cndmask_b32_e32 v0, 64, v0, vcc +; VI-NEXT: v_add_u32_e64 v0, s[0:1], v0, 32 clamp +; VI-NEXT: v_ffbl_b32_e32 v1, v1 +; VI-NEXT: v_min3_u32 v0, v1, v0, 64 ; VI-NEXT: flat_store_dword v[3:4], v0 ; VI-NEXT: s_endpgm ; @@ -946,14 +898,10 @@ define amdgpu_kernel void @v_cttz_i64_trunc(i32 addrspace(1)* noalias 
%out, i64 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: global_load_dwordx2 v[1:2], v1, s[2:3] ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_ffbl_b32_e32 v3, v2 -; GFX10-NEXT: v_ffbl_b32_e32 v4, v1 -; GFX10-NEXT: v_or_b32_e32 v2, v2, v1 -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 -; GFX10-NEXT: v_add_nc_u32_e32 v3, 32, v3 -; GFX10-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc_lo -; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2 -; GFX10-NEXT: v_cndmask_b32_e32 v1, 64, v1, vcc_lo +; GFX10-NEXT: v_ffbl_b32_e32 v2, v2 +; GFX10-NEXT: v_ffbl_b32_e32 v1, v1 +; GFX10-NEXT: v_add_nc_u32_e64 v2, v2, 32 clamp +; GFX10-NEXT: v_min3_u32 v1, v1, v2, 64 ; GFX10-NEXT: global_store_dword v0, v1, s[0:1] ; GFX10-NEXT: s_endpgm ; diff --git a/llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll b/llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll index 4e59aad..d13edcc 100644 --- a/llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll +++ b/llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll @@ -508,16 +508,14 @@ define amdgpu_kernel void @s_cttz_zero_undef_i64_with_select(i64 addrspace(1)* n ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xb ; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9 ; SI-NEXT: s_mov_b32 s3, 0xf000 -; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_ff1_i32_b32 s2, s5 -; SI-NEXT: s_ff1_i32_b32 s5, s4 -; SI-NEXT: s_add_i32 s6, s2, 32 ; SI-NEXT: s_mov_b32 s2, -1 -; SI-NEXT: v_mov_b32_e32 v0, s5 -; SI-NEXT: v_mov_b32_e32 v1, s6 -; SI-NEXT: v_cmp_eq_u32_e64 vcc, s4, 0 -; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_ff1_i32_b32 s5, s5 +; SI-NEXT: s_ff1_i32_b32 s4, s4 +; SI-NEXT: s_add_i32 s5, s5, 32 +; SI-NEXT: s_min_u32 s4, s4, s5 ; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-NEXT: s_endpgm ; @@ -529,10 +527,9 @@ define amdgpu_kernel void @s_cttz_zero_undef_i64_with_select(i64 addrspace(1)* n ; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: v_mov_b32_e32 v2, s2 ; VI-NEXT: s_ff1_i32_b32 s1, s1 +; VI-NEXT: s_ff1_i32_b32 s0, s0 ; VI-NEXT: s_add_i32 s1, s1, 32 -; VI-NEXT: s_ff1_i32_b32 s4, s0 -; VI-NEXT: s_cmp_eq_u32 s0, 0 -; VI-NEXT: s_cselect_b32 s0, s1, s4 +; VI-NEXT: s_min_u32 s0, s0, s1 ; VI-NEXT: v_mov_b32_e32 v0, s0 ; VI-NEXT: v_mov_b32_e32 v3, s3 ; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1] @@ -925,8 +922,7 @@ define amdgpu_kernel void @v_cttz_zero_undef_i64_with_select(i64 addrspace(1)* n ; SI-NEXT: v_ffbl_b32_e32 v0, v3 ; SI-NEXT: v_ffbl_b32_e32 v4, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 32, v0 -; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 -; SI-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc +; SI-NEXT: v_min_u32_e32 v0, v4, v0 ; SI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3] ; SI-NEXT: v_cndmask_b32_e32 v0, 32, v0, vcc ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 @@ -996,9 +992,8 @@ define amdgpu_kernel void @v_cttz_zero_undef_i64_with_select(i64 addrspace(1)* n ; VI-NEXT: v_or_b32_e32 v2, v2, v8 ; VI-NEXT: v_or_b32_e32 v2, v0, v2 ; VI-NEXT: v_ffbl_b32_e32 v0, v2 -; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 -; VI-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc ; VI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3] +; VI-NEXT: v_min_u32_e32 v0, v0, v4 ; VI-NEXT: v_mov_b32_e32 v2, s2 ; VI-NEXT: v_cndmask_b32_e32 v0, 32, v0, vcc ; VI-NEXT: v_mov_b32_e32 v3, s3 diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll index b098eb1..dd1206b 100644 --- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll +++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll @@ -144,104 +144,96 @@ define amdgpu_kernel void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 
%y) { ; GCN-IR-LABEL: s_test_sdiv: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases ; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 -; GCN-IR-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd +; GCN-IR-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_ashr_i32 s2, s7, 31 +; GCN-IR-NEXT: s_ashr_i32 s0, s7, 31 +; GCN-IR-NEXT: s_mov_b32 s1, s0 +; GCN-IR-NEXT: s_ashr_i32 s2, s9, 31 +; GCN-IR-NEXT: s_xor_b64 s[6:7], s[0:1], s[6:7] +; GCN-IR-NEXT: s_sub_u32 s10, s6, s0 ; GCN-IR-NEXT: s_mov_b32 s3, s2 -; GCN-IR-NEXT: s_ashr_i32 s8, s1, 31 -; GCN-IR-NEXT: s_xor_b64 s[6:7], s[2:3], s[6:7] -; GCN-IR-NEXT: s_sub_u32 s10, s6, s2 -; GCN-IR-NEXT: s_mov_b32 s9, s8 -; GCN-IR-NEXT: s_subb_u32 s11, s7, s2 -; GCN-IR-NEXT: s_xor_b64 s[0:1], s[8:9], s[0:1] -; GCN-IR-NEXT: s_sub_u32 s6, s0, s8 -; GCN-IR-NEXT: s_flbit_i32_b32 s14, s6 -; GCN-IR-NEXT: s_subb_u32 s7, s1, s8 -; GCN-IR-NEXT: s_add_i32 s14, s14, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s15, s7 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s14 +; GCN-IR-NEXT: s_subb_u32 s11, s7, s0 +; GCN-IR-NEXT: s_xor_b64 s[6:7], s[2:3], s[8:9] +; GCN-IR-NEXT: s_sub_u32 s6, s6, s2 +; GCN-IR-NEXT: s_subb_u32 s7, s7, s2 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[6:7], 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[10:11], 0 +; GCN-IR-NEXT: s_mov_b64 s[8:9], 0 +; GCN-IR-NEXT: s_or_b64 s[18:19], s[12:13], s[14:15] +; GCN-IR-NEXT: s_flbit_i32_b32 s12, s6 ; GCN-IR-NEXT: s_flbit_i32_b32 s14, s10 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s15 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 +; GCN-IR-NEXT: s_add_i32 s12, s12, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s13, s7 ; GCN-IR-NEXT: s_add_i32 s14, s14, 32 ; GCN-IR-NEXT: s_flbit_i32_b32 s15, s11 -; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v0, s15 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s14 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s11, 0 -; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v1, s[14:15], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[10:11], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], s[12:13] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_xor_b64 s[12:13], s[0:1], -1 -; GCN-IR-NEXT: s_and_b64 s[12:13], s[12:13], vcc -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[12:13] -; GCN-IR-NEXT: s_cbranch_vccz BB0_4 -; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[10:11], v0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: s_min_u32 s12, s12, s13 +; GCN-IR-NEXT: s_min_u32 s16, s14, s15 +; GCN-IR-NEXT: s_sub_u32 s14, s12, s16 +; GCN-IR-NEXT: s_subb_u32 s15, 0, 0 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[20:21], s[14:15], 63 +; GCN-IR-NEXT: s_mov_b32 s13, 0 +; GCN-IR-NEXT: s_or_b64 s[18:19], s[18:19], s[20:21] +; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[20:21], s[14:15], 63 +; GCN-IR-NEXT: s_xor_b64 s[22:23], s[18:19], -1 +; GCN-IR-NEXT: s_and_b64 s[20:21], s[22:23], s[20:21] +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[20:21] ; GCN-IR-NEXT: s_cbranch_vccz BB0_5 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: s_add_u32 s18, s14, 1 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s14 +; GCN-IR-NEXT: s_addc_u32 s19, s15, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s15 +; 
GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[18:19], v[0:1] +; GCN-IR-NEXT: s_sub_i32 s14, 63, s14 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_lshl_b64 s[14:15], s[10:11], s14 +; GCN-IR-NEXT: s_cbranch_vccz BB0_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: v_not_b32_e32 v2, v2 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[10:11], v4 +; GCN-IR-NEXT: s_lshr_b64 s[18:19], s[10:11], s18 ; GCN-IR-NEXT: s_add_u32 s10, s6, -1 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3 -; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 ; GCN-IR-NEXT: s_addc_u32 s11, s7, -1 -; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: s_not_b64 s[8:9], s[12:13] +; GCN-IR-NEXT: s_mov_b32 s17, s13 +; GCN-IR-NEXT: s_add_u32 s12, s8, s16 +; GCN-IR-NEXT: s_addc_u32 s13, s9, s13 +; GCN-IR-NEXT: s_mov_b64 s[16:17], 0 +; GCN-IR-NEXT: s_mov_b32 s9, 0 ; GCN-IR-NEXT: BB0_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 -; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s11 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s10, v6 -; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc -; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 -; GCN-IR-NEXT: v_and_b32_e32 v10, s6, v8 -; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8 -; GCN-IR-NEXT: v_and_b32_e32 v11, s7, v8 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4 -; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v4, v8 -; GCN-IR-NEXT: v_sub_i32_e64 v6, s[0:1], v6, v10 -; GCN-IR-NEXT: v_mov_b32_e32 v5, v9 -; GCN-IR-NEXT: v_mov_b32_e32 v9, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] +; GCN-IR-NEXT: s_lshr_b32 s8, s15, 31 +; GCN-IR-NEXT: s_lshl_b64 s[18:19], s[18:19], 1 +; GCN-IR-NEXT: s_lshl_b64 s[14:15], s[14:15], 1 +; GCN-IR-NEXT: s_or_b64 s[18:19], s[18:19], s[8:9] +; GCN-IR-NEXT: s_or_b64 s[14:15], s[16:17], s[14:15] +; GCN-IR-NEXT: s_sub_u32 s8, s10, s18 +; GCN-IR-NEXT: s_subb_u32 s8, s11, s19 +; GCN-IR-NEXT: s_ashr_i32 s16, s8, 31 +; GCN-IR-NEXT: s_mov_b32 s17, s16 +; GCN-IR-NEXT: s_and_b32 s8, s16, 1 +; GCN-IR-NEXT: s_and_b64 s[20:21], s[16:17], s[6:7] +; GCN-IR-NEXT: s_sub_u32 s18, s18, s20 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s12 +; GCN-IR-NEXT: s_subb_u32 s19, s19, s21 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s13 +; GCN-IR-NEXT: s_add_u32 s12, s12, 1 +; GCN-IR-NEXT: s_addc_u32 s13, s13, 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[12:13], v[0:1] +; GCN-IR-NEXT: s_mov_b64 s[16:17], s[8:9] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 ; GCN-IR-NEXT: s_cbranch_vccz BB0_3 +; GCN-IR-NEXT: BB0_4: ; %Flow6 +; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[14:15], 1 +; GCN-IR-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s7 ; GCN-IR-NEXT: s_branch BB0_6 -; GCN-IR-NEXT: BB0_4: +; GCN-IR-NEXT: BB0_5: ; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 -; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1] +; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[18:19] ; GCN-IR-NEXT: v_mov_b32_e32 v0, s10 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1] -; GCN-IR-NEXT: s_branch BB0_7 -; GCN-IR-NEXT: BB0_5: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB0_6: ; %Flow6 -; GCN-IR-NEXT: v_lshl_b64 
v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 -; GCN-IR-NEXT: BB0_7: ; %udiv-end -; GCN-IR-NEXT: s_xor_b64 s[0:1], s[8:9], s[2:3] +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[18:19] +; GCN-IR-NEXT: BB0_6: ; %udiv-end +; GCN-IR-NEXT: s_xor_b64 s[0:1], s[2:3], s[0:1] ; GCN-IR-NEXT: v_xor_b32_e32 v0, s0, v0 ; GCN-IR-NEXT: v_xor_b32_e32 v1, s1, v1 ; GCN-IR-NEXT: v_mov_b32_e32 v2, s1 @@ -404,13 +396,11 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) { ; GCN-IR-NEXT: s_or_b64 s[6:7], vcc, s[4:5] ; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 32, v0 ; GCN-IR-NEXT: v_ffbh_u32_e32 v7, v3 -; GCN-IR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3 -; GCN-IR-NEXT: v_cndmask_b32_e32 v13, v7, v0, vcc +; GCN-IR-NEXT: v_min_u32_e32 v13, v0, v7 ; GCN-IR-NEXT: v_ffbh_u32_e32 v0, v9 ; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 32, v0 ; GCN-IR-NEXT: v_ffbh_u32_e32 v7, v10 -; GCN-IR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v10 -; GCN-IR-NEXT: v_cndmask_b32_e32 v14, v7, v0, vcc +; GCN-IR-NEXT: v_min_u32_e32 v14, v0, v7 ; GCN-IR-NEXT: v_sub_i32_e32 v7, vcc, v13, v14 ; GCN-IR-NEXT: v_subb_u32_e64 v8, s[4:5], 0, 0, vcc ; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[7:8] @@ -1014,105 +1004,97 @@ define amdgpu_kernel void @s_test_sdiv24_48(i48 addrspace(1)* %out, i48 %x, i48 ; GCN-IR-NEXT: s_load_dword s0, s[0:1], 0xe ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) ; GCN-IR-NEXT: s_sext_i32_i16 s3, s3 +; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[2:3], 24 ; GCN-IR-NEXT: s_sext_i32_i16 s7, s0 -; GCN-IR-NEXT: s_ashr_i64 s[0:1], s[2:3], 24 -; GCN-IR-NEXT: s_ashr_i32 s2, s3, 31 +; GCN-IR-NEXT: s_ashr_i32 s0, s3, 31 +; GCN-IR-NEXT: s_mov_b32 s1, s0 +; GCN-IR-NEXT: s_ashr_i32 s2, s7, 31 +; GCN-IR-NEXT: s_ashr_i64 s[12:13], s[6:7], 24 +; GCN-IR-NEXT: s_xor_b64 s[6:7], s[0:1], s[8:9] +; GCN-IR-NEXT: s_sub_u32 s10, s6, s0 ; GCN-IR-NEXT: s_mov_b32 s3, s2 -; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[6:7], 24 -; GCN-IR-NEXT: s_ashr_i32 s6, s7, 31 -; GCN-IR-NEXT: s_xor_b64 s[0:1], s[2:3], s[0:1] -; GCN-IR-NEXT: s_sub_u32 s10, s0, s2 -; GCN-IR-NEXT: s_mov_b32 s7, s6 -; GCN-IR-NEXT: s_subb_u32 s11, s1, s2 -; GCN-IR-NEXT: s_xor_b64 s[0:1], s[6:7], s[8:9] -; GCN-IR-NEXT: s_sub_u32 s8, s0, s6 -; GCN-IR-NEXT: s_flbit_i32_b32 s14, s8 -; GCN-IR-NEXT: s_subb_u32 s9, s1, s6 -; GCN-IR-NEXT: s_add_i32 s14, s14, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s15, s9 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s14 +; GCN-IR-NEXT: s_subb_u32 s11, s7, s0 +; GCN-IR-NEXT: s_xor_b64 s[6:7], s[2:3], s[12:13] +; GCN-IR-NEXT: s_sub_u32 s6, s6, s2 +; GCN-IR-NEXT: s_subb_u32 s7, s7, s2 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[6:7], 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[10:11], 0 +; GCN-IR-NEXT: s_mov_b64 s[8:9], 0 +; GCN-IR-NEXT: s_or_b64 s[18:19], s[12:13], s[14:15] +; GCN-IR-NEXT: s_flbit_i32_b32 s12, s6 ; GCN-IR-NEXT: s_flbit_i32_b32 s14, s10 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s15 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s9, 0 +; GCN-IR-NEXT: s_add_i32 s12, s12, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s13, s7 ; GCN-IR-NEXT: s_add_i32 s14, s14, 32 ; GCN-IR-NEXT: s_flbit_i32_b32 s15, s11 -; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v0, s15 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s14 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s11, 0 -; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v1, s[14:15], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[8:9], 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[10:11], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[0:1], 
s[0:1], s[12:13] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_xor_b64 s[12:13], s[0:1], -1 -; GCN-IR-NEXT: s_and_b64 s[12:13], s[12:13], vcc -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[12:13] -; GCN-IR-NEXT: s_cbranch_vccz BB9_4 -; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[10:11], v0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: s_min_u32 s12, s12, s13 +; GCN-IR-NEXT: s_min_u32 s16, s14, s15 +; GCN-IR-NEXT: s_sub_u32 s14, s12, s16 +; GCN-IR-NEXT: s_subb_u32 s15, 0, 0 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[20:21], s[14:15], 63 +; GCN-IR-NEXT: s_mov_b32 s13, 0 +; GCN-IR-NEXT: s_or_b64 s[18:19], s[18:19], s[20:21] +; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[20:21], s[14:15], 63 +; GCN-IR-NEXT: s_xor_b64 s[22:23], s[18:19], -1 +; GCN-IR-NEXT: s_and_b64 s[20:21], s[22:23], s[20:21] +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[20:21] ; GCN-IR-NEXT: s_cbranch_vccz BB9_5 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: s_add_u32 s18, s14, 1 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s14 +; GCN-IR-NEXT: s_addc_u32 s19, s15, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s15 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[18:19], v[0:1] +; GCN-IR-NEXT: s_sub_i32 s14, 63, s14 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_lshl_b64 s[14:15], s[10:11], s14 +; GCN-IR-NEXT: s_cbranch_vccz BB9_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: v_not_b32_e32 v2, v2 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[10:11], v4 -; GCN-IR-NEXT: s_add_u32 s10, s8, -1 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3 -; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 -; GCN-IR-NEXT: s_addc_u32 s11, s9, -1 -; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: s_lshr_b64 s[18:19], s[10:11], s18 +; GCN-IR-NEXT: s_add_u32 s10, s6, -1 +; GCN-IR-NEXT: s_addc_u32 s11, s7, -1 +; GCN-IR-NEXT: s_not_b64 s[8:9], s[12:13] +; GCN-IR-NEXT: s_mov_b32 s17, s13 +; GCN-IR-NEXT: s_add_u32 s12, s8, s16 +; GCN-IR-NEXT: s_addc_u32 s13, s9, s13 +; GCN-IR-NEXT: s_mov_b64 s[16:17], 0 +; GCN-IR-NEXT: s_mov_b32 s9, 0 ; GCN-IR-NEXT: BB9_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 -; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s11 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s10, v6 -; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc -; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 -; GCN-IR-NEXT: v_and_b32_e32 v10, s8, v8 -; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8 -; GCN-IR-NEXT: v_and_b32_e32 v11, s9, v8 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4 -; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v4, v8 -; GCN-IR-NEXT: v_sub_i32_e64 v6, s[0:1], v6, v10 -; GCN-IR-NEXT: v_mov_b32_e32 v5, v9 -; GCN-IR-NEXT: v_mov_b32_e32 v9, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] +; GCN-IR-NEXT: s_lshr_b32 s8, s15, 31 +; GCN-IR-NEXT: s_lshl_b64 s[18:19], s[18:19], 1 +; GCN-IR-NEXT: s_lshl_b64 s[14:15], s[14:15], 1 +; GCN-IR-NEXT: s_or_b64 s[18:19], s[18:19], s[8:9] 
+; GCN-IR-NEXT: s_or_b64 s[14:15], s[16:17], s[14:15] +; GCN-IR-NEXT: s_sub_u32 s8, s10, s18 +; GCN-IR-NEXT: s_subb_u32 s8, s11, s19 +; GCN-IR-NEXT: s_ashr_i32 s16, s8, 31 +; GCN-IR-NEXT: s_mov_b32 s17, s16 +; GCN-IR-NEXT: s_and_b32 s8, s16, 1 +; GCN-IR-NEXT: s_and_b64 s[20:21], s[16:17], s[6:7] +; GCN-IR-NEXT: s_sub_u32 s18, s18, s20 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s12 +; GCN-IR-NEXT: s_subb_u32 s19, s19, s21 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s13 +; GCN-IR-NEXT: s_add_u32 s12, s12, 1 +; GCN-IR-NEXT: s_addc_u32 s13, s13, 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[12:13], v[0:1] +; GCN-IR-NEXT: s_mov_b64 s[16:17], s[8:9] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 ; GCN-IR-NEXT: s_cbranch_vccz BB9_3 +; GCN-IR-NEXT: BB9_4: ; %Flow3 +; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[14:15], 1 +; GCN-IR-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s7 ; GCN-IR-NEXT: s_branch BB9_6 -; GCN-IR-NEXT: BB9_4: +; GCN-IR-NEXT: BB9_5: ; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 -; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1] +; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[18:19] ; GCN-IR-NEXT: v_mov_b32_e32 v0, s10 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1] -; GCN-IR-NEXT: s_branch BB9_7 -; GCN-IR-NEXT: BB9_5: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB9_6: ; %Flow3 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 -; GCN-IR-NEXT: BB9_7: ; %udiv-end -; GCN-IR-NEXT: s_xor_b64 s[0:1], s[6:7], s[2:3] +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[18:19] +; GCN-IR-NEXT: BB9_6: ; %udiv-end +; GCN-IR-NEXT: s_xor_b64 s[0:1], s[2:3], s[0:1] ; GCN-IR-NEXT: v_xor_b32_e32 v0, s0, v0 ; GCN-IR-NEXT: v_xor_b32_e32 v1, s1, v1 ; GCN-IR-NEXT: v_mov_b32_e32 v2, s1 @@ -1255,94 +1237,87 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x) ; ; GCN-IR-LABEL: s_test_sdiv_k_num_i64: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases -; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_ashr_i32 s2, s7, 31 -; GCN-IR-NEXT: s_mov_b32 s3, s2 -; GCN-IR-NEXT: s_xor_b64 s[0:1], s[2:3], s[6:7] -; GCN-IR-NEXT: s_sub_u32 s6, s0, s2 -; GCN-IR-NEXT: s_subb_u32 s7, s1, s2 -; GCN-IR-NEXT: s_flbit_i32_b32 s8, s6 -; GCN-IR-NEXT: s_add_i32 s8, s8, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s9, s7 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s9 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s8 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 -; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 0xffffffc5, v2 -; GCN-IR-NEXT: v_addc_u32_e64 v1, s[8:9], 0, -1, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_xor_b64 s[8:9], s[0:1], -1 -; GCN-IR-NEXT: s_and_b64 s[8:9], s[8:9], vcc -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[8:9] -; GCN-IR-NEXT: s_cbranch_vccz BB10_4 -; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], 24, v0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: s_ashr_i32 s4, s3, 31 +; GCN-IR-NEXT: s_mov_b32 s5, s4 +; 
GCN-IR-NEXT: s_xor_b64 s[2:3], s[4:5], s[2:3] +; GCN-IR-NEXT: s_sub_u32 s2, s2, s4 +; GCN-IR-NEXT: s_subb_u32 s3, s3, s4 +; GCN-IR-NEXT: s_flbit_i32_b32 s6, s2 +; GCN-IR-NEXT: s_add_i32 s6, s6, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s7, s3 +; GCN-IR-NEXT: s_min_u32 s10, s6, s7 +; GCN-IR-NEXT: s_add_u32 s8, s10, 0xffffffc5 +; GCN-IR-NEXT: s_addc_u32 s9, 0, -1 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[2:3], 0 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[14:15], s[8:9], 63 +; GCN-IR-NEXT: s_mov_b64 s[6:7], 0 +; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[14:15] +; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[14:15], s[8:9], 63 +; GCN-IR-NEXT: s_xor_b64 s[16:17], s[12:13], -1 +; GCN-IR-NEXT: s_and_b64 s[14:15], s[16:17], s[14:15] +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[14:15] ; GCN-IR-NEXT: s_cbranch_vccz BB10_5 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: s_add_u32 s14, s8, 1 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s8 +; GCN-IR-NEXT: s_addc_u32 s15, s9, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s9 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[14:15], v[0:1] +; GCN-IR-NEXT: s_sub_i32 s8, 63, s8 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_lshl_b64 s[12:13], 24, s8 +; GCN-IR-NEXT: s_cbranch_vccz BB10_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: s_add_u32 s8, s6, -1 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], 24, v3 -; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 58, v2 -; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 -; GCN-IR-NEXT: s_addc_u32 s9, s7, -1 -; GCN-IR-NEXT: v_subb_u32_e64 v5, s[0:1], 0, 0, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: s_lshr_b64 s[16:17], 24, s14 +; GCN-IR-NEXT: s_add_u32 s8, s2, -1 +; GCN-IR-NEXT: s_addc_u32 s9, s3, -1 +; GCN-IR-NEXT: s_sub_u32 s10, 58, s10 +; GCN-IR-NEXT: s_subb_u32 s11, 0, 0 +; GCN-IR-NEXT: s_mov_b64 s[14:15], 0 +; GCN-IR-NEXT: s_mov_b32 s7, 0 ; GCN-IR-NEXT: BB10_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 -; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s9 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s8, v6 -; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc -; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 -; GCN-IR-NEXT: v_and_b32_e32 v10, s6, v8 -; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8 -; GCN-IR-NEXT: v_and_b32_e32 v11, s7, v8 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4 -; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v4, v8 -; GCN-IR-NEXT: v_sub_i32_e64 v6, s[0:1], v6, v10 -; GCN-IR-NEXT: v_mov_b32_e32 v5, v9 -; GCN-IR-NEXT: v_mov_b32_e32 v9, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] +; GCN-IR-NEXT: s_lshr_b32 s6, s13, 31 +; GCN-IR-NEXT: s_lshl_b64 s[16:17], s[16:17], 1 +; GCN-IR-NEXT: s_lshl_b64 s[12:13], s[12:13], 1 +; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[6:7] +; GCN-IR-NEXT: s_or_b64 s[12:13], s[14:15], s[12:13] +; GCN-IR-NEXT: s_sub_u32 s6, s8, s16 +; GCN-IR-NEXT: s_subb_u32 s6, s9, s17 +; GCN-IR-NEXT: s_ashr_i32 s14, s6, 31 +; GCN-IR-NEXT: s_mov_b32 s15, s14 +; GCN-IR-NEXT: s_and_b32 s6, s14, 1 +; GCN-IR-NEXT: s_and_b64 s[18:19], s[14:15], s[2:3] +; GCN-IR-NEXT: s_sub_u32 s16, s16, s18 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s10 +; GCN-IR-NEXT: s_subb_u32 s17, s17, s19 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s11 +; GCN-IR-NEXT: s_add_u32 s10, s10, 1 +; GCN-IR-NEXT: s_addc_u32 
s11, s11, 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1] +; GCN-IR-NEXT: s_mov_b64 s[14:15], s[6:7] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 ; GCN-IR-NEXT: s_cbranch_vccz BB10_3 +; GCN-IR-NEXT: BB10_4: ; %Flow5 +; GCN-IR-NEXT: s_lshl_b64 s[2:3], s[12:13], 1 +; GCN-IR-NEXT: s_or_b64 s[2:3], s[6:7], s[2:3] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s2 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s3 ; GCN-IR-NEXT: s_branch BB10_6 -; GCN-IR-NEXT: BB10_4: -; GCN-IR-NEXT: v_mov_b32_e32 v1, 0 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, 24, 0, s[0:1] -; GCN-IR-NEXT: s_branch BB10_7 ; GCN-IR-NEXT: BB10_5: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB10_6: ; %Flow5 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 -; GCN-IR-NEXT: BB10_7: ; %udiv-end -; GCN-IR-NEXT: v_xor_b32_e32 v0, s2, v0 -; GCN-IR-NEXT: v_xor_b32_e32 v1, s3, v1 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s3 -; GCN-IR-NEXT: v_subrev_i32_e32 v0, vcc, s2, v0 -; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 -; GCN-IR-NEXT: s_mov_b32 s6, -1 +; GCN-IR-NEXT: v_mov_b32_e32 v1, 0 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, 24, 0, s[12:13] +; GCN-IR-NEXT: BB10_6: ; %udiv-end +; GCN-IR-NEXT: v_xor_b32_e32 v0, s4, v0 +; GCN-IR-NEXT: v_xor_b32_e32 v1, s5, v1 +; GCN-IR-NEXT: v_mov_b32_e32 v2, s5 +; GCN-IR-NEXT: v_subrev_i32_e32 v0, vcc, s4, v0 +; GCN-IR-NEXT: s_mov_b32 s3, 0xf000 +; GCN-IR-NEXT: s_mov_b32 s2, -1 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc -; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GCN-IR-NEXT: s_endpgm %result = sdiv i64 24, %x store i64 %result, i64 addrspace(1)* %out @@ -1473,12 +1448,11 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) { ; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v0 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2 ; GCN-IR-NEXT: v_xor_b32_e32 v1, v2, v1 -; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc +; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0 ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 32, v4 ; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v1 -; GCN-IR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; GCN-IR-NEXT: v_cndmask_b32_e32 v10, v5, v4, vcc +; GCN-IR-NEXT: v_min_u32_e32 v10, v4, v5 ; GCN-IR-NEXT: s_movk_i32 s6, 0xffc5 ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, s6, v10 ; GCN-IR-NEXT: v_addc_u32_e64 v5, s[6:7], 0, -1, vcc @@ -1683,12 +1657,11 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) { ; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v0 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2 ; GCN-IR-NEXT: v_xor_b32_e32 v1, v2, v1 -; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc +; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0 ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 32, v4 ; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v1 -; GCN-IR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; GCN-IR-NEXT: v_cndmask_b32_e32 v6, v5, v4, vcc +; GCN-IR-NEXT: v_min_u32_e32 v6, v4, v5 ; GCN-IR-NEXT: s_movk_i32 s6, 0xffd0 ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, s6, v6 ; GCN-IR-NEXT: v_addc_u32_e64 v5, s[6:7], 0, -1, vcc @@ -1789,13 +1762,12 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) { ; GCN-IR-NEXT: v_ashrrev_i32_e32 v2, 31, v1 ; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v0 ; GCN-IR-NEXT: v_sub_i32_e32 v7, vcc, v0, v2 -; GCN-IR-NEXT: v_ffbh_u32_e32 v0, v7 ; GCN-IR-NEXT: v_xor_b32_e32 v1, v2, v1 ; GCN-IR-NEXT: v_subb_u32_e32 v8, vcc, v1, v2, vcc +; GCN-IR-NEXT: v_ffbh_u32_e32 v0, v7 ; GCN-IR-NEXT: v_add_i32_e64 v0, s[4:5], 32, v0 ; GCN-IR-NEXT: v_ffbh_u32_e32 
v1, v8 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v8 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v1, v0, s[4:5] +; GCN-IR-NEXT: v_min_u32_e32 v0, v0, v1 ; GCN-IR-NEXT: v_sub_i32_e64 v3, s[4:5], 48, v0 ; GCN-IR-NEXT: v_subb_u32_e64 v4, s[4:5], 0, 0, s[4:5] ; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[7:8] diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll index 1900a61..4168a97 100644 --- a/llvm/test/CodeGen/AMDGPU/srem64.ll +++ b/llvm/test/CodeGen/AMDGPU/srem64.ll @@ -127,97 +127,89 @@ define amdgpu_kernel void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) { ; GCN-IR-LABEL: s_test_srem: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases ; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 -; GCN-IR-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd +; GCN-IR-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd +; GCN-IR-NEXT: s_mov_b64 s[2:3], 0 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0 -; GCN-IR-NEXT: s_flbit_i32_b32 s10, s2 -; GCN-IR-NEXT: s_add_i32 s10, s10, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s10 -; GCN-IR-NEXT: s_flbit_i32_b32 s10, s6 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s3, 0 -; GCN-IR-NEXT: s_add_i32 s10, s10, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s11, s7 -; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s10 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 -; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[2:3], 0 -; GCN-IR-NEXT: v_subb_u32_e64 v1, s[10:11], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], s[8:9] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_xor_b64 s[8:9], s[0:1], -1 -; GCN-IR-NEXT: s_and_b64 s[8:9], s[8:9], vcc -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[8:9] -; GCN-IR-NEXT: s_cbranch_vccz BB0_4 -; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[6:7], 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[0:1], 0 +; GCN-IR-NEXT: s_flbit_i32_b32 s12, s0 +; GCN-IR-NEXT: s_add_i32 s14, s12, 32 +; GCN-IR-NEXT: s_or_b64 s[12:13], s[8:9], s[10:11] +; GCN-IR-NEXT: s_flbit_i32_b32 s8, s1 +; GCN-IR-NEXT: s_min_u32 s10, s14, s8 +; GCN-IR-NEXT: s_flbit_i32_b32 s8, s6 +; GCN-IR-NEXT: s_add_i32 s8, s8, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s9, s7 +; GCN-IR-NEXT: s_min_u32 s14, s8, s9 +; GCN-IR-NEXT: s_sub_u32 s8, s10, s14 +; GCN-IR-NEXT: s_subb_u32 s9, 0, 0 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[16:17], s[8:9], 63 +; GCN-IR-NEXT: s_mov_b32 s11, 0 +; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[16:17] +; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[16:17], s[8:9], 63 +; GCN-IR-NEXT: s_xor_b64 s[18:19], s[12:13], -1 +; GCN-IR-NEXT: s_and_b64 s[16:17], s[18:19], s[16:17] +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[16:17] ; GCN-IR-NEXT: s_cbranch_vccz BB0_5 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: s_add_u32 s16, s8, 1 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s8 +; GCN-IR-NEXT: s_addc_u32 s17, s9, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s9 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[16:17], v[0:1] +; GCN-IR-NEXT: s_sub_i32 
s8, 63, s8 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_lshl_b64 s[12:13], s[6:7], s8 +; GCN-IR-NEXT: s_cbranch_vccz BB0_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: v_not_b32_e32 v2, v2 -; GCN-IR-NEXT: s_add_u32 s8, s2, -1 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v4 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3 -; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 -; GCN-IR-NEXT: s_addc_u32 s9, s3, -1 -; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: s_lshr_b64 s[16:17], s[6:7], s16 +; GCN-IR-NEXT: s_add_u32 s8, s0, -1 +; GCN-IR-NEXT: s_addc_u32 s9, s1, -1 +; GCN-IR-NEXT: s_not_b64 s[2:3], s[10:11] +; GCN-IR-NEXT: s_mov_b32 s15, s11 +; GCN-IR-NEXT: s_add_u32 s10, s2, s14 +; GCN-IR-NEXT: s_addc_u32 s11, s3, s11 +; GCN-IR-NEXT: s_mov_b64 s[14:15], 0 +; GCN-IR-NEXT: s_mov_b32 s3, 0 ; GCN-IR-NEXT: BB0_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 -; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s9 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s8, v6 -; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc -; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 -; GCN-IR-NEXT: v_and_b32_e32 v10, s2, v8 -; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8 -; GCN-IR-NEXT: v_and_b32_e32 v11, s3, v8 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4 -; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v4, v8 -; GCN-IR-NEXT: v_sub_i32_e64 v6, s[0:1], v6, v10 -; GCN-IR-NEXT: v_mov_b32_e32 v5, v9 -; GCN-IR-NEXT: v_mov_b32_e32 v9, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] +; GCN-IR-NEXT: s_lshr_b32 s2, s13, 31 +; GCN-IR-NEXT: s_lshl_b64 s[16:17], s[16:17], 1 +; GCN-IR-NEXT: s_lshl_b64 s[12:13], s[12:13], 1 +; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[2:3] +; GCN-IR-NEXT: s_or_b64 s[12:13], s[14:15], s[12:13] +; GCN-IR-NEXT: s_sub_u32 s2, s8, s16 +; GCN-IR-NEXT: s_subb_u32 s2, s9, s17 +; GCN-IR-NEXT: s_ashr_i32 s14, s2, 31 +; GCN-IR-NEXT: s_mov_b32 s15, s14 +; GCN-IR-NEXT: s_and_b32 s2, s14, 1 +; GCN-IR-NEXT: s_and_b64 s[18:19], s[14:15], s[0:1] +; GCN-IR-NEXT: s_sub_u32 s16, s16, s18 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s10 +; GCN-IR-NEXT: s_subb_u32 s17, s17, s19 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s11 +; GCN-IR-NEXT: s_add_u32 s10, s10, 1 +; GCN-IR-NEXT: s_addc_u32 s11, s11, 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1] +; GCN-IR-NEXT: s_mov_b64 s[14:15], s[2:3] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 ; GCN-IR-NEXT: s_cbranch_vccz BB0_3 +; GCN-IR-NEXT: BB0_4: ; %Flow6 +; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[12:13], 1 +; GCN-IR-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s2 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s3 ; GCN-IR-NEXT: s_branch BB0_6 -; GCN-IR-NEXT: BB0_4: +; GCN-IR-NEXT: BB0_5: ; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 -; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1] +; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[12:13] ; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1] -; GCN-IR-NEXT: s_branch BB0_7 -; GCN-IR-NEXT: BB0_5: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB0_6: ; %Flow6 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 
-; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 -; GCN-IR-NEXT: BB0_7: ; %udiv-end -; GCN-IR-NEXT: v_mul_lo_u32 v1, s2, v1 -; GCN-IR-NEXT: v_mul_hi_u32 v2, s2, v0 -; GCN-IR-NEXT: v_mul_lo_u32 v3, s3, v0 -; GCN-IR-NEXT: v_mul_lo_u32 v0, s2, v0 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[12:13] +; GCN-IR-NEXT: BB0_6: ; %udiv-end +; GCN-IR-NEXT: v_mul_lo_u32 v1, s0, v1 +; GCN-IR-NEXT: v_mul_hi_u32 v2, s0, v0 +; GCN-IR-NEXT: v_mul_lo_u32 v3, s1, v0 +; GCN-IR-NEXT: v_mul_lo_u32 v0, s0, v0 ; GCN-IR-NEXT: s_mov_b32 s11, 0xf000 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v3 @@ -380,13 +372,11 @@ define i64 @v_test_srem(i64 %x, i64 %y) { ; GCN-IR-NEXT: s_or_b64 s[6:7], vcc, s[4:5] ; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 32, v3 ; GCN-IR-NEXT: v_ffbh_u32_e32 v7, v6 -; GCN-IR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6 -; GCN-IR-NEXT: v_cndmask_b32_e32 v12, v7, v3, vcc +; GCN-IR-NEXT: v_min_u32_e32 v12, v3, v7 ; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v0 ; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 32, v3 ; GCN-IR-NEXT: v_ffbh_u32_e32 v7, v1 -; GCN-IR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; GCN-IR-NEXT: v_cndmask_b32_e32 v14, v7, v3, vcc +; GCN-IR-NEXT: v_min_u32_e32 v14, v3, v7 ; GCN-IR-NEXT: v_sub_i32_e32 v7, vcc, v12, v14 ; GCN-IR-NEXT: v_subb_u32_e64 v8, s[4:5], 0, 0, vcc ; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[7:8] @@ -1025,117 +1015,109 @@ define amdgpu_kernel void @s_test_srem33_64(i64 addrspace(1)* %out, i64 %x, i64 ; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 ; GCN-IR-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_ashr_i32 s2, s7, 31 -; GCN-IR-NEXT: s_ashr_i64 s[10:11], s[0:1], 31 -; GCN-IR-NEXT: s_ashr_i32 s0, s1, 31 +; GCN-IR-NEXT: s_ashr_i64 s[2:3], s[6:7], 31 +; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[0:1], 31 +; GCN-IR-NEXT: s_ashr_i32 s0, s7, 31 +; GCN-IR-NEXT: s_ashr_i32 s6, s1, 31 ; GCN-IR-NEXT: s_mov_b32 s1, s0 -; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[6:7], 31 -; GCN-IR-NEXT: s_mov_b32 s3, s2 -; GCN-IR-NEXT: s_xor_b64 s[6:7], s[8:9], s[2:3] -; GCN-IR-NEXT: s_xor_b64 s[10:11], s[10:11], s[0:1] -; GCN-IR-NEXT: s_sub_u32 s8, s6, s2 -; GCN-IR-NEXT: s_subb_u32 s9, s7, s2 -; GCN-IR-NEXT: s_sub_u32 s10, s10, s0 -; GCN-IR-NEXT: s_flbit_i32_b32 s12, s10 -; GCN-IR-NEXT: s_subb_u32 s11, s11, s0 -; GCN-IR-NEXT: s_add_i32 s12, s12, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s13, s11 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s12 -; GCN-IR-NEXT: s_flbit_i32_b32 s12, s8 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s13 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s11, 0 -; GCN-IR-NEXT: s_add_i32 s12, s12, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s13, s9 -; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v0, s13 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s12 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s9, 0 -; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v1, s[12:13], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[10:11], 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[8:9], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], s[6:7] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_xor_b64 s[6:7], s[0:1], -1 -; GCN-IR-NEXT: s_and_b64 s[6:7], s[6:7], vcc -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[6:7] -; GCN-IR-NEXT: s_cbranch_vccz BB8_4 -; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 
0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[8:9], v0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: s_mov_b32 s7, s6 +; GCN-IR-NEXT: s_xor_b64 s[2:3], s[2:3], s[0:1] +; GCN-IR-NEXT: s_xor_b64 s[8:9], s[8:9], s[6:7] +; GCN-IR-NEXT: s_sub_u32 s2, s2, s0 +; GCN-IR-NEXT: s_subb_u32 s3, s3, s0 +; GCN-IR-NEXT: s_sub_u32 s8, s8, s6 +; GCN-IR-NEXT: s_subb_u32 s9, s9, s6 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[8:9], 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[2:3], 0 +; GCN-IR-NEXT: s_mov_b64 s[6:7], 0 +; GCN-IR-NEXT: s_or_b64 s[14:15], s[10:11], s[12:13] +; GCN-IR-NEXT: s_flbit_i32_b32 s10, s8 +; GCN-IR-NEXT: s_add_i32 s10, s10, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s11, s9 +; GCN-IR-NEXT: s_min_u32 s12, s10, s11 +; GCN-IR-NEXT: s_flbit_i32_b32 s10, s2 +; GCN-IR-NEXT: s_add_i32 s10, s10, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3 +; GCN-IR-NEXT: s_min_u32 s16, s10, s11 +; GCN-IR-NEXT: s_sub_u32 s10, s12, s16 +; GCN-IR-NEXT: s_subb_u32 s11, 0, 0 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[18:19], s[10:11], 63 +; GCN-IR-NEXT: s_mov_b32 s13, 0 +; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[18:19] +; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[18:19], s[10:11], 63 +; GCN-IR-NEXT: s_xor_b64 s[20:21], s[14:15], -1 +; GCN-IR-NEXT: s_and_b64 s[18:19], s[20:21], s[18:19] +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[18:19] ; GCN-IR-NEXT: s_cbranch_vccz BB8_5 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: s_add_u32 s18, s10, 1 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s10 +; GCN-IR-NEXT: s_addc_u32 s19, s11, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s11 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[18:19], v[0:1] +; GCN-IR-NEXT: s_sub_i32 s10, 63, s10 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_lshl_b64 s[14:15], s[2:3], s10 +; GCN-IR-NEXT: s_cbranch_vccz BB8_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: v_not_b32_e32 v2, v2 -; GCN-IR-NEXT: s_add_u32 s6, s10, -1 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[8:9], v4 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3 -; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 -; GCN-IR-NEXT: s_addc_u32 s7, s11, -1 -; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: s_lshr_b64 s[18:19], s[2:3], s18 +; GCN-IR-NEXT: s_add_u32 s10, s8, -1 +; GCN-IR-NEXT: s_addc_u32 s11, s9, -1 +; GCN-IR-NEXT: s_not_b64 s[6:7], s[12:13] +; GCN-IR-NEXT: s_mov_b32 s17, s13 +; GCN-IR-NEXT: s_add_u32 s12, s6, s16 +; GCN-IR-NEXT: s_addc_u32 s13, s7, s13 +; GCN-IR-NEXT: s_mov_b64 s[16:17], 0 +; GCN-IR-NEXT: s_mov_b32 s7, 0 ; GCN-IR-NEXT: BB8_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 -; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s7 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s6, v6 -; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc -; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 -; GCN-IR-NEXT: v_and_b32_e32 v10, s10, v8 -; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8 -; GCN-IR-NEXT: v_and_b32_e32 v11, s11, v8 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4 -; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v4, v8 -; GCN-IR-NEXT: v_sub_i32_e64 v6, s[0:1], v6, v10 -; GCN-IR-NEXT: v_mov_b32_e32 
v5, v9 -; GCN-IR-NEXT: v_mov_b32_e32 v9, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] +; GCN-IR-NEXT: s_lshr_b32 s6, s15, 31 +; GCN-IR-NEXT: s_lshl_b64 s[18:19], s[18:19], 1 +; GCN-IR-NEXT: s_lshl_b64 s[14:15], s[14:15], 1 +; GCN-IR-NEXT: s_or_b64 s[18:19], s[18:19], s[6:7] +; GCN-IR-NEXT: s_or_b64 s[14:15], s[16:17], s[14:15] +; GCN-IR-NEXT: s_sub_u32 s6, s10, s18 +; GCN-IR-NEXT: s_subb_u32 s6, s11, s19 +; GCN-IR-NEXT: s_ashr_i32 s16, s6, 31 +; GCN-IR-NEXT: s_mov_b32 s17, s16 +; GCN-IR-NEXT: s_and_b32 s6, s16, 1 +; GCN-IR-NEXT: s_and_b64 s[20:21], s[16:17], s[8:9] +; GCN-IR-NEXT: s_sub_u32 s18, s18, s20 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s12 +; GCN-IR-NEXT: s_subb_u32 s19, s19, s21 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s13 +; GCN-IR-NEXT: s_add_u32 s12, s12, 1 +; GCN-IR-NEXT: s_addc_u32 s13, s13, 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[12:13], v[0:1] +; GCN-IR-NEXT: s_mov_b64 s[16:17], s[6:7] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 ; GCN-IR-NEXT: s_cbranch_vccz BB8_3 +; GCN-IR-NEXT: BB8_4: ; %Flow6 +; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[14:15], 1 +; GCN-IR-NEXT: s_or_b64 s[6:7], s[6:7], s[10:11] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s7 ; GCN-IR-NEXT: s_branch BB8_6 -; GCN-IR-NEXT: BB8_4: -; GCN-IR-NEXT: v_mov_b32_e32 v0, s9 -; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1] -; GCN-IR-NEXT: v_mov_b32_e32 v0, s8 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1] -; GCN-IR-NEXT: s_branch BB8_7 ; GCN-IR-NEXT: BB8_5: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB8_6: ; %Flow6 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 -; GCN-IR-NEXT: BB8_7: ; %udiv-end -; GCN-IR-NEXT: v_mul_lo_u32 v1, s10, v1 -; GCN-IR-NEXT: v_mul_hi_u32 v2, s10, v0 -; GCN-IR-NEXT: v_mul_lo_u32 v3, s11, v0 -; GCN-IR-NEXT: v_mul_lo_u32 v0, s10, v0 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s3 +; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[14:15] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s2 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[14:15] +; GCN-IR-NEXT: BB8_6: ; %udiv-end +; GCN-IR-NEXT: v_mul_lo_u32 v1, s8, v1 +; GCN-IR-NEXT: v_mul_hi_u32 v2, s8, v0 +; GCN-IR-NEXT: v_mul_lo_u32 v3, s9, v0 +; GCN-IR-NEXT: v_mul_lo_u32 v0, s8, v0 ; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v3 -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s8, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s9 -; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc -; GCN-IR-NEXT: v_xor_b32_e32 v0, s2, v0 -; GCN-IR-NEXT: v_xor_b32_e32 v1, s3, v1 +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s2, v0 ; GCN-IR-NEXT: v_mov_b32_e32 v2, s3 -; GCN-IR-NEXT: v_subrev_i32_e32 v0, vcc, s2, v0 +; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc +; GCN-IR-NEXT: v_xor_b32_e32 v0, s0, v0 +; GCN-IR-NEXT: v_xor_b32_e32 v1, s1, v1 +; GCN-IR-NEXT: v_mov_b32_e32 v2, s1 +; GCN-IR-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0 ; GCN-IR-NEXT: s_mov_b32 s6, -1 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc ; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 @@ -1194,119 +1176,111 @@ define amdgpu_kernel void @s_test_srem24_48(i48 addrspace(1)* %out, i48 %x, i48 ; GCN-IR-NEXT: s_load_dword s0, s[0:1], 0xe ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) ; GCN-IR-NEXT: s_sext_i32_i16 s3, s3 +; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[2:3], 24 ; GCN-IR-NEXT: s_sext_i32_i16 s7, s0 -; GCN-IR-NEXT: s_ashr_i64 s[0:1], s[2:3], 24 -; GCN-IR-NEXT: s_ashr_i32 s2, s3, 31 
-; GCN-IR-NEXT: s_ashr_i32 s10, s7, 31 -; GCN-IR-NEXT: s_mov_b32 s3, s2 -; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[6:7], 24 -; GCN-IR-NEXT: s_mov_b32 s11, s10 -; GCN-IR-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3] -; GCN-IR-NEXT: s_xor_b64 s[8:9], s[8:9], s[10:11] -; GCN-IR-NEXT: s_sub_u32 s6, s0, s2 -; GCN-IR-NEXT: s_subb_u32 s7, s1, s2 -; GCN-IR-NEXT: s_sub_u32 s8, s8, s10 -; GCN-IR-NEXT: s_flbit_i32_b32 s12, s8 -; GCN-IR-NEXT: s_subb_u32 s9, s9, s10 -; GCN-IR-NEXT: s_add_i32 s12, s12, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s13, s9 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s12 -; GCN-IR-NEXT: s_flbit_i32_b32 s12, s6 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s13 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s9, 0 -; GCN-IR-NEXT: s_add_i32 s12, s12, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s13, s7 -; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v0, s13 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s12 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 -; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v1, s[12:13], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[8:9], 0 +; GCN-IR-NEXT: s_ashr_i32 s0, s3, 31 +; GCN-IR-NEXT: s_ashr_i32 s12, s7, 31 +; GCN-IR-NEXT: s_mov_b32 s1, s0 +; GCN-IR-NEXT: s_ashr_i64 s[10:11], s[6:7], 24 +; GCN-IR-NEXT: s_mov_b32 s13, s12 +; GCN-IR-NEXT: s_xor_b64 s[2:3], s[8:9], s[0:1] +; GCN-IR-NEXT: s_xor_b64 s[6:7], s[10:11], s[12:13] +; GCN-IR-NEXT: s_sub_u32 s2, s2, s0 +; GCN-IR-NEXT: s_subb_u32 s3, s3, s0 +; GCN-IR-NEXT: s_sub_u32 s6, s6, s12 +; GCN-IR-NEXT: s_subb_u32 s7, s7, s12 ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[6:7], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], s[10:11] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_xor_b64 s[10:11], s[0:1], -1 -; GCN-IR-NEXT: s_and_b64 s[10:11], s[10:11], vcc -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[10:11] -; GCN-IR-NEXT: s_cbranch_vccz BB9_4 -; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[2:3], 0 +; GCN-IR-NEXT: s_mov_b64 s[8:9], 0 +; GCN-IR-NEXT: s_or_b64 s[14:15], s[10:11], s[12:13] +; GCN-IR-NEXT: s_flbit_i32_b32 s10, s6 +; GCN-IR-NEXT: s_add_i32 s10, s10, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s11, s7 +; GCN-IR-NEXT: s_min_u32 s12, s10, s11 +; GCN-IR-NEXT: s_flbit_i32_b32 s10, s2 +; GCN-IR-NEXT: s_add_i32 s10, s10, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3 +; GCN-IR-NEXT: s_min_u32 s16, s10, s11 +; GCN-IR-NEXT: s_sub_u32 s10, s12, s16 +; GCN-IR-NEXT: s_subb_u32 s11, 0, 0 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[18:19], s[10:11], 63 +; GCN-IR-NEXT: s_mov_b32 s13, 0 +; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[18:19] +; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[18:19], s[10:11], 63 +; GCN-IR-NEXT: s_xor_b64 s[20:21], s[14:15], -1 +; GCN-IR-NEXT: s_and_b64 s[18:19], s[20:21], s[18:19] +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[18:19] ; GCN-IR-NEXT: s_cbranch_vccz BB9_5 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: s_add_u32 s18, s10, 1 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s10 +; GCN-IR-NEXT: s_addc_u32 s19, s11, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s11 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[18:19], v[0:1] +; GCN-IR-NEXT: s_sub_i32 s10, 63, s10 +; 
GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_lshl_b64 s[14:15], s[2:3], s10 +; GCN-IR-NEXT: s_cbranch_vccz BB9_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: v_not_b32_e32 v2, v2 -; GCN-IR-NEXT: s_add_u32 s10, s8, -1 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v4 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3 -; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 -; GCN-IR-NEXT: s_addc_u32 s11, s9, -1 -; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: s_lshr_b64 s[18:19], s[2:3], s18 +; GCN-IR-NEXT: s_add_u32 s10, s6, -1 +; GCN-IR-NEXT: s_addc_u32 s11, s7, -1 +; GCN-IR-NEXT: s_not_b64 s[8:9], s[12:13] +; GCN-IR-NEXT: s_mov_b32 s17, s13 +; GCN-IR-NEXT: s_add_u32 s12, s8, s16 +; GCN-IR-NEXT: s_addc_u32 s13, s9, s13 +; GCN-IR-NEXT: s_mov_b64 s[16:17], 0 +; GCN-IR-NEXT: s_mov_b32 s9, 0 ; GCN-IR-NEXT: BB9_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 -; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s11 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s10, v6 -; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc -; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 -; GCN-IR-NEXT: v_and_b32_e32 v10, s8, v8 -; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8 -; GCN-IR-NEXT: v_and_b32_e32 v11, s9, v8 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4 -; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v4, v8 -; GCN-IR-NEXT: v_sub_i32_e64 v6, s[0:1], v6, v10 -; GCN-IR-NEXT: v_mov_b32_e32 v5, v9 -; GCN-IR-NEXT: v_mov_b32_e32 v9, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] +; GCN-IR-NEXT: s_lshr_b32 s8, s15, 31 +; GCN-IR-NEXT: s_lshl_b64 s[18:19], s[18:19], 1 +; GCN-IR-NEXT: s_lshl_b64 s[14:15], s[14:15], 1 +; GCN-IR-NEXT: s_or_b64 s[18:19], s[18:19], s[8:9] +; GCN-IR-NEXT: s_or_b64 s[14:15], s[16:17], s[14:15] +; GCN-IR-NEXT: s_sub_u32 s8, s10, s18 +; GCN-IR-NEXT: s_subb_u32 s8, s11, s19 +; GCN-IR-NEXT: s_ashr_i32 s16, s8, 31 +; GCN-IR-NEXT: s_mov_b32 s17, s16 +; GCN-IR-NEXT: s_and_b32 s8, s16, 1 +; GCN-IR-NEXT: s_and_b64 s[20:21], s[16:17], s[6:7] +; GCN-IR-NEXT: s_sub_u32 s18, s18, s20 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s12 +; GCN-IR-NEXT: s_subb_u32 s19, s19, s21 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s13 +; GCN-IR-NEXT: s_add_u32 s12, s12, 1 +; GCN-IR-NEXT: s_addc_u32 s13, s13, 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[12:13], v[0:1] +; GCN-IR-NEXT: s_mov_b64 s[16:17], s[8:9] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 ; GCN-IR-NEXT: s_cbranch_vccz BB9_3 +; GCN-IR-NEXT: BB9_4: ; %Flow3 +; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[14:15], 1 +; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], s[10:11] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s8 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s9 ; GCN-IR-NEXT: s_branch BB9_6 -; GCN-IR-NEXT: BB9_4: -; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 -; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1] -; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1] -; GCN-IR-NEXT: s_branch BB9_7 ; GCN-IR-NEXT: BB9_5: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB9_6: ; %Flow3 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_or_b32_e32 v1, v3, 
v1 -; GCN-IR-NEXT: BB9_7: ; %udiv-end -; GCN-IR-NEXT: v_mul_lo_u32 v1, s8, v1 -; GCN-IR-NEXT: v_mul_hi_u32 v2, s8, v0 -; GCN-IR-NEXT: v_mul_lo_u32 v3, s9, v0 -; GCN-IR-NEXT: v_mul_lo_u32 v0, s8, v0 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s3 +; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[14:15] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s2 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[14:15] +; GCN-IR-NEXT: BB9_6: ; %udiv-end +; GCN-IR-NEXT: v_mul_lo_u32 v1, s6, v1 +; GCN-IR-NEXT: v_mul_hi_u32 v2, s6, v0 +; GCN-IR-NEXT: v_mul_lo_u32 v3, s7, v0 +; GCN-IR-NEXT: v_mul_lo_u32 v0, s6, v0 +; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v3 -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s6, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s7 -; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc -; GCN-IR-NEXT: v_xor_b32_e32 v0, s2, v0 -; GCN-IR-NEXT: v_xor_b32_e32 v1, s3, v1 +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s2, v0 ; GCN-IR-NEXT: v_mov_b32_e32 v2, s3 -; GCN-IR-NEXT: v_subrev_i32_e32 v0, vcc, s2, v0 +; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc +; GCN-IR-NEXT: v_xor_b32_e32 v0, s0, v0 +; GCN-IR-NEXT: v_xor_b32_e32 v1, s1, v1 +; GCN-IR-NEXT: v_mov_b32_e32 v2, s1 +; GCN-IR-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc -; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 ; GCN-IR-NEXT: s_mov_b32 s6, -1 ; GCN-IR-NEXT: buffer_store_short v1, off, s[4:7], 0 offset:4 ; GCN-IR-NEXT: buffer_store_dword v0, off, s[4:7], 0 @@ -1437,97 +1411,90 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(i64 addrspace(1)* %out, i64 %x) ; ; GCN-IR-LABEL: s_test_srem_k_num_i64: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases -; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_ashr_i32 s0, s7, 31 -; GCN-IR-NEXT: s_mov_b32 s1, s0 -; GCN-IR-NEXT: s_xor_b64 s[2:3], s[6:7], s[0:1] -; GCN-IR-NEXT: s_sub_u32 s2, s2, s0 -; GCN-IR-NEXT: s_subb_u32 s3, s3, s0 -; GCN-IR-NEXT: s_flbit_i32_b32 s6, s2 -; GCN-IR-NEXT: s_add_i32 s6, s6, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s7, s3 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s6 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s3, 0 -; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 0xffffffc5, v2 -; GCN-IR-NEXT: v_addc_u32_e64 v1, s[6:7], 0, -1, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[2:3], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_xor_b64 s[6:7], s[0:1], -1 -; GCN-IR-NEXT: s_and_b64 s[6:7], s[6:7], vcc -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[6:7] -; GCN-IR-NEXT: s_cbranch_vccz BB10_4 -; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], 24, v0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: s_ashr_i32 s6, s3, 31 +; GCN-IR-NEXT: s_mov_b32 s7, s6 +; GCN-IR-NEXT: s_xor_b64 s[2:3], s[2:3], s[6:7] +; GCN-IR-NEXT: s_sub_u32 s4, s2, s6 +; GCN-IR-NEXT: s_subb_u32 s5, s3, s6 +; GCN-IR-NEXT: s_flbit_i32_b32 s2, s4 +; GCN-IR-NEXT: s_add_i32 s2, s2, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s3, s5 +; GCN-IR-NEXT: s_min_u32 s8, s2, s3 +; GCN-IR-NEXT: s_add_u32 s6, s8, 0xffffffc5 +; GCN-IR-NEXT: s_addc_u32 s7, 0, -1 +; GCN-IR-NEXT: 
v_cmp_eq_u64_e64 s[10:11], s[4:5], 0 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[12:13], s[6:7], 63 +; GCN-IR-NEXT: s_mov_b64 s[2:3], 0 +; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[12:13] +; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[12:13], s[6:7], 63 +; GCN-IR-NEXT: s_xor_b64 s[14:15], s[10:11], -1 +; GCN-IR-NEXT: s_and_b64 s[12:13], s[14:15], s[12:13] +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[12:13] ; GCN-IR-NEXT: s_cbranch_vccz BB10_5 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: s_add_u32 s12, s6, 1 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 +; GCN-IR-NEXT: s_addc_u32 s13, s7, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s7 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[12:13], v[0:1] +; GCN-IR-NEXT: s_sub_i32 s6, 63, s6 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_lshl_b64 s[10:11], 24, s6 +; GCN-IR-NEXT: s_cbranch_vccz BB10_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: s_add_u32 s6, s2, -1 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], 24, v3 -; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 58, v2 -; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 -; GCN-IR-NEXT: s_addc_u32 s7, s3, -1 -; GCN-IR-NEXT: v_subb_u32_e64 v5, s[0:1], 0, 0, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: s_lshr_b64 s[14:15], 24, s12 +; GCN-IR-NEXT: s_add_u32 s6, s4, -1 +; GCN-IR-NEXT: s_addc_u32 s7, s5, -1 +; GCN-IR-NEXT: s_sub_u32 s8, 58, s8 +; GCN-IR-NEXT: s_subb_u32 s9, 0, 0 +; GCN-IR-NEXT: s_mov_b64 s[12:13], 0 +; GCN-IR-NEXT: s_mov_b32 s3, 0 ; GCN-IR-NEXT: BB10_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 -; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s7 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s6, v6 -; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc -; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 -; GCN-IR-NEXT: v_and_b32_e32 v10, s2, v8 -; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8 -; GCN-IR-NEXT: v_and_b32_e32 v11, s3, v8 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4 -; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v4, v8 -; GCN-IR-NEXT: v_sub_i32_e64 v6, s[0:1], v6, v10 -; GCN-IR-NEXT: v_mov_b32_e32 v5, v9 -; GCN-IR-NEXT: v_mov_b32_e32 v9, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] +; GCN-IR-NEXT: s_lshr_b32 s2, s11, 31 +; GCN-IR-NEXT: s_lshl_b64 s[14:15], s[14:15], 1 +; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1 +; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[2:3] +; GCN-IR-NEXT: s_or_b64 s[10:11], s[12:13], s[10:11] +; GCN-IR-NEXT: s_sub_u32 s2, s6, s14 +; GCN-IR-NEXT: s_subb_u32 s2, s7, s15 +; GCN-IR-NEXT: s_ashr_i32 s12, s2, 31 +; GCN-IR-NEXT: s_mov_b32 s13, s12 +; GCN-IR-NEXT: s_and_b32 s2, s12, 1 +; GCN-IR-NEXT: s_and_b64 s[16:17], s[12:13], s[4:5] +; GCN-IR-NEXT: s_sub_u32 s14, s14, s16 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s8 +; GCN-IR-NEXT: s_subb_u32 s15, s15, s17 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s9 +; GCN-IR-NEXT: s_add_u32 s8, s8, 1 +; GCN-IR-NEXT: s_addc_u32 s9, s9, 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1] +; GCN-IR-NEXT: s_mov_b64 s[12:13], s[2:3] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 ; GCN-IR-NEXT: s_cbranch_vccz BB10_3 +; GCN-IR-NEXT: BB10_4: ; %Flow5 +; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[10:11], 1 +; GCN-IR-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7] +; GCN-IR-NEXT: v_mov_b32_e32 
v0, s2 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s3 ; GCN-IR-NEXT: s_branch BB10_6 -; GCN-IR-NEXT: BB10_4: -; GCN-IR-NEXT: v_mov_b32_e32 v1, 0 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, 24, 0, s[0:1] -; GCN-IR-NEXT: s_branch BB10_7 ; GCN-IR-NEXT: BB10_5: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB10_6: ; %Flow5 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 -; GCN-IR-NEXT: BB10_7: ; %udiv-end -; GCN-IR-NEXT: v_mul_lo_u32 v1, s2, v1 -; GCN-IR-NEXT: v_mul_hi_u32 v2, s2, v0 -; GCN-IR-NEXT: v_mul_lo_u32 v3, s3, v0 -; GCN-IR-NEXT: v_mul_lo_u32 v0, s2, v0 -; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 +; GCN-IR-NEXT: v_mov_b32_e32 v1, 0 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, 24, 0, s[10:11] +; GCN-IR-NEXT: BB10_6: ; %udiv-end +; GCN-IR-NEXT: v_mul_lo_u32 v1, s4, v1 +; GCN-IR-NEXT: v_mul_hi_u32 v2, s4, v0 +; GCN-IR-NEXT: v_mul_lo_u32 v3, s5, v0 +; GCN-IR-NEXT: v_mul_lo_u32 v0, s4, v0 +; GCN-IR-NEXT: s_mov_b32 s3, 0xf000 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v3 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 24, v0 -; GCN-IR-NEXT: s_mov_b32 s6, -1 +; GCN-IR-NEXT: s_mov_b32 s2, -1 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, 0, v1, vcc -; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GCN-IR-NEXT: s_endpgm %result = srem i64 24, %x store i64 %result, i64 addrspace(1)* %out @@ -1657,8 +1624,7 @@ define i64 @v_test_srem_k_num_i64(i64 %x) { ; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0 ; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2 ; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1 -; GCN-IR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; GCN-IR-NEXT: v_cndmask_b32_e32 v8, v3, v2, vcc +; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3 ; GCN-IR-NEXT: s_movk_i32 s6, 0xffc5 ; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, s6, v8 ; GCN-IR-NEXT: v_addc_u32_e64 v4, s[6:7], 0, -1, vcc @@ -1865,8 +1831,7 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) { ; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0 ; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2 ; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1 -; GCN-IR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; GCN-IR-NEXT: v_cndmask_b32_e32 v4, v3, v2, vcc +; GCN-IR-NEXT: v_min_u32_e32 v4, v2, v3 ; GCN-IR-NEXT: s_movk_i32 s6, 0xffd0 ; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v4 ; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc @@ -1973,12 +1938,11 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) { ; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v2 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2 ; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v2 -; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v0 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc +; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v0 ; GCN-IR-NEXT: v_add_i32_e64 v3, s[4:5], 32, v3 ; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v1 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1 -; GCN-IR-NEXT: v_cndmask_b32_e64 v8, v4, v3, s[4:5] +; GCN-IR-NEXT: v_min_u32_e32 v8, v3, v4 ; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 48, v8 ; GCN-IR-NEXT: v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5] ; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll index d06a516..a5b7017 100644 --- a/llvm/test/CodeGen/AMDGPU/udiv64.ll +++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll @@ -128,93 +128,85 @@ define amdgpu_kernel void @s_test_udiv_i64(i64 addrspace(1)* %out, i64 %x, i64 % ; GCN-IR-LABEL: s_test_udiv_i64: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases ; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 -; GCN-IR-NEXT: 
s_load_dwordx2 s[2:3], s[0:1], 0xd +; GCN-IR-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd +; GCN-IR-NEXT: s_mov_b64 s[2:3], 0 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0 -; GCN-IR-NEXT: s_flbit_i32_b32 s10, s2 -; GCN-IR-NEXT: s_add_i32 s10, s10, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s10 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[6:7], 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[0:1], 0 +; GCN-IR-NEXT: s_flbit_i32_b32 s12, s0 +; GCN-IR-NEXT: s_or_b64 s[14:15], s[8:9], s[10:11] ; GCN-IR-NEXT: s_flbit_i32_b32 s10, s6 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s3, 0 +; GCN-IR-NEXT: s_add_i32 s12, s12, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s8, s1 ; GCN-IR-NEXT: s_add_i32 s10, s10, 32 ; GCN-IR-NEXT: s_flbit_i32_b32 s11, s7 -; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s10 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 -; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[2:3], 0 -; GCN-IR-NEXT: v_subb_u32_e64 v1, s[10:11], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], s[8:9] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_xor_b64 s[8:9], s[0:1], -1 -; GCN-IR-NEXT: s_and_b64 s[8:9], s[8:9], vcc -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[8:9] -; GCN-IR-NEXT: s_cbranch_vccz BB0_4 -; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: s_min_u32 s8, s12, s8 +; GCN-IR-NEXT: s_min_u32 s12, s10, s11 +; GCN-IR-NEXT: s_sub_u32 s10, s8, s12 +; GCN-IR-NEXT: s_subb_u32 s11, 0, 0 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[16:17], s[10:11], 63 +; GCN-IR-NEXT: s_mov_b32 s9, 0 +; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[16:17] +; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[16:17], s[10:11], 63 +; GCN-IR-NEXT: s_xor_b64 s[18:19], s[14:15], -1 +; GCN-IR-NEXT: s_and_b64 s[16:17], s[18:19], s[16:17] +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[16:17] ; GCN-IR-NEXT: s_cbranch_vccz BB0_5 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: s_add_u32 s14, s10, 1 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s10 +; GCN-IR-NEXT: s_addc_u32 s15, s11, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s11 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[14:15], v[0:1] +; GCN-IR-NEXT: s_sub_i32 s10, 63, s10 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[6:7], s10 +; GCN-IR-NEXT: s_cbranch_vccz BB0_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: v_not_b32_e32 v2, v2 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v4 -; GCN-IR-NEXT: s_add_u32 s6, s2, -1 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3 -; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 -; GCN-IR-NEXT: s_addc_u32 s7, s3, -1 -; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: s_lshr_b64 s[14:15], s[6:7], s14 +; GCN-IR-NEXT: s_add_u32 s6, s0, -1 +; GCN-IR-NEXT: s_addc_u32 s7, s1, -1 +; GCN-IR-NEXT: s_not_b64 s[2:3], s[8:9] +; GCN-IR-NEXT: s_mov_b32 s13, s9 +; GCN-IR-NEXT: s_add_u32 s8, s2, s12 +; GCN-IR-NEXT: s_addc_u32 s9, s3, s9 +; GCN-IR-NEXT: 
s_mov_b64 s[12:13], 0 +; GCN-IR-NEXT: s_mov_b32 s3, 0 ; GCN-IR-NEXT: BB0_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 -; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s7 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s6, v6 -; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc -; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 -; GCN-IR-NEXT: v_and_b32_e32 v10, s2, v8 -; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8 -; GCN-IR-NEXT: v_and_b32_e32 v11, s3, v8 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4 -; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v4, v8 -; GCN-IR-NEXT: v_sub_i32_e64 v6, s[0:1], v6, v10 -; GCN-IR-NEXT: v_mov_b32_e32 v5, v9 -; GCN-IR-NEXT: v_mov_b32_e32 v9, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] +; GCN-IR-NEXT: s_lshr_b32 s2, s11, 31 +; GCN-IR-NEXT: s_lshl_b64 s[14:15], s[14:15], 1 +; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1 +; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[2:3] +; GCN-IR-NEXT: s_or_b64 s[10:11], s[12:13], s[10:11] +; GCN-IR-NEXT: s_sub_u32 s2, s6, s14 +; GCN-IR-NEXT: s_subb_u32 s2, s7, s15 +; GCN-IR-NEXT: s_ashr_i32 s12, s2, 31 +; GCN-IR-NEXT: s_mov_b32 s13, s12 +; GCN-IR-NEXT: s_and_b32 s2, s12, 1 +; GCN-IR-NEXT: s_and_b64 s[16:17], s[12:13], s[0:1] +; GCN-IR-NEXT: s_sub_u32 s14, s14, s16 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s8 +; GCN-IR-NEXT: s_subb_u32 s15, s15, s17 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s9 +; GCN-IR-NEXT: s_add_u32 s8, s8, 1 +; GCN-IR-NEXT: s_addc_u32 s9, s9, 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1] +; GCN-IR-NEXT: s_mov_b64 s[12:13], s[2:3] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 ; GCN-IR-NEXT: s_cbranch_vccz BB0_3 +; GCN-IR-NEXT: BB0_4: ; %Flow6 +; GCN-IR-NEXT: s_lshl_b64 s[0:1], s[10:11], 1 +; GCN-IR-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s1 ; GCN-IR-NEXT: s_branch BB0_6 -; GCN-IR-NEXT: BB0_4: +; GCN-IR-NEXT: BB0_5: ; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 -; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1] +; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[14:15] ; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1] -; GCN-IR-NEXT: s_branch BB0_7 -; GCN-IR-NEXT: BB0_5: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB0_6: ; %Flow6 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 -; GCN-IR-NEXT: BB0_7: ; %udiv-end +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[14:15] +; GCN-IR-NEXT: BB0_6: ; %udiv-end ; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 ; GCN-IR-NEXT: s_mov_b32 s6, -1 ; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 @@ -347,13 +339,11 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) { ; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5] ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 32, v4 ; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v3 -; GCN-IR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3 -; GCN-IR-NEXT: v_cndmask_b32_e32 v8, v5, v4, vcc +; GCN-IR-NEXT: v_min_u32_e32 v8, v4, v5 ; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0 ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 32, v4 ; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v1 -; GCN-IR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; GCN-IR-NEXT: 
v_cndmask_b32_e32 v10, v5, v4, vcc +; GCN-IR-NEXT: v_min_u32_e32 v10, v4, v5 ; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v8, v10 ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[6:7], 0, 0, vcc ; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[6:7] @@ -836,93 +826,85 @@ define amdgpu_kernel void @s_test_udiv24_i48(i48 addrspace(1)* %out, i48 %x, i48 ; GCN-IR-NEXT: s_and_b32 s0, s2, s9 ; GCN-IR-NEXT: s_and_b32 s3, s7, s8 ; GCN-IR-NEXT: s_and_b32 s2, s6, s9 -; GCN-IR-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 -; GCN-IR-NEXT: s_flbit_i32_b32 s10, s2 ; GCN-IR-NEXT: s_lshr_b64 s[6:7], s[0:1], 24 -; GCN-IR-NEXT: s_add_i32 s10, s10, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s10 +; GCN-IR-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[2:3], 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[6:7], 0 +; GCN-IR-NEXT: s_mov_b64 s[0:1], 0 +; GCN-IR-NEXT: s_or_b64 s[14:15], s[8:9], s[10:11] +; GCN-IR-NEXT: s_flbit_i32_b32 s8, s2 ; GCN-IR-NEXT: s_flbit_i32_b32 s10, s6 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s3, 0 +; GCN-IR-NEXT: s_add_i32 s8, s8, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s9, s3 ; GCN-IR-NEXT: s_add_i32 s10, s10, 32 ; GCN-IR-NEXT: s_flbit_i32_b32 s11, s7 -; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s10 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 -; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v1, s[10:11], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[2:3], 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], s[8:9] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_xor_b64 s[8:9], s[0:1], -1 -; GCN-IR-NEXT: s_and_b64 s[8:9], s[8:9], vcc -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[8:9] -; GCN-IR-NEXT: s_cbranch_vccz BB7_4 -; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: s_min_u32 s8, s8, s9 +; GCN-IR-NEXT: s_min_u32 s12, s10, s11 +; GCN-IR-NEXT: s_sub_u32 s10, s8, s12 +; GCN-IR-NEXT: s_subb_u32 s11, 0, 0 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[16:17], s[10:11], 63 +; GCN-IR-NEXT: s_mov_b32 s9, 0 +; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[16:17] +; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[16:17], s[10:11], 63 +; GCN-IR-NEXT: s_xor_b64 s[18:19], s[14:15], -1 +; GCN-IR-NEXT: s_and_b64 s[16:17], s[18:19], s[16:17] +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[16:17] ; GCN-IR-NEXT: s_cbranch_vccz BB7_5 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: s_add_u32 s14, s10, 1 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s10 +; GCN-IR-NEXT: s_addc_u32 s15, s11, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s11 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[14:15], v[0:1] +; GCN-IR-NEXT: s_sub_i32 s10, 63, s10 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[6:7], s10 +; GCN-IR-NEXT: s_cbranch_vccz BB7_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: v_not_b32_e32 v2, v2 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v4 +; GCN-IR-NEXT: s_lshr_b64 s[14:15], s[6:7], s14 ; GCN-IR-NEXT: s_add_u32 s6, s2, -1 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3 -; 
GCN-IR-NEXT: v_mov_b32_e32 v8, 0 ; GCN-IR-NEXT: s_addc_u32 s7, s3, -1 -; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: s_not_b64 s[0:1], s[8:9] +; GCN-IR-NEXT: s_mov_b32 s13, s9 +; GCN-IR-NEXT: s_add_u32 s8, s0, s12 +; GCN-IR-NEXT: s_addc_u32 s9, s1, s9 +; GCN-IR-NEXT: s_mov_b64 s[12:13], 0 +; GCN-IR-NEXT: s_mov_b32 s1, 0 ; GCN-IR-NEXT: BB7_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 -; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s7 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s6, v6 -; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc -; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 -; GCN-IR-NEXT: v_and_b32_e32 v10, s2, v8 -; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8 -; GCN-IR-NEXT: v_and_b32_e32 v11, s3, v8 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4 -; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v4, v8 -; GCN-IR-NEXT: v_sub_i32_e64 v6, s[0:1], v6, v10 -; GCN-IR-NEXT: v_mov_b32_e32 v5, v9 -; GCN-IR-NEXT: v_mov_b32_e32 v9, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] +; GCN-IR-NEXT: s_lshr_b32 s0, s11, 31 +; GCN-IR-NEXT: s_lshl_b64 s[14:15], s[14:15], 1 +; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1 +; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[0:1] +; GCN-IR-NEXT: s_or_b64 s[10:11], s[12:13], s[10:11] +; GCN-IR-NEXT: s_sub_u32 s0, s6, s14 +; GCN-IR-NEXT: s_subb_u32 s0, s7, s15 +; GCN-IR-NEXT: s_ashr_i32 s12, s0, 31 +; GCN-IR-NEXT: s_mov_b32 s13, s12 +; GCN-IR-NEXT: s_and_b32 s0, s12, 1 +; GCN-IR-NEXT: s_and_b64 s[16:17], s[12:13], s[2:3] +; GCN-IR-NEXT: s_sub_u32 s14, s14, s16 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s8 +; GCN-IR-NEXT: s_subb_u32 s15, s15, s17 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s9 +; GCN-IR-NEXT: s_add_u32 s8, s8, 1 +; GCN-IR-NEXT: s_addc_u32 s9, s9, 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1] +; GCN-IR-NEXT: s_mov_b64 s[12:13], s[0:1] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 ; GCN-IR-NEXT: s_cbranch_vccz BB7_3 +; GCN-IR-NEXT: BB7_4: ; %Flow3 +; GCN-IR-NEXT: s_lshl_b64 s[2:3], s[10:11], 1 +; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s1 ; GCN-IR-NEXT: s_branch BB7_6 -; GCN-IR-NEXT: BB7_4: +; GCN-IR-NEXT: BB7_5: ; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 -; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1] +; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[14:15] ; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1] -; GCN-IR-NEXT: s_branch BB7_7 -; GCN-IR-NEXT: BB7_5: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB7_6: ; %Flow3 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 -; GCN-IR-NEXT: BB7_7: ; %udiv-end +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[14:15] +; GCN-IR-NEXT: BB7_6: ; %udiv-end ; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 ; GCN-IR-NEXT: s_mov_b32 s6, -1 ; GCN-IR-NEXT: buffer_store_short v1, off, s[4:7], 0 offset:4 @@ -1047,84 +1029,77 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(i64 addrspace(1)* %out, i64 %x) ; ; GCN-IR-LABEL: s_test_udiv_k_num_i64: ; 
GCN-IR: ; %bb.0: ; %_udiv-special-cases -; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_flbit_i32_b32 s2, s6 -; GCN-IR-NEXT: s_flbit_i32_b32 s3, s7 -; GCN-IR-NEXT: s_add_i32 s2, s2, 32 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s3 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s2 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 -; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 0xffffffc5, v2 -; GCN-IR-NEXT: v_addc_u32_e64 v1, s[2:3], 0, -1, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_xor_b64 s[2:3], s[0:1], -1 -; GCN-IR-NEXT: s_and_b64 s[2:3], s[2:3], vcc -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[2:3] -; GCN-IR-NEXT: s_cbranch_vccz BB8_4 -; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], 24, v0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: s_flbit_i32_b32 s4, s2 +; GCN-IR-NEXT: s_flbit_i32_b32 s5, s3 +; GCN-IR-NEXT: s_add_i32 s4, s4, 32 +; GCN-IR-NEXT: s_min_u32 s8, s4, s5 +; GCN-IR-NEXT: s_add_u32 s6, s8, 0xffffffc5 +; GCN-IR-NEXT: s_addc_u32 s7, 0, -1 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[2:3], 0 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[12:13], s[6:7], 63 +; GCN-IR-NEXT: s_mov_b64 s[4:5], 0 +; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[12:13] +; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[12:13], s[6:7], 63 +; GCN-IR-NEXT: s_xor_b64 s[14:15], s[10:11], -1 +; GCN-IR-NEXT: s_and_b64 s[12:13], s[14:15], s[12:13] +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[12:13] ; GCN-IR-NEXT: s_cbranch_vccz BB8_5 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: s_add_u32 s12, s6, 1 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 +; GCN-IR-NEXT: s_addc_u32 s13, s7, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s7 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[12:13], v[0:1] +; GCN-IR-NEXT: s_sub_i32 s6, 63, s6 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_lshl_b64 s[10:11], 24, s6 +; GCN-IR-NEXT: s_cbranch_vccz BB8_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: s_add_u32 s2, s6, -1 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], 24, v3 -; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 58, v2 -; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 -; GCN-IR-NEXT: s_addc_u32 s3, s7, -1 -; GCN-IR-NEXT: v_subb_u32_e64 v5, s[0:1], 0, 0, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: s_lshr_b64 s[14:15], 24, s12 +; GCN-IR-NEXT: s_add_u32 s6, s2, -1 +; GCN-IR-NEXT: s_addc_u32 s7, s3, -1 +; GCN-IR-NEXT: s_sub_u32 s8, 58, s8 +; GCN-IR-NEXT: s_subb_u32 s9, 0, 0 +; GCN-IR-NEXT: s_mov_b64 s[12:13], 0 +; GCN-IR-NEXT: s_mov_b32 s5, 0 ; GCN-IR-NEXT: BB8_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 -; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s3 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s2, v6 -; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc -; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 -; GCN-IR-NEXT: v_and_b32_e32 v10, s6, v8 -; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8 -; GCN-IR-NEXT: 
v_and_b32_e32 v11, s7, v8 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4 -; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v4, v8 -; GCN-IR-NEXT: v_sub_i32_e64 v6, s[0:1], v6, v10 -; GCN-IR-NEXT: v_mov_b32_e32 v5, v9 -; GCN-IR-NEXT: v_mov_b32_e32 v9, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] +; GCN-IR-NEXT: s_lshr_b32 s4, s11, 31 +; GCN-IR-NEXT: s_lshl_b64 s[14:15], s[14:15], 1 +; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1 +; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[4:5] +; GCN-IR-NEXT: s_or_b64 s[10:11], s[12:13], s[10:11] +; GCN-IR-NEXT: s_sub_u32 s4, s6, s14 +; GCN-IR-NEXT: s_subb_u32 s4, s7, s15 +; GCN-IR-NEXT: s_ashr_i32 s12, s4, 31 +; GCN-IR-NEXT: s_mov_b32 s13, s12 +; GCN-IR-NEXT: s_and_b32 s4, s12, 1 +; GCN-IR-NEXT: s_and_b64 s[16:17], s[12:13], s[2:3] +; GCN-IR-NEXT: s_sub_u32 s14, s14, s16 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s8 +; GCN-IR-NEXT: s_subb_u32 s15, s15, s17 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s9 +; GCN-IR-NEXT: s_add_u32 s8, s8, 1 +; GCN-IR-NEXT: s_addc_u32 s9, s9, 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1] +; GCN-IR-NEXT: s_mov_b64 s[12:13], s[4:5] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 ; GCN-IR-NEXT: s_cbranch_vccz BB8_3 +; GCN-IR-NEXT: BB8_4: ; %Flow5 +; GCN-IR-NEXT: s_lshl_b64 s[2:3], s[10:11], 1 +; GCN-IR-NEXT: s_or_b64 s[2:3], s[4:5], s[2:3] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s2 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s3 ; GCN-IR-NEXT: s_branch BB8_6 -; GCN-IR-NEXT: BB8_4: -; GCN-IR-NEXT: v_mov_b32_e32 v1, 0 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, 24, 0, s[0:1] -; GCN-IR-NEXT: s_branch BB8_7 ; GCN-IR-NEXT: BB8_5: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB8_6: ; %Flow5 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 -; GCN-IR-NEXT: BB8_7: ; %udiv-end -; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 -; GCN-IR-NEXT: s_mov_b32 s6, -1 -; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, 0 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, 24, 0, s[10:11] +; GCN-IR-NEXT: BB8_6: ; %udiv-end +; GCN-IR-NEXT: s_mov_b32 s3, 0xf000 +; GCN-IR-NEXT: s_mov_b32 s2, -1 +; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GCN-IR-NEXT: s_endpgm %result = udiv i64 24, %x store i64 %result, i64 addrspace(1)* %out @@ -1241,69 +1216,68 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) { ; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0 ; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2 ; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1 -; GCN-IR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; GCN-IR-NEXT: v_cndmask_b32_e32 v4, v3, v2, vcc -; GCN-IR-NEXT: v_add_i32_e32 v5, vcc, 0xffffffd0, v4 -; GCN-IR-NEXT: v_addc_u32_e64 v6, s[6:7], 0, -1, vcc +; GCN-IR-NEXT: v_min_u32_e32 v4, v2, v3 +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 0xffffffd0, v4 +; GCN-IR-NEXT: v_addc_u32_e64 v7, s[6:7], 0, -1, vcc ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1] -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[5:6] +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[6:7] ; GCN-IR-NEXT: s_mov_b64 s[8:9], 0x8000 ; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], vcc ; GCN-IR-NEXT: v_mov_b32_e32 v2, s8 -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[5:6] -; GCN-IR-NEXT: v_mov_b32_e32 v7, 0 +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[6:7] +; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 ; GCN-IR-NEXT: v_cndmask_b32_e64 v2, v2, 0, s[4:5] ; GCN-IR-NEXT: s_xor_b64 
s[4:5], s[4:5], -1 -; GCN-IR-NEXT: v_mov_b32_e32 v3, v7 +; GCN-IR-NEXT: v_mov_b32_e32 v3, v5 ; GCN-IR-NEXT: s_and_b64 s[4:5], s[4:5], vcc ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5] ; GCN-IR-NEXT: s_cbranch_execz BB9_6 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v5 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v6, vcc -; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v5 -; GCN-IR-NEXT: v_cmp_ge_u64_e32 vcc, v[8:9], v[5:6] -; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 -; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[8:9], v2 +; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v6 +; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v7, vcc +; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v6 +; GCN-IR-NEXT: v_cmp_ge_u64_e32 vcc, v[8:9], v[6:7] ; GCN-IR-NEXT: v_mov_b32_e32 v6, 0 +; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[8:9], v2 +; GCN-IR-NEXT: v_mov_b32_e32 v7, 0 ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0 ; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc ; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5] ; GCN-IR-NEXT: s_cbranch_execz BB9_5 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader ; GCN-IR-NEXT: s_mov_b64 s[4:5], 0x8000 -; GCN-IR-NEXT: v_lshr_b64 v[12:13], s[4:5], v8 +; GCN-IR-NEXT: v_lshr_b64 v[10:11], s[4:5], v8 ; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, -1, v0 ; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, -1, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v10, vcc, 47, v4 -; GCN-IR-NEXT: v_mov_b32_e32 v14, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v15, 0 -; GCN-IR-NEXT: v_subb_u32_e32 v11, vcc, 0, v7, vcc +; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 47, v4 +; GCN-IR-NEXT: v_mov_b32_e32 v12, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v13, 0 +; GCN-IR-NEXT: v_subb_u32_e32 v5, vcc, 0, v5, vcc ; GCN-IR-NEXT: BB9_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[12:13], v[12:13], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3 -; GCN-IR-NEXT: v_or_b32_e32 v4, v12, v4 +; GCN-IR-NEXT: v_lshl_b64 v[10:11], v[10:11], 1 +; GCN-IR-NEXT: v_lshrrev_b32_e32 v6, 31, v3 +; GCN-IR-NEXT: v_or_b32_e32 v10, v10, v6 ; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1 -; GCN-IR-NEXT: v_sub_i32_e32 v5, vcc, v8, v4 -; GCN-IR-NEXT: v_subb_u32_e32 v5, vcc, v9, v13, vcc -; GCN-IR-NEXT: v_or_b32_e32 v2, v14, v2 -; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, 1, v10 -; GCN-IR-NEXT: v_ashrrev_i32_e32 v7, 31, v5 -; GCN-IR-NEXT: v_or_b32_e32 v3, v15, v3 -; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, 0, v11, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[14:15], v[10:11] -; GCN-IR-NEXT: v_mov_b32_e32 v10, v14 -; GCN-IR-NEXT: v_mov_b32_e32 v6, 0 -; GCN-IR-NEXT: v_and_b32_e32 v5, 1, v7 -; GCN-IR-NEXT: v_and_b32_e32 v16, v7, v1 -; GCN-IR-NEXT: v_and_b32_e32 v7, v7, v0 -; GCN-IR-NEXT: v_sub_i32_e64 v12, s[4:5], v4, v7 -; GCN-IR-NEXT: v_mov_b32_e32 v11, v15 -; GCN-IR-NEXT: v_mov_b32_e32 v15, v6 -; GCN-IR-NEXT: v_subb_u32_e64 v13, s[4:5], v13, v16, s[4:5] +; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v8, v10 +; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v9, v11, vcc +; GCN-IR-NEXT: v_or_b32_e32 v2, v12, v2 +; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v6 +; GCN-IR-NEXT: v_and_b32_e32 v15, v12, v0 +; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v12 +; GCN-IR-NEXT: v_and_b32_e32 v14, v12, v1 +; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, 1, v4 +; GCN-IR-NEXT: v_or_b32_e32 v3, v13, v3 +; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, 0, v5, vcc +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[12:13], v[4:5] +; GCN-IR-NEXT: v_mov_b32_e32 v4, v12 +; GCN-IR-NEXT: v_mov_b32_e32 v7, 0 +; GCN-IR-NEXT: v_sub_i32_e64 v10, s[4:5], v10, v15 +; GCN-IR-NEXT: v_mov_b32_e32 v5, v13 +; GCN-IR-NEXT: v_mov_b32_e32 v13, v7 +; GCN-IR-NEXT: v_subb_u32_e64 
v11, s[4:5], v11, v14, s[4:5] ; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11] -; GCN-IR-NEXT: v_mov_b32_e32 v14, v5 +; GCN-IR-NEXT: v_mov_b32_e32 v12, v6 ; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11] ; GCN-IR-NEXT: s_cbranch_execnz BB9_3 ; GCN-IR-NEXT: ; %bb.4: ; %Flow @@ -1311,8 +1285,8 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) { ; GCN-IR-NEXT: BB9_5: ; %Flow3 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9] ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[2:3], 1 -; GCN-IR-NEXT: v_or_b32_e32 v3, v6, v1 -; GCN-IR-NEXT: v_or_b32_e32 v2, v5, v0 +; GCN-IR-NEXT: v_or_b32_e32 v3, v7, v1 +; GCN-IR-NEXT: v_or_b32_e32 v2, v6, v0 ; GCN-IR-NEXT: BB9_6: ; %Flow4 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7] ; GCN-IR-NEXT: v_mov_b32_e32 v0, v2 @@ -1336,8 +1310,7 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) { ; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0 ; GCN-IR-NEXT: v_add_i32_e64 v2, s[4:5], 32, v2 ; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1 -; GCN-IR-NEXT: v_cndmask_b32_e64 v6, v3, v2, s[4:5] +; GCN-IR-NEXT: v_min_u32_e32 v6, v2, v3 ; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 48, v6 ; GCN-IR-NEXT: v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5] ; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] @@ -1523,82 +1496,76 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(i64 addrspace(1)* %out, i64 %x) ; ; GCN-IR-LABEL: s_test_udiv_k_den_i64: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases -; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_flbit_i32_b32 s2, s6 -; GCN-IR-NEXT: s_flbit_i32_b32 s3, s7 -; GCN-IR-NEXT: s_add_i32 s2, s2, 32 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s3 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s2 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 -; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 59, v2 -; GCN-IR-NEXT: v_subb_u32_e64 v1, s[2:3], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_xor_b64 s[2:3], s[0:1], -1 -; GCN-IR-NEXT: s_and_b64 s[2:3], s[2:3], vcc -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[2:3] -; GCN-IR-NEXT: s_cbranch_vccz BB11_4 -; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: s_flbit_i32_b32 s4, s2 +; GCN-IR-NEXT: s_flbit_i32_b32 s5, s3 +; GCN-IR-NEXT: s_add_i32 s4, s4, 32 +; GCN-IR-NEXT: s_min_u32 s8, s4, s5 +; GCN-IR-NEXT: s_sub_u32 s6, 59, s8 +; GCN-IR-NEXT: s_subb_u32 s7, 0, 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[2:3], 0 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[12:13], s[6:7], 63 +; GCN-IR-NEXT: s_mov_b64 s[4:5], 0 +; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[12:13] +; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[12:13], s[6:7], 63 +; GCN-IR-NEXT: s_xor_b64 s[14:15], s[10:11], -1 +; GCN-IR-NEXT: s_and_b64 s[12:13], s[14:15], s[12:13] +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[12:13] ; GCN-IR-NEXT: s_cbranch_vccz BB11_5 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: s_add_u32 s10, s6, 1 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 +; GCN-IR-NEXT: s_addc_u32 s11, s7, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s7 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1] +; GCN-IR-NEXT: s_sub_i32 s6, 63, 
s6 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[2:3], s6 +; GCN-IR-NEXT: s_cbranch_vccz BB11_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v3 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 0xffffffc4, v2 -; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 -; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], 0, -1, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: s_lshr_b64 s[10:11], s[2:3], s10 +; GCN-IR-NEXT: s_add_u32 s2, s8, 0xffffffc4 +; GCN-IR-NEXT: s_addc_u32 s3, 0, -1 +; GCN-IR-NEXT: s_mov_b64 s[8:9], 0 +; GCN-IR-NEXT: s_mov_b32 s5, 0 ; GCN-IR-NEXT: BB11_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_sub_i32_e32 v2, vcc, 23, v6 -; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, 0, v7, vcc -; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 -; GCN-IR-NEXT: v_and_b32_e32 v10, 24, v8 -; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4 -; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v4, v8 -; GCN-IR-NEXT: v_sub_i32_e64 v6, s[0:1], v6, v10 -; GCN-IR-NEXT: v_mov_b32_e32 v5, v9 -; GCN-IR-NEXT: v_mov_b32_e32 v9, v3 -; GCN-IR-NEXT: v_subbrev_u32_e64 v7, s[0:1], 0, v7, s[0:1] +; GCN-IR-NEXT: s_lshr_b32 s4, s7, 31 +; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1 +; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[6:7], 1 +; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[4:5] +; GCN-IR-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7] +; GCN-IR-NEXT: s_sub_u32 s4, 23, s10 +; GCN-IR-NEXT: s_subb_u32 s4, 0, s11 +; GCN-IR-NEXT: s_ashr_i32 s8, s4, 31 +; GCN-IR-NEXT: s_and_b32 s4, s8, 1 +; GCN-IR-NEXT: s_and_b32 s8, s8, 24 +; GCN-IR-NEXT: s_sub_u32 s10, s10, s8 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s2 +; GCN-IR-NEXT: s_subb_u32 s11, s11, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s3 +; GCN-IR-NEXT: s_add_u32 s2, s2, 1 +; GCN-IR-NEXT: s_addc_u32 s3, s3, 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1] +; GCN-IR-NEXT: s_mov_b64 s[8:9], s[4:5] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 ; GCN-IR-NEXT: s_cbranch_vccz BB11_3 +; GCN-IR-NEXT: BB11_4: ; %Flow5 +; GCN-IR-NEXT: s_lshl_b64 s[2:3], s[6:7], 1 +; GCN-IR-NEXT: s_or_b64 s[2:3], s[4:5], s[2:3] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s2 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s3 ; GCN-IR-NEXT: s_branch BB11_6 -; GCN-IR-NEXT: BB11_4: -; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 -; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1] -; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1] -; GCN-IR-NEXT: s_branch BB11_7 ; GCN-IR-NEXT: BB11_5: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB11_6: ; %Flow5 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 -; GCN-IR-NEXT: BB11_7: ; %udiv-end -; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 -; GCN-IR-NEXT: s_mov_b32 s6, -1 -; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s3 +; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[10:11] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s2 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[10:11] +; GCN-IR-NEXT: BB11_6: ; %udiv-end +; GCN-IR-NEXT: s_mov_b32 
s3, 0xf000 +; GCN-IR-NEXT: s_mov_b32 s2, -1 +; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GCN-IR-NEXT: s_endpgm %result = udiv i64 %x, 24 store i64 %result, i64 addrspace(1)* %out @@ -1713,8 +1680,7 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) { ; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0 ; GCN-IR-NEXT: v_add_i32_e64 v2, s[4:5], 32, v2 ; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1 -; GCN-IR-NEXT: v_cndmask_b32_e64 v6, v3, v2, s[4:5] +; GCN-IR-NEXT: v_min_u32_e32 v6, v2, v3 ; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 59, v6 ; GCN-IR-NEXT: v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5] ; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll index bcb0442..1cd8d78 100644 --- a/llvm/test/CodeGen/AMDGPU/urem64.ll +++ b/llvm/test/CodeGen/AMDGPU/urem64.ll @@ -127,97 +127,89 @@ define amdgpu_kernel void @s_test_urem_i64(i64 addrspace(1)* %out, i64 %x, i64 % ; GCN-IR-LABEL: s_test_urem_i64: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases ; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 -; GCN-IR-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd +; GCN-IR-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd +; GCN-IR-NEXT: s_mov_b64 s[2:3], 0 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0 -; GCN-IR-NEXT: s_flbit_i32_b32 s10, s2 -; GCN-IR-NEXT: s_add_i32 s10, s10, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s10 -; GCN-IR-NEXT: s_flbit_i32_b32 s10, s6 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s3, 0 -; GCN-IR-NEXT: s_add_i32 s10, s10, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s11, s7 -; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s10 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 -; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[2:3], 0 -; GCN-IR-NEXT: v_subb_u32_e64 v1, s[10:11], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], s[8:9] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_xor_b64 s[8:9], s[0:1], -1 -; GCN-IR-NEXT: s_and_b64 s[8:9], s[8:9], vcc -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[8:9] -; GCN-IR-NEXT: s_cbranch_vccz BB0_4 -; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[6:7], 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[0:1], 0 +; GCN-IR-NEXT: s_flbit_i32_b32 s12, s0 +; GCN-IR-NEXT: s_add_i32 s14, s12, 32 +; GCN-IR-NEXT: s_or_b64 s[12:13], s[8:9], s[10:11] +; GCN-IR-NEXT: s_flbit_i32_b32 s8, s1 +; GCN-IR-NEXT: s_min_u32 s10, s14, s8 +; GCN-IR-NEXT: s_flbit_i32_b32 s8, s6 +; GCN-IR-NEXT: s_add_i32 s8, s8, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s9, s7 +; GCN-IR-NEXT: s_min_u32 s14, s8, s9 +; GCN-IR-NEXT: s_sub_u32 s8, s10, s14 +; GCN-IR-NEXT: s_subb_u32 s9, 0, 0 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[16:17], s[8:9], 63 +; GCN-IR-NEXT: s_mov_b32 s11, 0 +; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[16:17] +; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[16:17], s[8:9], 63 +; GCN-IR-NEXT: s_xor_b64 s[18:19], s[12:13], -1 +; GCN-IR-NEXT: 
s_and_b64 s[16:17], s[18:19], s[16:17] +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[16:17] ; GCN-IR-NEXT: s_cbranch_vccz BB0_5 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: s_add_u32 s16, s8, 1 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s8 +; GCN-IR-NEXT: s_addc_u32 s17, s9, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s9 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[16:17], v[0:1] +; GCN-IR-NEXT: s_sub_i32 s8, 63, s8 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_lshl_b64 s[12:13], s[6:7], s8 +; GCN-IR-NEXT: s_cbranch_vccz BB0_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: v_not_b32_e32 v2, v2 -; GCN-IR-NEXT: s_add_u32 s8, s2, -1 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v4 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3 -; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 -; GCN-IR-NEXT: s_addc_u32 s9, s3, -1 -; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: s_lshr_b64 s[16:17], s[6:7], s16 +; GCN-IR-NEXT: s_add_u32 s8, s0, -1 +; GCN-IR-NEXT: s_addc_u32 s9, s1, -1 +; GCN-IR-NEXT: s_not_b64 s[2:3], s[10:11] +; GCN-IR-NEXT: s_mov_b32 s15, s11 +; GCN-IR-NEXT: s_add_u32 s10, s2, s14 +; GCN-IR-NEXT: s_addc_u32 s11, s3, s11 +; GCN-IR-NEXT: s_mov_b64 s[14:15], 0 +; GCN-IR-NEXT: s_mov_b32 s3, 0 ; GCN-IR-NEXT: BB0_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 -; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s9 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s8, v6 -; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc -; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 -; GCN-IR-NEXT: v_and_b32_e32 v10, s2, v8 -; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8 -; GCN-IR-NEXT: v_and_b32_e32 v11, s3, v8 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4 -; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v4, v8 -; GCN-IR-NEXT: v_sub_i32_e64 v6, s[0:1], v6, v10 -; GCN-IR-NEXT: v_mov_b32_e32 v5, v9 -; GCN-IR-NEXT: v_mov_b32_e32 v9, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] +; GCN-IR-NEXT: s_lshr_b32 s2, s13, 31 +; GCN-IR-NEXT: s_lshl_b64 s[16:17], s[16:17], 1 +; GCN-IR-NEXT: s_lshl_b64 s[12:13], s[12:13], 1 +; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[2:3] +; GCN-IR-NEXT: s_or_b64 s[12:13], s[14:15], s[12:13] +; GCN-IR-NEXT: s_sub_u32 s2, s8, s16 +; GCN-IR-NEXT: s_subb_u32 s2, s9, s17 +; GCN-IR-NEXT: s_ashr_i32 s14, s2, 31 +; GCN-IR-NEXT: s_mov_b32 s15, s14 +; GCN-IR-NEXT: s_and_b32 s2, s14, 1 +; GCN-IR-NEXT: s_and_b64 s[18:19], s[14:15], s[0:1] +; GCN-IR-NEXT: s_sub_u32 s16, s16, s18 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s10 +; GCN-IR-NEXT: s_subb_u32 s17, s17, s19 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s11 +; GCN-IR-NEXT: s_add_u32 s10, s10, 1 +; GCN-IR-NEXT: s_addc_u32 s11, s11, 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1] +; GCN-IR-NEXT: s_mov_b64 s[14:15], s[2:3] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 ; GCN-IR-NEXT: s_cbranch_vccz BB0_3 +; GCN-IR-NEXT: BB0_4: ; %Flow6 +; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[12:13], 1 +; GCN-IR-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s2 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s3 ; GCN-IR-NEXT: s_branch BB0_6 -; GCN-IR-NEXT: BB0_4: +; GCN-IR-NEXT: BB0_5: ; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 -; 
GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1] +; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[12:13] ; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1] -; GCN-IR-NEXT: s_branch BB0_7 -; GCN-IR-NEXT: BB0_5: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB0_6: ; %Flow6 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 -; GCN-IR-NEXT: BB0_7: ; %udiv-end -; GCN-IR-NEXT: v_mul_lo_u32 v1, s2, v1 -; GCN-IR-NEXT: v_mul_hi_u32 v2, s2, v0 -; GCN-IR-NEXT: v_mul_lo_u32 v3, s3, v0 -; GCN-IR-NEXT: v_mul_lo_u32 v0, s2, v0 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[12:13] +; GCN-IR-NEXT: BB0_6: ; %udiv-end +; GCN-IR-NEXT: v_mul_lo_u32 v1, s0, v1 +; GCN-IR-NEXT: v_mul_hi_u32 v2, s0, v0 +; GCN-IR-NEXT: v_mul_lo_u32 v3, s1, v0 +; GCN-IR-NEXT: v_mul_lo_u32 v0, s0, v0 ; GCN-IR-NEXT: s_mov_b32 s11, 0xf000 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v3 @@ -356,13 +348,11 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) { ; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5] ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 32, v4 ; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v3 -; GCN-IR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3 -; GCN-IR-NEXT: v_cndmask_b32_e32 v10, v5, v4, vcc +; GCN-IR-NEXT: v_min_u32_e32 v10, v4, v5 ; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0 ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 32, v4 ; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v1 -; GCN-IR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; GCN-IR-NEXT: v_cndmask_b32_e32 v12, v5, v4, vcc +; GCN-IR-NEXT: v_min_u32_e32 v12, v4, v5 ; GCN-IR-NEXT: v_sub_i32_e32 v5, vcc, v10, v12 ; GCN-IR-NEXT: v_subb_u32_e64 v6, s[6:7], 0, 0, vcc ; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[5:6] @@ -858,94 +848,87 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(i64 addrspace(1)* %out, i64 %x) ; ; GCN-IR-LABEL: s_test_urem_k_num_i64: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases -; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_flbit_i32_b32 s2, s6 -; GCN-IR-NEXT: s_flbit_i32_b32 s3, s7 -; GCN-IR-NEXT: s_add_i32 s2, s2, 32 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s3 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s2 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 -; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 0xffffffc5, v2 -; GCN-IR-NEXT: v_addc_u32_e64 v1, s[2:3], 0, -1, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_xor_b64 s[2:3], s[0:1], -1 -; GCN-IR-NEXT: s_and_b64 s[2:3], s[2:3], vcc -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[2:3] -; GCN-IR-NEXT: s_cbranch_vccz BB6_4 -; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], 24, v0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: s_flbit_i32_b32 s4, s2 +; GCN-IR-NEXT: s_flbit_i32_b32 s5, s3 +; GCN-IR-NEXT: s_add_i32 s4, s4, 32 +; GCN-IR-NEXT: s_min_u32 s8, s4, s5 +; GCN-IR-NEXT: s_add_u32 s6, s8, 0xffffffc5 +; GCN-IR-NEXT: s_addc_u32 s7, 0, -1 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[2:3], 0 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[12:13], s[6:7], 63 +; GCN-IR-NEXT: s_mov_b64 
s[4:5], 0 +; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[12:13] +; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[12:13], s[6:7], 63 +; GCN-IR-NEXT: s_xor_b64 s[14:15], s[10:11], -1 +; GCN-IR-NEXT: s_and_b64 s[12:13], s[14:15], s[12:13] +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[12:13] ; GCN-IR-NEXT: s_cbranch_vccz BB6_5 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: s_add_u32 s12, s6, 1 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 +; GCN-IR-NEXT: s_addc_u32 s13, s7, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s7 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[12:13], v[0:1] +; GCN-IR-NEXT: s_sub_i32 s6, 63, s6 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_lshl_b64 s[10:11], 24, s6 +; GCN-IR-NEXT: s_cbranch_vccz BB6_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: s_add_u32 s2, s6, -1 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], 24, v3 -; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 58, v2 -; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 -; GCN-IR-NEXT: s_addc_u32 s3, s7, -1 -; GCN-IR-NEXT: v_subb_u32_e64 v5, s[0:1], 0, 0, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: s_lshr_b64 s[14:15], 24, s12 +; GCN-IR-NEXT: s_add_u32 s6, s2, -1 +; GCN-IR-NEXT: s_addc_u32 s7, s3, -1 +; GCN-IR-NEXT: s_sub_u32 s8, 58, s8 +; GCN-IR-NEXT: s_subb_u32 s9, 0, 0 +; GCN-IR-NEXT: s_mov_b64 s[12:13], 0 +; GCN-IR-NEXT: s_mov_b32 s5, 0 ; GCN-IR-NEXT: BB6_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 -; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s3 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s2, v6 -; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc -; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 -; GCN-IR-NEXT: v_and_b32_e32 v10, s6, v8 -; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8 -; GCN-IR-NEXT: v_and_b32_e32 v11, s7, v8 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4 -; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v4, v8 -; GCN-IR-NEXT: v_sub_i32_e64 v6, s[0:1], v6, v10 -; GCN-IR-NEXT: v_mov_b32_e32 v5, v9 -; GCN-IR-NEXT: v_mov_b32_e32 v9, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] +; GCN-IR-NEXT: s_lshr_b32 s4, s11, 31 +; GCN-IR-NEXT: s_lshl_b64 s[14:15], s[14:15], 1 +; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1 +; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[4:5] +; GCN-IR-NEXT: s_or_b64 s[10:11], s[12:13], s[10:11] +; GCN-IR-NEXT: s_sub_u32 s4, s6, s14 +; GCN-IR-NEXT: s_subb_u32 s4, s7, s15 +; GCN-IR-NEXT: s_ashr_i32 s12, s4, 31 +; GCN-IR-NEXT: s_mov_b32 s13, s12 +; GCN-IR-NEXT: s_and_b32 s4, s12, 1 +; GCN-IR-NEXT: s_and_b64 s[16:17], s[12:13], s[2:3] +; GCN-IR-NEXT: s_sub_u32 s14, s14, s16 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s8 +; GCN-IR-NEXT: s_subb_u32 s15, s15, s17 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s9 +; GCN-IR-NEXT: s_add_u32 s8, s8, 1 +; GCN-IR-NEXT: s_addc_u32 s9, s9, 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1] +; GCN-IR-NEXT: s_mov_b64 s[12:13], s[4:5] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 ; GCN-IR-NEXT: s_cbranch_vccz BB6_3 +; GCN-IR-NEXT: BB6_4: ; %Flow5 +; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[10:11], 1 +; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s4 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s5 ; GCN-IR-NEXT: s_branch BB6_6 -; GCN-IR-NEXT: BB6_4: -; GCN-IR-NEXT: 
v_mov_b32_e32 v1, 0 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, 24, 0, s[0:1] -; GCN-IR-NEXT: s_branch BB6_7 ; GCN-IR-NEXT: BB6_5: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB6_6: ; %Flow5 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 -; GCN-IR-NEXT: BB6_7: ; %udiv-end -; GCN-IR-NEXT: v_mul_lo_u32 v1, s6, v1 -; GCN-IR-NEXT: v_mul_hi_u32 v2, s6, v0 -; GCN-IR-NEXT: v_mul_lo_u32 v3, s7, v0 -; GCN-IR-NEXT: v_mul_lo_u32 v0, s6, v0 -; GCN-IR-NEXT: s_mov_b32 s3, 0xf000 +; GCN-IR-NEXT: v_mov_b32_e32 v1, 0 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, 24, 0, s[10:11] +; GCN-IR-NEXT: BB6_6: ; %udiv-end +; GCN-IR-NEXT: v_mul_lo_u32 v1, s2, v1 +; GCN-IR-NEXT: v_mul_hi_u32 v2, s2, v0 +; GCN-IR-NEXT: v_mul_lo_u32 v3, s3, v0 +; GCN-IR-NEXT: v_mul_lo_u32 v0, s2, v0 +; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v3 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 24, v0 -; GCN-IR-NEXT: s_mov_b32 s2, -1 -; GCN-IR-NEXT: s_mov_b32 s0, s4 -; GCN-IR-NEXT: s_mov_b32 s1, s5 +; GCN-IR-NEXT: s_mov_b32 s6, -1 +; GCN-IR-NEXT: s_mov_b32 s4, s0 +; GCN-IR-NEXT: s_mov_b32 s5, s1 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, 0, v1, vcc -; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 +; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; GCN-IR-NEXT: s_endpgm %result = urem i64 24, %x store i64 %result, i64 addrspace(1)* %out @@ -1061,91 +1044,85 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(i64 addrspace(1)* %out, i64 %x) ; ; GCN-IR-LABEL: s_test_urem_k_den_i64: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases -; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_flbit_i32_b32 s2, s6 -; GCN-IR-NEXT: s_flbit_i32_b32 s3, s7 -; GCN-IR-NEXT: s_add_i32 s2, s2, 32 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s3 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s2 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 -; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 59, v2 -; GCN-IR-NEXT: v_subb_u32_e64 v1, s[2:3], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_xor_b64 s[2:3], s[0:1], -1 -; GCN-IR-NEXT: s_and_b64 s[2:3], s[2:3], vcc -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[2:3] -; GCN-IR-NEXT: s_cbranch_vccz BB7_4 -; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: s_flbit_i32_b32 s4, s2 +; GCN-IR-NEXT: s_flbit_i32_b32 s5, s3 +; GCN-IR-NEXT: s_add_i32 s4, s4, 32 +; GCN-IR-NEXT: s_min_u32 s6, s4, s5 +; GCN-IR-NEXT: s_sub_u32 s8, 59, s6 +; GCN-IR-NEXT: s_subb_u32 s9, 0, 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[2:3], 0 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[12:13], s[8:9], 63 +; GCN-IR-NEXT: s_mov_b64 s[4:5], 0 +; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[12:13] +; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[12:13], s[8:9], 63 +; GCN-IR-NEXT: s_xor_b64 s[14:15], s[10:11], -1 +; GCN-IR-NEXT: s_and_b64 s[12:13], s[14:15], s[12:13] +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[12:13] ; GCN-IR-NEXT: 
s_cbranch_vccz BB7_5 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: s_add_u32 s10, s8, 1 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s8 +; GCN-IR-NEXT: s_addc_u32 s11, s9, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s9 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1] +; GCN-IR-NEXT: s_sub_i32 s7, 63, s8 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[2:3], s7 +; GCN-IR-NEXT: s_cbranch_vccz BB7_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v3 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 0xffffffc4, v2 -; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 -; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], 0, -1, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: s_lshr_b64 s[12:13], s[2:3], s10 +; GCN-IR-NEXT: s_add_u32 s6, s6, 0xffffffc4 +; GCN-IR-NEXT: s_addc_u32 s7, 0, -1 +; GCN-IR-NEXT: s_mov_b64 s[10:11], 0 +; GCN-IR-NEXT: s_mov_b32 s5, 0 ; GCN-IR-NEXT: BB7_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_sub_i32_e32 v2, vcc, 23, v6 -; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, 0, v7, vcc -; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 -; GCN-IR-NEXT: v_and_b32_e32 v10, 24, v8 -; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4 -; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v4, v8 -; GCN-IR-NEXT: v_sub_i32_e64 v6, s[0:1], v6, v10 -; GCN-IR-NEXT: v_mov_b32_e32 v5, v9 -; GCN-IR-NEXT: v_mov_b32_e32 v9, v3 -; GCN-IR-NEXT: v_subbrev_u32_e64 v7, s[0:1], 0, v7, s[0:1] +; GCN-IR-NEXT: s_lshr_b32 s4, s9, 31 +; GCN-IR-NEXT: s_lshl_b64 s[12:13], s[12:13], 1 +; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[8:9], 1 +; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[4:5] +; GCN-IR-NEXT: s_or_b64 s[8:9], s[10:11], s[8:9] +; GCN-IR-NEXT: s_sub_u32 s4, 23, s12 +; GCN-IR-NEXT: s_subb_u32 s4, 0, s13 +; GCN-IR-NEXT: s_ashr_i32 s10, s4, 31 +; GCN-IR-NEXT: s_and_b32 s4, s10, 1 +; GCN-IR-NEXT: s_and_b32 s10, s10, 24 +; GCN-IR-NEXT: s_sub_u32 s12, s12, s10 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 +; GCN-IR-NEXT: s_subb_u32 s13, s13, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s7 +; GCN-IR-NEXT: s_add_u32 s6, s6, 1 +; GCN-IR-NEXT: s_addc_u32 s7, s7, 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1] +; GCN-IR-NEXT: s_mov_b64 s[10:11], s[4:5] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 ; GCN-IR-NEXT: s_cbranch_vccz BB7_3 +; GCN-IR-NEXT: BB7_4: ; %Flow5 +; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[8:9], 1 +; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s4 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s5 ; GCN-IR-NEXT: s_branch BB7_6 -; GCN-IR-NEXT: BB7_4: -; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 -; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1] -; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1] -; GCN-IR-NEXT: s_branch BB7_7 ; GCN-IR-NEXT: BB7_5: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB7_6: ; %Flow5 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 -; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 -; GCN-IR-NEXT: BB7_7: ; %udiv-end +; GCN-IR-NEXT: v_mov_b32_e32 v0, s3 +; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 
0, s[10:11] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s2 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[10:11] +; GCN-IR-NEXT: BB7_6: ; %udiv-end ; GCN-IR-NEXT: v_mul_hi_u32 v2, v0, 24 ; GCN-IR-NEXT: v_mul_lo_u32 v1, v1, 24 ; GCN-IR-NEXT: v_mul_lo_u32 v0, v0, 24 -; GCN-IR-NEXT: s_mov_b32 s3, 0xf000 -; GCN-IR-NEXT: s_mov_b32 s2, -1 +; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 +; GCN-IR-NEXT: s_mov_b32 s6, -1 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s7 -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s6, v0 -; GCN-IR-NEXT: s_mov_b32 s0, s4 -; GCN-IR-NEXT: s_mov_b32 s1, s5 +; GCN-IR-NEXT: v_mov_b32_e32 v2, s3 +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s2, v0 +; GCN-IR-NEXT: s_mov_b32 s4, s0 +; GCN-IR-NEXT: s_mov_b32 s5, s1 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc -; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 +; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; GCN-IR-NEXT: s_endpgm %result = urem i64 %x, 24 store i64 %result, i64 addrspace(1)* %out @@ -1262,10 +1239,8 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) { ; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0 ; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2 ; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1 -; GCN-IR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; GCN-IR-NEXT: v_cndmask_b32_e32 v4, v3, v2, vcc -; GCN-IR-NEXT: s_movk_i32 s6, 0xffd0 -; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v4 +; GCN-IR-NEXT: v_min_u32_e32 v4, v2, v3 +; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 0xffffffd0, v4 ; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1] ; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[2:3] @@ -1364,8 +1339,7 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) { ; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0 ; GCN-IR-NEXT: v_add_i32_e64 v2, s[4:5], 32, v2 ; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1 -; GCN-IR-NEXT: v_cndmask_b32_e64 v6, v3, v2, s[4:5] +; GCN-IR-NEXT: v_min_u32_e32 v6, v2, v3 ; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 48, v6 ; GCN-IR-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5] ; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] -- 2.7.4
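
Illustrative note (not part of the patch): throughout these diffs, the old
v_cmp_eq_u32 + v_cndmask_b32 (and s_cselect_b32) sequences that picked between
the per-half bit counts are replaced by single v_min_u32 / s_min_u32
operations when computing the 64-bit leading-zero counts feeding the software
divide loops. A minimal standalone C++ sketch of that identity follows; it is
a scalar model, not LLVM code, and the names ffbh, uaddsat, ctlz64, and
ctlz64_nonzero are illustrative. It assumes the hardware convention that
ffbh/flbit return -1 (0xffffffff) for a zero input.

// Scalar model of the umin-based 64-bit ctlz lowering (C++20).
#include <algorithm>
#include <bit>
#include <cstdint>

// Model of the hardware count-leading-zeros: -1 (all ones) for a zero input.
static uint32_t ffbh(uint32_t x) {
  return x ? (uint32_t)std::countl_zero(x) : 0xffffffffu;
}

// Saturating 32-bit add, modeling the clamped add form.
static uint32_t uaddsat(uint32_t a, uint32_t b) {
  uint32_t s = a + b;
  return s < a ? 0xffffffffu : s; // saturate instead of wrapping
}

// General form: min of ffbh(hi), sat(ffbh(lo) + 32), and 64. The final min
// against 64 maps a zero input to 64, the defined ctlz result.
uint32_t ctlz64(uint64_t x) {
  uint32_t lo = (uint32_t)x, hi = (uint32_t)(x >> 32);
  return std::min({ffbh(hi), uaddsat(ffbh(lo), 32), uint32_t{64}});
}

// When the input is known nonzero (the zero-undef case, and the divide paths
// above, which branch on a zero input separately), a plain add and a single
// umin suffice: this is the s_add_i32 + s_min_u32 and v_add_i32 + v_min_u32
// pairing visible in the new checks. The wrap of 0xffffffff + 32 to 31 when
// lo == 0 is benign, because hi is then nonzero and ffbh(hi) <= 31 wins.
uint32_t ctlz64_nonzero(uint64_t x) {
  uint32_t lo = (uint32_t)x, hi = (uint32_t)(x >> 32);
  return std::min(ffbh(hi), (uint32_t)(ffbh(lo) + 32));
}

In the nonzero form, one add and one min replace a compare and a select, and
branch-free min also folds cleanly into the surrounding scalar code, which is
what shortens every flbit sequence in the regenerated checks above.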