From a96ec3f36016eee02c346e9b8100f31f28a6f079 Mon Sep 17 00:00:00 2001
From: Stanislav Mekhanoshin
Date: Tue, 23 May 2017 15:59:58 +0000
Subject: [PATCH] [AMDGPU] Convert shl (add) into add (shl)

shl (or|add x, c2), c1 => or|add (shl x, c1), (c2 << c1)

This allows a constant to be folded into an address in some cases, as
well as eliminating the second shift when the expression is used as an
address and the second shift comes from a GEP.

Differential Revision: https://reviews.llvm.org/D33432

llvm-svn: 303641
---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp  | 42 ++++++++++++++++++++++++--
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h    |  3 ++
 llvm/test/CodeGen/AMDGPU/shl-add-to-add-shl.ll | 40 ++++++++++++++++++++++++
 3 files changed, 83 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/shl-add-to-add-shl.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 5ec46a8..9d7562d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -127,6 +127,29 @@ EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
   return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
 }
 
+bool AMDGPUTargetLowering::isOrEquivalentToAdd(SelectionDAG &DAG, SDValue Op)
+{
+  assert(Op.getOpcode() == ISD::OR);
+
+  SDValue N0 = Op->getOperand(0);
+  SDValue N1 = Op->getOperand(1);
+  EVT VT = N0.getValueType();
+
+  if (VT.isInteger() && !VT.isVector()) {
+    KnownBits LHSKnown, RHSKnown;
+    DAG.computeKnownBits(N0, LHSKnown);
+
+    if (LHSKnown.Zero.getBoolValue()) {
+      DAG.computeKnownBits(N1, RHSKnown);
+
+      if (!(~RHSKnown.Zero & ~LHSKnown.Zero))
+        return true;
+    }
+  }
+
+  return false;
+}
+
 AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
                                            const AMDGPUSubtarget &STI)
   : TargetLowering(TM), Subtarget(&STI) {
@@ -2596,8 +2619,6 @@ SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
   EVT VT = N->getValueType(0);
-  if (VT != MVT::i64)
-    return SDValue();
 
   ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
   if (!RHS)
@@ -2618,6 +2639,8 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
   case ISD::SIGN_EXTEND:
   case ISD::ANY_EXTEND: {
     // shl (ext x) => zext (shl x), if shift does not overflow int
+    if (VT != MVT::i64)
+      break;
     KnownBits Known;
     SDValue X = LHS->getOperand(0);
     DAG.computeKnownBits(X, Known);
@@ -2628,8 +2651,23 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
     SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
     return DAG.getZExtOrTrunc(Shl, SL, VT);
   }
+  case ISD::OR:  if (!isOrEquivalentToAdd(DAG, LHS)) break;
+  case ISD::ADD: { // Fall through from above
+    // shl (or|add x, c2), c1 => or|add (shl x, c1), (c2 << c1)
+    if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
+      SDValue Shl = DAG.getNode(ISD::SHL, SL, VT, LHS->getOperand(0),
+                                SDValue(RHS, 0));
+      SDValue C2V = DAG.getConstant(C2->getAPIntValue() << RHSVal,
+                                    SDLoc(C2), VT);
+      return DAG.getNode(LHS->getOpcode(), SL, VT, Shl, C2V);
+    }
+    break;
+  }
   }
 
+  if (VT != MVT::i64)
+    return SDValue();
+
   // i64 (shl x, C) -> (build_pair 0, (shl x, C -32))
 
   // On some subtargets, 64-bit shift is a quarter rate instruction.
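As a rough IR-level sketch of what the new combine does (illustrative only: the
combine itself runs on the SelectionDAG inside performShlCombine, and the
function names below are made up for this example):

  ; before: the constant is added before the shift
  define i32 @shl_add_before(i32 %x) {
    %add = add i32 %x, 200      ; x + c2, with c2 = 200
    %shl = shl i32 %add, 2      ; (x + 200) << 2, with c1 = 2
    ret i32 %shl
  }

  ; after: shl (add x, c2), c1 => add (shl x, c1), (c2 << c1), here 200 << 2 = 800
  define i32 @shl_add_after(i32 %x) {
    %shl = shl i32 %x, 2        ; x << 2
    %add = add i32 %shl, 800    ; (x << 2) + 800
    ret i32 %add
  }

The add form is always safe to rewrite this way; the or form is rewritten only
when isOrEquivalentToAdd proves the two operands share no set bits, so the or
behaves exactly like an add.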
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index fb2f150..0d066cd 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -34,6 +34,9 @@ private:
   /// compare.
   SDValue getFFBH_U32(SelectionDAG &DAG, SDValue Op, const SDLoc &DL) const;
 
+public:
+  static bool isOrEquivalentToAdd(SelectionDAG &DAG, SDValue Op);
+
 protected:
   const AMDGPUSubtarget *Subtarget;
   AMDGPUAS AMDGPUASI;
diff --git a/llvm/test/CodeGen/AMDGPU/shl-add-to-add-shl.ll b/llvm/test/CodeGen/AMDGPU/shl-add-to-add-shl.ll
new file mode 100644
index 0000000..1cdfec9
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/shl-add-to-add-shl.ll
@@ -0,0 +1,40 @@
+; RUN: llc -march=amdgcn -mcpu=fiji < %s | FileCheck %s
+
+; Check the transformation shl (or|add x, c2), c1 => or|add (shl x, c1), (c2 << c1).
+; Only one shift is expected; the GEP shall not produce a separate shift.
+
+; CHECK-LABEL: {{^}}add_const_offset:
+; CHECK: v_lshlrev_b32_e32 v[[SHL:[0-9]+]], 4, v0
+; CHECK: v_add_i32_e32 v[[ADD:[0-9]+]], vcc, 0xc80, v[[SHL]]
+; CHECK-NOT: v_lshl
+; CHECK: v_add_i32_e32 v[[ADDRLO:[0-9]+]], vcc, s{{[0-9]+}}, v[[ADD]]
+; CHECK: load_dword v{{[0-9]+}}, v{{\[}}[[ADDRLO]]:
+define amdgpu_kernel void @add_const_offset(i32 addrspace(1)* nocapture %arg) {
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %add = add i32 %id, 200
+  %shl = shl i32 %add, 2
+  %ptr = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %shl
+  %val = load i32, i32 addrspace(1)* %ptr, align 4
+  store i32 %val, i32 addrspace(1)* %arg, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}or_const_offset:
+; CHECK: v_lshlrev_b32_e32 v[[SHL:[0-9]+]], 4, v0
+; CHECK: v_or_b32_e32 v[[OR:[0-9]+]], 0x1000, v[[SHL]]
+; CHECK-NOT: v_lshl
+; CHECK: v_add_i32_e32 v[[ADDRLO:[0-9]+]], vcc, s{{[0-9]+}}, v[[OR]]
+; CHECK: load_dword v{{[0-9]+}}, v{{\[}}[[ADDRLO]]:
+define amdgpu_kernel void @or_const_offset(i32 addrspace(1)* nocapture %arg) {
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %add = or i32 %id, 256
+  %shl = shl i32 %add, 2
+  %ptr = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %shl
+  %val = load i32, i32 addrspace(1)* %ptr, align 4
+  store i32 %val, i32 addrspace(1)* %arg, align 4
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()
-- 
2.7.4
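A note on the constants the test expects (my own working, not part of the
patch): the GEP indexes i32 elements, so addressing contributes a further
shift by 2 on top of the explicit shl by 2, and after the rewrite the two
shifts fold into the single v_lshlrev_b32 by 4. The folded constants then
come out as

  200 << 4 = 3200 = 0xc80    (add_const_offset)
  256 << 4 = 4096 = 0x1000   (or_const_offset)

The or case only qualifies when computeKnownBits can prove %id and 256 share
no set bits; presumably the default work-group size bound on this target
keeps the work-item id below 256, so the or is treated as an add.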