From 8dbeb9256cb60fe551fdcbd40580589ffce59e37 Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Mon, 3 Jun 2019 18:41:34 +0000
Subject: [PATCH] TTI: Improve default costs for addrspacecast

For some reason multiple places need to do this, and the variant the
loop unroller and inliner use was not handling it.

Also, introduce a new wrapper to be slightly more precise, since on
AMDGPU some addrspacecasts are free, but not no-ops.

llvm-svn: 362436
---
 llvm/include/llvm/CodeGen/BasicTTIImpl.h           |  8 ++-
 llvm/include/llvm/CodeGen/TargetLowering.h         |  5 +-
 llvm/lib/CodeGen/CodeGenPrepare.cpp                |  4 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp          |  4 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.h            |  2 +-
 .../Analysis/CostModel/AMDGPU/addrspacecast.ll     | 33 ++++++++--
 .../LoopUnroll/AMDGPU/unroll-cost-addrspacecast.ll | 77 ++++++++++++++++++++++
 7 files changed, 119 insertions(+), 14 deletions(-)
 create mode 100644 llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-cost-addrspacecast.ll

diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 0f575c8..9a3be5c 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -413,6 +413,12 @@ public:
       if (TLI->isZExtFree(OpTy, Ty))
         return TargetTransformInfo::TCC_Free;
       return TargetTransformInfo::TCC_Basic;
+
+    case Instruction::AddrSpaceCast:
+      if (TLI->isFreeAddrSpaceCast(OpTy->getPointerAddressSpace(),
+                                   Ty->getPointerAddressSpace()))
+        return TargetTransformInfo::TCC_Free;
+      return TargetTransformInfo::TCC_Basic;
     }

     return BaseT::getOperationCost(Opcode, Ty, OpTy);
@@ -656,7 +662,7 @@ public:
       return 0;

     if (Opcode == Instruction::AddrSpaceCast &&
-        TLI->isNoopAddrSpaceCast(Src->getPointerAddressSpace(),
+        TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
                                  Dst->getPointerAddressSpace()))
       return 0;

diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index f06e01a..d00cc16 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1591,8 +1591,9 @@ public:
   }

   /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
-  /// are happy to sink it into basic blocks.
-  virtual bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
+  /// are happy to sink it into basic blocks. A cast may be free, but not
+  /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
+  virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
     return isNoopAddrSpaceCast(SrcAS, DestAS);
   }

diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 488cfe6..797064b 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -1140,8 +1140,8 @@ static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
   // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
   // than sinking only nop casts, but is helpful on some platforms.
   if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
-    if (!TLI.isCheapAddrSpaceCast(ASC->getSrcAddressSpace(),
-                                  ASC->getDestAddressSpace()))
+    if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
+                                 ASC->getDestAddressSpace()))
       return false;
   }

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index ac90399..1ca11da 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1261,8 +1261,8 @@ bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
   return I && I->getMetadata("amdgpu.noclobber");
 }

-bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
-                                            unsigned DestAS) const {
+bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS,
+                                           unsigned DestAS) const {
   // Flat -> private/local is a simple truncate.
   // Flat -> global is no-op
   if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index a63d751..4d7dac9 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -246,7 +246,7 @@ public:
   bool isMemOpUniform(const SDNode *N) const;
   bool isMemOpHasNoClobberedMemOperand(const SDNode *N) const;
   bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
-  bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
+  bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;

   TargetLoweringBase::LegalizeTypeAction
   getPreferredVectorAction(MVT VT) const override;
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/addrspacecast.ll b/llvm/test/Analysis/CostModel/AMDGPU/addrspacecast.ll
index ddb3148..f15ab50 100644
--- a/llvm/test/Analysis/CostModel/AMDGPU/addrspacecast.ll
+++ b/llvm/test/Analysis/CostModel/AMDGPU/addrspacecast.ll
@@ -1,45 +1,66 @@
 ; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri < %s | FileCheck %s

-; CHECK: 'addrspacecast_global_to_flat'
+; CHECK-LABEL: 'addrspacecast_global_to_flat'
 ; CHECK: estimated cost of 0 for {{.*}} addrspacecast i8 addrspace(1)* %ptr to i8*
 define i8* @addrspacecast_global_to_flat(i8 addrspace(1)* %ptr) #0 {
   %cast = addrspacecast i8 addrspace(1)* %ptr to i8*
   ret i8* %cast
 }

-; CHECK: 'addrspacecast_global_to_flat_v2'
+; CHECK-LABEL: 'addrspacecast_global_to_flat_v2'
 ; CHECK: estimated cost of 0 for {{.*}} addrspacecast <2 x i8 addrspace(1)*> %ptr to <2 x i8*>
 define <2 x i8*> @addrspacecast_global_to_flat_v2(<2 x i8 addrspace(1)*> %ptr) #0 {
   %cast = addrspacecast <2 x i8 addrspace(1)*> %ptr to <2 x i8*>
   ret <2 x i8*> %cast
 }

-; CHECK: 'addrspacecast_global_to_flat_v32'
+; CHECK-LABEL: 'addrspacecast_global_to_flat_v32'
 ; CHECK: estimated cost of 0 for {{.*}} addrspacecast <32 x i8 addrspace(1)*> %ptr to <32 x i8*>
 define <32 x i8*> @addrspacecast_global_to_flat_v32(<32 x i8 addrspace(1)*> %ptr) #0 {
   %cast = addrspacecast <32 x i8 addrspace(1)*> %ptr to <32 x i8*>
   ret <32 x i8*> %cast
 }

-; CHECK: 'addrspacecast_local_to_flat'
+; CHECK-LABEL: 'addrspacecast_local_to_flat'
 ; CHECK: estimated cost of 1 for {{.*}} addrspacecast i8 addrspace(3)* %ptr to i8*
 define i8* @addrspacecast_local_to_flat(i8 addrspace(3)* %ptr) #0 {
   %cast = addrspacecast i8 addrspace(3)* %ptr to i8*
   ret i8* %cast
 }

-; CHECK: 'addrspacecast_local_to_flat_v2'
+; CHECK-LABEL: 'addrspacecast_local_to_flat_v2'
 ; CHECK: estimated cost of 2 for {{.*}} addrspacecast <2 x i8 addrspace(3)*> %ptr to <2 x i8*>
 define <2 x i8*> @addrspacecast_local_to_flat_v2(<2 x i8 addrspace(3)*> %ptr) #0 {
   %cast = addrspacecast <2 x i8 addrspace(3)*> %ptr to <2 x i8*>
   ret <2 x i8*> %cast
 }

-; CHECK: 'addrspacecast_local_to_flat_v32'
+; CHECK-LABEL: 'addrspacecast_local_to_flat_v32'
 ; CHECK: estimated cost of 32 for {{.*}} addrspacecast <32 x i8 addrspace(3)*> %ptr to <32 x i8*>
 define <32 x i8*> @addrspacecast_local_to_flat_v32(<32 x i8 addrspace(3)*> %ptr) #0 {
   %cast = addrspacecast <32 x i8 addrspace(3)*> %ptr to <32 x i8*>
   ret <32 x i8*> %cast
 }

+; CHECK-LABEL: 'addrspacecast_flat_to_local'
+; CHECK: estimated cost of 0 for {{.*}} addrspacecast i8* %ptr to i8 addrspace(3)*
+define i8 addrspace(3)* @addrspacecast_flat_to_local(i8* %ptr) #0 {
+  %cast = addrspacecast i8* %ptr to i8 addrspace(3)*
+  ret i8 addrspace(3)* %cast
+}
+
+; CHECK-LABEL: 'addrspacecast_flat_to_local_v2'
+; CHECK: estimated cost of 0 for {{.*}} addrspacecast <2 x i8*> %ptr to <2 x i8 addrspace(3)*>
+define <2 x i8 addrspace(3)*> @addrspacecast_flat_to_local_v2(<2 x i8*> %ptr) #0 {
+  %cast = addrspacecast <2 x i8*> %ptr to <2 x i8 addrspace(3)*>
+  ret <2 x i8 addrspace(3)*> %cast
+}
+
+; CHECK-LABEL: 'addrspacecast_flat_to_local_v32'
+; CHECK: estimated cost of 0 for {{.*}} addrspacecast <32 x i8*> %ptr to <32 x i8 addrspace(3)*>
+define <32 x i8 addrspace(3)*> @addrspacecast_flat_to_local_v32(<32 x i8*> %ptr) #0 {
+  %cast = addrspacecast <32 x i8*> %ptr to <32 x i8 addrspace(3)*>
+  ret <32 x i8 addrspace(3)*> %cast
+}
+
 attributes #0 = { nounwind readnone }
diff --git a/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-cost-addrspacecast.ll b/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-cost-addrspacecast.ll
new file mode 100644
index 0000000..761aa07
--- /dev/null
+++ b/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-cost-addrspacecast.ll
@@ -0,0 +1,77 @@
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=hawaii -loop-unroll -unroll-threshold=75 -unroll-peel-count=0 -unroll-allow-partial=false -unroll-max-iteration-count-to-analyze=16 < %s | FileCheck %s
+
+; CHECK-LABEL: @test_func_addrspacecast_cost_noop(
+; CHECK-NOT: br i1
+define amdgpu_kernel void @test_func_addrspacecast_cost_noop(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture %in) #0 {
+entry:
+  br label %for.body
+
+for.body:
+  %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+  %sum.02 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
+  %arrayidx.in = getelementptr inbounds float, float addrspace(1)* %in, i32 %indvars.iv
+  %arrayidx.out = getelementptr inbounds float, float addrspace(1)* %out, i32 %indvars.iv
+  %cast.in = addrspacecast float addrspace(1)* %arrayidx.in to float*
+  %cast.out = addrspacecast float addrspace(1)* %arrayidx.out to float*
+  %load = load float, float* %cast.in
+  %fmul = fmul float %load, %sum.02
+  store float %fmul, float* %cast.out
+  %indvars.iv.next = add i32 %indvars.iv, 1
+  %exitcond = icmp eq i32 %indvars.iv.next, 16
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+  ret void
+}
+
+; Free, but not a no-op
+; CHECK-LABEL: @test_func_addrspacecast_cost_free(
+; CHECK-NOT: br i1
+define amdgpu_kernel void @test_func_addrspacecast_cost_free(float* noalias nocapture %out, float* noalias nocapture %in) #0 {
+entry:
+  br label %for.body
+
+for.body:
+  %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+  %sum.02 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
+  %arrayidx.in = getelementptr inbounds float, float* %in, i32 %indvars.iv
+  %arrayidx.out = getelementptr inbounds float, float* %out, i32 %indvars.iv
+  %cast.in = addrspacecast float* %arrayidx.in to float addrspace(3)*
+  %cast.out = addrspacecast float* %arrayidx.out to float addrspace(3)*
+  %load = load float, float addrspace(3)* %cast.in
+  %fmul = fmul float %load, %sum.02
+  store float %fmul, float addrspace(3)* %cast.out
+  %indvars.iv.next = add i32 %indvars.iv, 1
+  %exitcond = icmp eq i32 %indvars.iv.next, 16
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+  ret void
+}
+
+; CHECK-LABEL: @test_func_addrspacecast_cost_nonfree(
+; CHECK: br i1 %exitcond
+define amdgpu_kernel void @test_func_addrspacecast_cost_nonfree(float addrspace(3)* noalias nocapture %out, float addrspace(3)* noalias nocapture %in) #0 {
+entry:
+  br label %for.body
+
+for.body:
+  %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+  %sum.02 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
+  %arrayidx.in = getelementptr inbounds float, float addrspace(3)* %in, i32 %indvars.iv
+  %arrayidx.out = getelementptr inbounds float, float addrspace(3)* %out, i32 %indvars.iv
+  %cast.in = addrspacecast float addrspace(3)* %arrayidx.in to float*
+  %cast.out = addrspacecast float addrspace(3)* %arrayidx.out to float*
+  %load = load float, float* %cast.in
+  %fmul = fmul float %load, %sum.02
+  store float %fmul, float* %cast.out
+  %indvars.iv.next = add i32 %indvars.iv, 1
+  %exitcond = icmp eq i32 %indvars.iv.next, 16
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+  ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone speculatable }
-- 
2.7.4
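
Side note, not part of the applied diff: a minimal sketch of how a target other than AMDGPU could adopt the new hook introduced above. The class name ExampleTLI, the constructor wiring, and the address-space numbering (0 = flat, 1 = global, 3 = local) are assumptions chosen only to mirror the AMDGPU behaviour in SIISelLowering, where a flat -> local cast is a 64-bit to 32-bit pointer truncate: free for cost modelling, but not a no-op.

#include "llvm/CodeGen/TargetLowering.h"

namespace {
// Hypothetical target lowering, for illustration only (not from this patch).
class ExampleTLI : public llvm::TargetLowering {
public:
  explicit ExampleTLI(const llvm::TargetMachine &TM) : TargetLowering(TM) {}

  // Strict query: only flat (0) -> global (1) leaves the pointer bits untouched.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    return SrcAS == 0 && DestAS == 1;
  }

  // Cost-model query used by the unroller/inliner via TTI: flat (0) -> local (3)
  // lowers to a plain pointer truncate, so report it as free even though it
  // still emits an instruction.
  bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    if (SrcAS == 0 && DestAS == 3)
      return true;
    // Everything that is a no-op is trivially free (the default behaviour).
    return isNoopAddrSpaceCast(SrcAS, DestAS);
  }
};
} // end anonymous namespace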