From 635d479322b9240af586e36acdcbf56db42778c2 Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Wed, 3 Oct 2018 02:47:25 +0000
Subject: [PATCH] AMDGPU: Always run AMDGPUAlwaysInline

Even if calls are enabled, this pass still needs to run to force
inlining of functions that use LDS.

llvm-svn: 343657
---
 llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp       | 21 +++++++++------------
 ...force-alwaysinline-lds-global-address-codegen.ll  | 21 +++++++++++++++++++++
 2 files changed, 30 insertions(+), 12 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/force-alwaysinline-lds-global-address-codegen.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index f6ebc3d..ccefdf3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -605,18 +605,15 @@ void AMDGPUPassConfig::addIRPasses() {
   addPass(createAtomicExpandPass());
   addPass(createAMDGPULowerIntrinsicsPass());
 
-  if (TM.getTargetTriple().getArch() == Triple::r600 ||
-      !EnableAMDGPUFunctionCalls) {
-    // Function calls are not supported, so make sure we inline everything.
-    addPass(createAMDGPUAlwaysInlinePass());
-    addPass(createAlwaysInlinerLegacyPass());
-    // We need to add the barrier noop pass, otherwise adding the function
-    // inlining pass will cause all of the PassConfigs passes to be run
-    // one function at a time, which means if we have a nodule with two
-    // functions, then we will generate code for the first function
-    // without ever running any passes on the second.
-    addPass(createBarrierNoopPass());
-  }
+  // Function calls are not supported, so make sure we inline everything.
+  addPass(createAMDGPUAlwaysInlinePass());
+  addPass(createAlwaysInlinerLegacyPass());
+  // We need to add the barrier noop pass, otherwise adding the function
+  // inlining pass will cause all of the PassConfig's passes to be run
+  // one function at a time, which means if we have a module with two
+  // functions, then we will generate code for the first function
+  // without ever running any passes on the second.
+  addPass(createBarrierNoopPass());
 
   if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
     // TODO: May want to move later or split into an early and late one.
diff --git a/llvm/test/CodeGen/AMDGPU/force-alwaysinline-lds-global-address-codegen.ll b/llvm/test/CodeGen/AMDGPU/force-alwaysinline-lds-global-address-codegen.ll
new file mode 100644
index 0000000..6d90cf9
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/force-alwaysinline-lds-global-address-codegen.ll
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -amdgpu-function-calls -amdgpu-stress-function-calls < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -amdgpu-stress-function-calls < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa < %s | FileCheck -check-prefix=GCN %s
+
+@lds0 = addrspace(3) global i32 undef, align 4
+
+; GCN-NOT: load_lds_simple
+
+define internal i32 @load_lds_simple() {
+  %load = load i32, i32 addrspace(3)* @lds0, align 4
+  ret i32 %load
+}
+
+; GCN-LABEL: {{^}}kernel:
+; GCN: v_mov_b32_e32 [[ADDR:v[0-9]+]], 0
+; GCN: ds_read_b32 v{{[0-9]+}}, [[ADDR]]
+define amdgpu_kernel void @kernel(i32 addrspace(1)* %out) {
+  %call = call i32 @load_lds_simple()
+  store i32 %call, i32 addrspace(1)* %out
+  ret void
+}
-- 
2.7.4
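
Not part of the patch: a minimal sketch of the module-level effect the new test
relies on, assuming AMDGPUAlwaysInlinePass marks any non-kernel function that
touches an LDS (addrspace(3)) global with the alwaysinline attribute, so the
subsequent createAlwaysInlinerLegacyPass() folds it into its kernel callers.
The attribute-group placement below is illustrative, not taken from the pass's
actual output.

  @lds0 = addrspace(3) global i32 undef, align 4

  ; Assumed state after AMDGPUAlwaysInlinePass: the LDS user carries #0,
  ; so the AlwaysInliner inlines it into @kernel and the GCN-NOT check in
  ; the test can require that no load_lds_simple symbol survives codegen.
  define internal i32 @load_lds_simple() #0 {
    %load = load i32, i32 addrspace(3)* @lds0, align 4
    ret i32 %load
  }

  attributes #0 = { alwaysinline }

This is why the pass must run even when -amdgpu-function-calls is enabled:
LDS globals are addressed relative to the kernel's allocation, so a callee
referencing one cannot be compiled as a standalone function.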