From a7cb31123c2526f04e6a587d6ada4084cefe6fb4 Mon Sep 17 00:00:00 2001
From: Farhana Aleen
Date: Fri, 9 Mar 2018 17:41:39 +0000
Subject: [PATCH] [AMDGPU] Support ds_read_b128 generation; widen the vector length for the local address space.

Summary: Starting with the 2nd generation of GCN, the ISA supports
ds_read_b128 in addition to ds_read_b64. This patch adds the ds_read_b128
instruction pattern and enables generation of the instruction. It also widens
the vector length the vectorizer uses for the local address space, so that the
vectorizer produces 128-bit loads which are then lowered to ds_read_b128.
Since the performance benefit is not yet clear, the compiler only generates
ds_read_b128 under the -amdgpu-ds128 option.

Author: FarhanaAleen

Reviewed By: rampitec, arsenm

Subscribers: llvm-commits, AMDGPU

Differential Revision: https://reviews.llvm.org/D44210

llvm-svn: 327153
---
 llvm/lib/Target/AMDGPU/AMDGPUInstructions.td          |  8 ++++++++
 llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h              |  6 ++++++
 llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp  |  8 ++++----
 llvm/lib/Target/AMDGPU/DSInstructions.td              |  1 +
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp             | 16 ++++++++++------
 llvm/lib/Target/AMDGPU/SIInstrInfo.td                 |  4 ++++
 llvm/test/CodeGen/AMDGPU/load-local-f32.ll            | 19 +++++++++++++++++++
 llvm/test/CodeGen/AMDGPU/load-local-f64.ll            | 18 ++++++++++++++++++
 llvm/test/CodeGen/AMDGPU/load-local-i16.ll            | 18 ++++++++++++++++++
 llvm/test/CodeGen/AMDGPU/load-local-i32.ll            | 19 +++++++++++++++++++
 llvm/test/CodeGen/AMDGPU/load-local-i64.ll            | 14 ++++++++++++++
 llvm/test/CodeGen/AMDGPU/load-local-i8.ll             | 17 +++++++++++++++++
 .../LoadStoreVectorizer/AMDGPU/merge-stores.ll        |  3 +--
 .../LoadStoreVectorizer/AMDGPU/multiple_tails.ll      |  3 +--
 14 files changed, 140 insertions(+), 14 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
index 4f28d6f..e719933 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -248,6 +248,10 @@ class Aligned8Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
   return cast<MemSDNode>(N)->getAlignment() % 8 == 0;
 }]>;
 
+class Aligned16Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
+  return cast<MemSDNode>(N)->getAlignment() >= 16;
+}]>;
+
 class LoadFrag <SDPatternOperator op> : PatFrag<(ops node:$ptr), (op node:$ptr)>;
 
 class StoreFrag<SDPatternOperator op> : PatFrag <
@@ -371,6 +375,10 @@ def load_align8_local : Aligned8Bytes <
   (ops node:$ptr), (load_local node:$ptr)
 >;
 
+def load_align16_local : Aligned16Bytes <
+  (ops node:$ptr), (load_local node:$ptr)
+>;
+
 def store_align8_local : Aligned8Bytes <
   (ops node:$val, node:$ptr), (store_local node:$val, node:$ptr)
 >;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
index 830c077..0006636 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -414,6 +414,12 @@ public:
     return FlatForGlobal;
   }
 
+  /// \returns If target supports ds_read/write_b128 and user enables generation
+  /// of ds_read/write_b128.
+  bool useDS128(bool UserEnable) const {
+    return CIInsts && UserEnable;
+  }
+
   /// \returns If MUBUF instructions always perform range checking, even for
   /// buffer resources used for private memory access.
   bool privateMemoryResourceIsRangeChecked() const {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 4292575..a22c180 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -265,11 +265,11 @@ unsigned AMDGPUTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
     return 512;
   }
 
-  if (AddrSpace == AS.FLAT_ADDRESS)
-    return 128;
-  if (AddrSpace == AS.LOCAL_ADDRESS ||
+  if (AddrSpace == AS.FLAT_ADDRESS ||
+      AddrSpace == AS.LOCAL_ADDRESS ||
       AddrSpace == AS.REGION_ADDRESS)
-    return 64;
+    return 128;
+
   if (AddrSpace == AS.PRIVATE_ADDRESS)
     return 8 * ST->getMaxPrivateElementSize();
diff --git a/llvm/lib/Target/AMDGPU/DSInstructions.td b/llvm/lib/Target/AMDGPU/DSInstructions.td
index ec85da7..88484c06 100644
--- a/llvm/lib/Target/AMDGPU/DSInstructions.td
+++ b/llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -649,6 +649,7 @@ defm : DSReadPat_mc ;
 
 let AddedComplexity = 100 in {
 
 defm : DSReadPat_mc <DS_READ_B64, v2i32, "load_align8_local">;
+defm : DSReadPat_mc <DS_READ_B128, v4i32, "load_align16_local">;
 
 } // End AddedComplexity = 100
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 68a45cb..8463b22 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -94,6 +94,11 @@ static cl::opt<bool> EnableVGPRIndexMode(
   cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
   cl::init(false));
 
+static cl::opt<bool> EnableDS128(
+  "amdgpu-ds128",
+  cl::desc("Use DS_read/write_b128"),
+  cl::init(false));
+
 static cl::opt<unsigned> AssumeFrameIndexHighZeroBits(
   "amdgpu-frame-index-zero-bits",
   cl::desc("High bits of frame index assumed to be zero"),
@@ -5425,14 +5430,13 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
       llvm_unreachable("unsupported private_element_size");
     }
   } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
-    if (NumElements > 2)
-      return SplitVectorLoad(Op, DAG);
-
-    if (NumElements == 2)
+    // Use ds_read_b128 if possible.
+    if (Subtarget->useDS128(EnableDS128) && Load->getAlignment() >= 16 &&
+        MemVT.getStoreSize() == 16)
       return SDValue();
 
-    // If properly aligned, if we split we might be able to use ds_read_b64.
-    return SplitVectorLoad(Op, DAG);
+    if (NumElements > 2)
+      return SplitVectorLoad(Op, DAG);
   }
   return SDValue();
 }
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 10f5c3b..fb46d17 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -410,6 +410,9 @@ def sextloadi16_glue : PatFrag<(ops node:$ptr), (sextload_glue node:$ptr), [{
 def load_glue_align8 : Aligned8Bytes <
   (ops node:$ptr), (load_glue node:$ptr)
 >;
+def load_glue_align16 : Aligned16Bytes <
+  (ops node:$ptr), (load_glue node:$ptr)
+>;
 
 def load_local_m0 : LoadFrag<load_glue>, LocalAddress;
@@ -418,6 +421,7 @@ def sextloadi16_local_m0 : LoadFrag<sextloadi16_glue>, LocalAddress;
 def az_extloadi8_local_m0 : LoadFrag<az_extloadi8_glue>, LocalAddress;
 def az_extloadi16_local_m0 : LoadFrag<az_extloadi16_glue>, LocalAddress;
 def load_align8_local_m0 : LoadFrag <load_glue_align8>, LocalAddress;
+def load_align16_local_m0 : LoadFrag <load_glue_align16>, LocalAddress;
 
 def AMDGPUst_glue : SDNode <"ISD::STORE", SDTStore,
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-f32.ll b/llvm/test/CodeGen/AMDGPU/load-local-f32.ll
index f035d22..3d87208 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-f32.ll
@@ -2,6 +2,11 @@
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SICIVI,FUNC %s
 ; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefixes=EG,FUNC %s
 
+; Testing for ds_read_128
+; RUN: llc -march=amdgcn -mcpu=tahiti -amdgpu-ds128 < %s | FileCheck -check-prefixes=SI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+
 ; FUNC-LABEL: {{^}}load_f32_local:
 ; SICIVI: s_mov_b32 m0
 ; GFX9-NOT: m0
@@ -122,4 +127,18 @@ entry:
   ret void
 }
 
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_v4f32_to_128:
+; SI-NOT: ds_read_b128
+; CIVI: ds_read_b128
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+define amdgpu_kernel void @local_v4f32_to_128(<4 x float> addrspace(3)* %out, <4 x float> addrspace(3)* %in) {
+  %ld = load <4 x float>, <4 x float> addrspace(3)* %in, align 16
+  store <4 x float> %ld, <4 x float> addrspace(3)* %out
+  ret void
+}
+
 attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-f64.ll b/llvm/test/CodeGen/AMDGPU/load-local-f64.ll
index ffb6710..14c31e6 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-f64.ll
@@ -4,6 +4,10 @@
 ; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
 ; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefixes=EG,FUNC %s
 
+; Testing for ds_read_b128
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+
 ; FUNC-LABEL: {{^}}local_load_f64:
 ; SICIV: s_mov_b32 m0
 ; GFX9-NOT: m0
@@ -170,4 +174,18 @@ entry:
   ret void
 }
 
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_load_v2f64_to_128:
+; CIVI: ds_read_b128
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+define amdgpu_kernel void @local_load_v2f64_to_128(<2 x double> addrspace(3)* %out, <2 x double> addrspace(3)* %in) {
+entry:
+  %ld = load <2 x double>, <2 x double> addrspace(3)* %in, align 16
+  store <2 x double> %ld, <2 x double> addrspace(3)* %out
+  ret void
+}
+
 attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
index d3557c1..7438fd2 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
@@ -3,6 +3,10 @@
 ; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,GFX89,FUNC %s
 ; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
 
+; Testing for ds_read_b128
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+
 ; FUNC-LABEL: {{^}}local_load_i16:
 ; GFX9-NOT: m0
 ; SICIVI: s_mov_b32 m0
@@ -935,4 +939,18 @@ define amdgpu_kernel void @local_sextload_v32i16_to_v32i64(<32 x i64> addrspace(
 ;   ret void
 ; }
 
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_v8i16_to_128:
+; SI-NOT: ds_read_b128
+; CIVI: ds_read_b128
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+define amdgpu_kernel void @local_v8i16_to_128(<8 x i16> addrspace(3)* %out, <8 x i16> addrspace(3)* %in) {
+  %ld = load <8 x i16>, <8 x i16> addrspace(3)* %in, align 16
+  store <8 x i16> %ld, <8 x i16> addrspace(3)* %out
+  ret void
+}
+
 attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i32.ll b/llvm/test/CodeGen/AMDGPU/load-local-i32.ll
index c736586..1dd7daf 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-i32.ll
@@ -3,6 +3,11 @@
 ; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
 ; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
 
+; Testing for ds_read_128
+; RUN: llc -march=amdgcn -mcpu=tahiti -amdgpu-ds128 < %s | FileCheck -check-prefixes=SI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+
 ; FUNC-LABEL: {{^}}local_load_i32:
 ; GCN-NOT: s_wqm_b64
 ; SICIVI: s_mov_b32 m0, -1
@@ -175,6 +180,20 @@ define amdgpu_kernel void @local_sextload_v4i32_to_v4i64(<4 x i64> addrspace(3)*
   ret void
 }
 
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_v4i32_to_128:
+; SI-NOT: ds_read_b128
+; CIVI: ds_read_b128
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+define amdgpu_kernel void @local_v4i32_to_128(<4 x i32> addrspace(3)* %out, <4 x i32> addrspace(3)* %in) {
+  %ld = load <4 x i32>, <4 x i32> addrspace(3)* %in, align 16
+  store <4 x i32> %ld, <4 x i32> addrspace(3)* %out
+  ret void
+}
+
 ; FUNC-LABEL: {{^}}local_zextload_v8i32_to_v8i64:
 ; SICIVI: s_mov_b32 m0, -1
 ; GFX9-NOT: m0
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i64.ll b/llvm/test/CodeGen/AMDGPU/load-local-i64.ll
index 376f6f5..359fbb4 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-i64.ll
@@ -4,6 +4,10 @@
 ; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
 ; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefixes=EG,FUNC %s
 
+; Testing for ds_read_b128
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+
 ; FUNC-LABEL: {{^}}local_load_i64:
 ; SICIVI: s_mov_b32 m0
 ; GFX9-NOT: m0
@@ -36,6 +40,16 @@ entry:
   ret void
 }
 
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_load_v2i64_to_128:
+; CIVI: ds_read_b128
+define amdgpu_kernel void @local_load_v2i64_to_128(<2 x i64> addrspace(3)* %out, <2 x i64> addrspace(3)* %in) {
+entry:
+  %ld = load <2 x i64>, <2 x i64> addrspace(3)* %in
+  store <2 x i64> %ld, <2 x i64> addrspace(3)* %out
+  ret void
+}
+
 ; FUNC-LABEL: {{^}}local_load_v3i64:
 ; SICIVI: s_mov_b32 m0
 ; GFX9-NOT: m0
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i8.ll b/llvm/test/CodeGen/AMDGPU/load-local-i8.ll
index 72f5408..e1931af 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-i8.ll
@@ -3,6 +3,9 @@
 ; RUN: llc -march=amdgcn -mtriple=amdgcn---amdgiz -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
 ; RUN: llc -march=r600 -mtriple=r600---amdgiz -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
 
+; Testing for ds_read_b128
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
 
 ; FUNC-LABEL: {{^}}local_load_i8:
 ; GCN-NOT: s_wqm_b64
@@ -1021,4 +1024,18 @@ define amdgpu_kernel void @local_sextload_v32i8_to_v32i16(<32 x i16> addrspace(3
 ;   ret void
 ; }
 
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_v16i8_to_128:
+; SI-NOT: ds_read_b128
+; CIVI: ds_read_b128
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+define amdgpu_kernel void @local_v16i8_to_128(<16 x i8> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) {
+  %ld = load <16 x i8>, <16 x i8> addrspace(3)* %in, align 16
+  store <16 x i8> %ld, <16 x i8> addrspace(3)* %out
+  ret void
+}
+
 attributes #0 = { nounwind }
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
index 19fc44b..5eb3b25 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
@@ -504,8 +504,7 @@ define amdgpu_kernel void @merge_local_store_2_constants_i32_align_2(i32 addrspa
 }
 
 ; CHECK-LABEL: @merge_local_store_4_constants_i32
-; CHECK: store <2 x i32> , <2 x i32> addrspace(3)*
-; CHECK: store <2 x i32> , <2 x i32> addrspace(3)*
+; CHECK: store <4 x i32> , <4 x i32> addrspace(3)*
 define amdgpu_kernel void @merge_local_store_4_constants_i32(i32 addrspace(3)* %out) #0 {
   %out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
   %out.gep.2 = getelementptr i32, i32 addrspace(3)* %out, i32 2
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
index 8a78f3d..b684ca8 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
@@ -29,11 +29,10 @@ define amdgpu_kernel void @no_crash(i32 %arg) {
 
 ; longest chain vectorized
 ; CHECK-LABEL: @interleave_get_longest
-; CHECK: load <2 x i32>
+; CHECK: load <4 x i32>
 ; CHECK: load i32
 ; CHECK: store <2 x i32> zeroinitializer
 ; CHECK: load i32
-; CHECK: load <2 x i32>
 ; CHECK: load i32
 ; CHECK: load i32
-- 
2.7.4