From 7d1b6c81af232c1a18a3465c0a9524c0d1ac29ef Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Fri, 29 Apr 2016 06:25:10 +0000
Subject: [PATCH] AMDGPU: Stop reporting an addressing mode for unknown
 addrspace

This was being treated the same as private, which has an immediate
offset. For unknown, it probably means it's for a computation not
actually being used for accessing memory, so it should not have a
nontrivial addressing mode.

llvm-svn: 268002
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  7 ++++++-
 .../AMDGPU/lsr-postinc-pos-addrspace.ll       | 24 ++++++++++++++++++----
 2 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 5422150..c35ecf1 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -454,7 +454,6 @@ bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
   }
 
   case AMDGPUAS::PRIVATE_ADDRESS:
-  case AMDGPUAS::UNKNOWN_ADDRESS_SPACE:
     return isLegalMUBUFAddressingMode(AM);
 
   case AMDGPUAS::LOCAL_ADDRESS:
@@ -475,6 +474,12 @@ bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
     return false;
   }
   case AMDGPUAS::FLAT_ADDRESS:
+  case AMDGPUAS::UNKNOWN_ADDRESS_SPACE:
+    // For an unknown address space, this usually means that this is for some
+    // reason being used for pure arithmetic, and not based on some addressing
+    // computation. We don't have instructions that compute pointers with any
+    // addressing modes, so treat them as having no offset like flat
+    // instructions.
     return isLegalFlatAddressingMode(AM);
 
   default:
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
index bd80302..8556bdc 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
@@ -9,10 +9,11 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
 ; CHECK: bb11:
 ; CHECK: %lsr.iv1 = phi i32 [ %lsr.iv.next2, %bb ], [ -2, %entry ]
 ; CHECK: %lsr.iv = phi i32 [ %lsr.iv.next, %bb ], [ undef, %entry ]
-
-; CHECK: bb:
 ; CHECK: %lsr.iv.next = add i32 %lsr.iv, -1
 ; CHECK: %lsr.iv.next2 = add i32 %lsr.iv1, 2
+; CHECK: br i1
+
+; CHECK: bb:
 ; CHECK: %scevgep = getelementptr i8, i8 addrspace(3)* %t, i32 %lsr.iv.next2
 ; CHECK: %c1 = icmp ult i8 addrspace(3)* %scevgep, undef
 define void @local_cmp_user() nounwind {
@@ -37,8 +38,13 @@ bb13:
 }
 
 ; CHECK-LABEL: @global_cmp_user(
+; CHECK: %lsr.iv1 = phi i64
+; CHECK: %lsr.iv = phi i64
 ; CHECK: %lsr.iv.next = add i64 %lsr.iv, -1
 ; CHECK: %lsr.iv.next2 = add i64 %lsr.iv1, 2
+; CHECK: br i1
+
+; CHECK: bb:
 ; CHECK: %scevgep = getelementptr i8, i8 addrspace(1)* %t, i64 %lsr.iv.next2
 define void @global_cmp_user() nounwind {
 entry:
@@ -62,9 +68,14 @@ bb13:
 }
 
 ; CHECK-LABEL: @global_gep_user(
-; CHECK: %p = getelementptr i8, i8 addrspace(1)* %t, i32 %lsr.iv1
+; CHECK: %lsr.iv1 = phi i32 [ %lsr.iv.next2, %bb ], [ 0, %entry ]
+; CHECK: %lsr.iv = phi i32 [ %lsr.iv.next, %bb ], [ undef, %entry ]
 ; CHECK: %lsr.iv.next = add i32 %lsr.iv, -1
 ; CHECK: %lsr.iv.next2 = add i32 %lsr.iv1, 2
+; CHECK: br i1
+
+; CHECK: bb:
+; CHECK: %p = getelementptr i8, i8 addrspace(1)* %t, i32 %lsr.iv1
 define void @global_gep_user() nounwind {
 entry:
   br label %bb11
@@ -87,9 +98,14 @@ bb13:
 }
 
 ; CHECK-LABEL: @global_sext_scale_user(
-; CHECK: %p = getelementptr i8, i8 addrspace(1)* %t, i64 %ii.ext
+; CHECK: %lsr.iv1 = phi i32 [ %lsr.iv.next2, %bb ], [ 0, %entry ]
+; CHECK: %lsr.iv = phi i32 [ %lsr.iv.next, %bb ], [ undef, %entry ]
 ; CHECK: %lsr.iv.next = add i32 %lsr.iv, -1
 ; CHECK: %lsr.iv.next2 = add i32 %lsr.iv1, 2
+; CHECK: br i1
+
+; CHECK: bb
+; CHECK: %p = getelementptr i8, i8 addrspace(1)* %t, i64 %ii.ext
 define void @global_sext_scale_user() nounwind {
 entry:
   br label %bb11
-- 
2.7.4
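
Editor's note: the standalone C++ sketch below is not part of the patch or of the LLVM sources. It only models the effect of the change described in the commit message, using a simplified stand-in for TargetLowering's addressing-mode query. The names (AddrModeSketch, isLegalMUBUFLike, isLegalFlatLike, isLegalAddressingModeSketch) and the 12-bit MUBUF offset bound are assumptions made for illustration.

// Sketch of the dispatch after the patch: an unknown address space is
// answered like FLAT (no folded offset, no scale) instead of like PRIVATE
// (which can fold a small immediate offset via MUBUF).
#include <cstdint>
#include <iostream>

struct AddrModeSketch {
  int64_t BaseOffs = 0; // immediate offset added to the base register
  int64_t Scale = 0;    // scale applied to an index register
};

enum class AddrSpace { Private, Flat, Unknown };

// Private (scratch) accesses go through MUBUF; assume a small unsigned
// immediate offset can be folded into the instruction.
static bool isLegalMUBUFLike(const AddrModeSketch &AM) {
  return AM.BaseOffs >= 0 && AM.BaseOffs < (1 << 12) && AM.Scale == 0;
}

// Flat-style accesses take a bare pointer: nothing can be folded in.
static bool isLegalFlatLike(const AddrModeSketch &AM) {
  return AM.BaseOffs == 0 && AM.Scale == 0;
}

// After the patch, Unknown shares the Flat rule rather than the Private one.
static bool isLegalAddressingModeSketch(AddrSpace AS,
                                        const AddrModeSketch &AM) {
  switch (AS) {
  case AddrSpace::Private:
    return isLegalMUBUFLike(AM);
  case AddrSpace::Flat:
  case AddrSpace::Unknown:
    return isLegalFlatLike(AM);
  }
  return false;
}

int main() {
  AddrModeSketch AM;
  AM.BaseOffs = 16; // base + 16 with no scaled index
  std::cout << isLegalAddressingModeSketch(AddrSpace::Private, AM) << '\n'; // 1
  std::cout << isLegalAddressingModeSketch(AddrSpace::Unknown, AM) << '\n'; // 0
  return 0;
}

The point the LSR test updates reflect: once unknown-address-space pointer arithmetic stops advertising a free immediate offset, LoopStrengthReduce places the increments before the branch rather than assuming an offset can be folded into a later access.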