From: Matt Arsenault
Date: Sat, 30 Jul 2016 01:40:36 +0000 (+0000)
Subject: AMDGPU: Fix shouldConvertConstantLoadToIntImm behavior
X-Git-Tag: llvmorg-4.0.0-rc1~13755
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=749035b7b1dc0450a2f157df9801d9f7534fbed9;p=platform%2Fupstream%2Fllvm.git

AMDGPU: Fix shouldConvertConstantLoadToIntImm behavior

This should really be true for any immediate, not just inline ones.

llvm-svn: 277260
---

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 0333610..6f56920 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -535,8 +535,8 @@ SITargetLowering::getPreferredVectorAction(EVT VT) const {
 
 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
-  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
-  return TII->isInlineConstant(Imm);
+  // FIXME: Could be smarter if called for vector constants.
+  return true;
 }
 
 bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.memcpy.ll b/llvm/test/CodeGen/AMDGPU/llvm.memcpy.ll
index 8398309..fdaca92 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.memcpy.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.memcpy.ll
@@ -1,8 +1,9 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
 
 declare void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* nocapture, i8 addrspace(3)* nocapture, i32, i32, i1) nounwind
 declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p1i8.p2i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(2)* nocapture, i64, i32, i1) nounwind
 
 
 ; FUNC-LABEL: {{^}}test_small_memcpy_i64_lds_to_lds_align1:
@@ -325,3 +326,47 @@ define void @test_small_memcpy_i64_global_to_global_align16(i64 addrspace(1)* no
   call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %bcout, i8 addrspace(1)* %bcin, i64 32, i32 16, i1 false) nounwind
   ret void
 }
+
+; Test shouldConvertConstantLoadToIntImm
+@hello.align4 = private unnamed_addr addrspace(2) constant [16 x i8] c"constant string\00", align 4
+@hello.align1 = private unnamed_addr addrspace(2) constant [16 x i8] c"constant string\00", align 1
+
+; FUNC-LABEL: {{^}}test_memcpy_const_string_align4:
+; SI: s_getpc_b64
+; SI: s_add_u32 s{{[0-9]+}}, s{{[0-9]+}}, hello.align4+4
+; SI: s_addc_u32
+; SI: s_load_dwordx4
+; SI: s_load_dwordx4
+; SI: s_load_dwordx2
+; SI: buffer_store_dwordx4
+; SI: buffer_store_dwordx4
+define void @test_memcpy_const_string_align4(i8 addrspace(1)* noalias %out) nounwind {
+  %str = bitcast [16 x i8] addrspace(2)* @hello.align4 to i8 addrspace(2)*
+  call void @llvm.memcpy.p1i8.p2i8.i64(i8 addrspace(1)* %out, i8 addrspace(2)* %str, i64 32, i32 4, i1 false)
+  ret void
+}
+
+; FUNC-LABEL: {{^}}test_memcpy_const_string_align1:
+; SI-NOT: buffer_load
+; SI: v_mov_b32_e32 v{{[0-9]+}}, 0x69
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+define void @test_memcpy_const_string_align1(i8 addrspace(1)* noalias %out) nounwind {
+  %str = bitcast [16 x i8] addrspace(2)* @hello.align1 to i8 addrspace(2)*
+  call void @llvm.memcpy.p1i8.p2i8.i64(i8 addrspace(1)* %out, i8 addrspace(2)* %str, i64 32, i32 1, i1 false)
+  ret void
+}
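
For context on the hook being changed, below is a minimal sketch of how a target hook like this is typically consulted. It is not the actual in-tree call site: the helper preferImmediate and its use are hypothetical, and the header paths assume the LLVM 4.0-era tree layout; only shouldConvertConstantLoadToIntImm itself (signature as in the diff above) is real.

// Illustrative sketch only: a hypothetical caller asking the target whether
// a constant chunk read from a constant global should be materialized as an
// immediate instead of being loaded from memory.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/Type.h"
#include "llvm/Target/TargetLowering.h"  // header path as of LLVM ~4.0

using namespace llvm;

// Hypothetical helper: decide how to emit one chunk of a constant source.
static bool preferImmediate(const TargetLowering &TLI, const APInt &Chunk,
                            Type *Ty) {
  // Before this patch the SI override only returned true for inline
  // constants; afterwards it returns true for any immediate, so a memcpy
  // from a constant string can be expanded into v_mov + store sequences
  // (as checked in test_memcpy_const_string_align1 above) rather than
  // forcing extra loads of the source data.
  return TLI.shouldConvertConstantLoadToIntImm(Chunk, Ty);
}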