From 629d12de70959f49f0b8f78eb9e6e217103a24c7 Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Fri, 22 Apr 2016 20:21:36 +0000
Subject: [PATCH] DAGCombiner: Relax alignment restriction when changing load
 type

If the target allows the alignment, this should still be OK.

llvm-svn: 267209
---
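A rough sketch of the effect, using IR taken from the new AMDGPU test
added below (the exact outcome depends on how the target answers
allowsMemoryAccess()). The old check compared the ABI alignment of the
bitcast type against the load's alignment, so a 4-byte-aligned i64 load
feeding a bitcast to <2 x i32>, a type that carries 8-byte ABI alignment
here, was never rewritten:

    %a = load i64, i64 addrspace(1)* %in, align 4
    %vec = bitcast i64 %a to <2 x i32>
    %elt0 = extractelement <2 x i32> %vec, i32 0

With this change the target is instead asked whether a 4-byte-aligned
<2 x i32> access is both legal and fast. AMDGPU reports that it is, so
the load is rewritten in the bitcast type and the extract can then be
narrowed to a single buffer_load_dword, as the new test checks.
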
 llvm/include/llvm/Target/TargetLowering.h          | 17 ++++++++--
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp      |  7 ++--
 .../CodeGen/AMDGPU/reduce-load-width-alignment.ll  | 38 ++++++++++++++++++++++
 llvm/test/CodeGen/X86/avx512-mask-op.ll            |  6 ++--
 llvm/test/CodeGen/X86/masked_gather_scatter.ll     |  2 +-
 .../CodeGen/X86/merge-consecutive-loads-512.ll     |  2 +-
 6 files changed, 62 insertions(+), 10 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll

diff --git a/llvm/include/llvm/Target/TargetLowering.h b/llvm/include/llvm/Target/TargetLowering.h
index 2074367..9b5ff57 100644
--- a/llvm/include/llvm/Target/TargetLowering.h
+++ b/llvm/include/llvm/Target/TargetLowering.h
@@ -268,8 +268,21 @@ public:
   /// efficiently, casting the load to a smaller vector of larger types and
   /// loading is more efficient, however, this can be undone by optimizations in
   /// dag combiner.
-  virtual bool isLoadBitCastBeneficial(EVT /* Load */,
-                                       EVT /* Bitcast */) const {
+  virtual bool isLoadBitCastBeneficial(EVT LoadVT,
+                                       EVT BitcastVT) const {
+    // Don't do if we could do an indexed load on the original type, but not on
+    // the new one.
+    if (!LoadVT.isSimple() || !BitcastVT.isSimple())
+      return true;
+
+    MVT LoadMVT = LoadVT.getSimpleVT();
+
+    // Don't bother doing this if it's just going to be promoted again later, as
+    // doing so might interfere with other combines.
+    if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
+        getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
+      return false;
+
     return true;
   }
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 6c98008..690a53e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -7366,11 +7366,12 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
       (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)) &&
       TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
-    unsigned Align = DAG.getDataLayout().getABITypeAlignment(
-        VT.getTypeForEVT(*DAG.getContext()));
     unsigned OrigAlign = LN0->getAlignment();
 
-    if (Align <= OrigAlign) {
+    bool Fast = false;
+    if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
+                               LN0->getAddressSpace(), OrigAlign, &Fast) &&
+        Fast) {
       SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(),
                                  LN0->getBasePtr(), LN0->getPointerInfo(),
                                  LN0->isVolatile(), LN0->isNonTemporal(),
diff --git a/llvm/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll b/llvm/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll
new file mode 100644
index 0000000..c255b05
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll
@@ -0,0 +1,38 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}reduce_i64_load_align_4_width_to_i32:
+; GCN: buffer_load_dword [[VAL:v[0-9]+]]
+; GCN: v_and_b32_e32 v{{[0-9]+}}, 0x12d687, [[VAL]]
+; GCN: buffer_store_dwordx2
+define void @reduce_i64_load_align_4_width_to_i32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+  %a = load i64, i64 addrspace(1)* %in, align 4
+  %and = and i64 %a, 1234567
+  store i64 %and, i64 addrspace(1)* %out, align 8
+  ret void
+}
+
+; GCN-LABEL: {{^}}reduce_i64_align_4_bitcast_v2i32_elt0:
+; GCN: buffer_load_dword [[VAL:v[0-9]+]]
+; GCN: buffer_store_dword [[VAL]]
+define void @reduce_i64_align_4_bitcast_v2i32_elt0(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+  %a = load i64, i64 addrspace(1)* %in, align 4
+  %vec = bitcast i64 %a to <2 x i32>
+  %elt0 = extractelement <2 x i32> %vec, i32 0
+  store i32 %elt0, i32 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}reduce_i64_align_4_bitcast_v2i32_elt1:
+; GCN: buffer_load_dword [[VAL:v[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:4
+; GCN: buffer_store_dword [[VAL]]
+define void @reduce_i64_align_4_bitcast_v2i32_elt1(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+  %a = load i64, i64 addrspace(1)* %in, align 4
+  %vec = bitcast i64 %a to <2 x i32>
+  %elt1 = extractelement <2 x i32> %vec, i32 1
+  store i32 %elt1, i32 addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/X86/avx512-mask-op.ll b/llvm/test/CodeGen/X86/avx512-mask-op.ll
index ea6a7af..de0c97c 100644
--- a/llvm/test/CodeGen/X86/avx512-mask-op.ll
+++ b/llvm/test/CodeGen/X86/avx512-mask-op.ll
@@ -53,7 +53,7 @@ define void @mask16_mem(i16* %ptr) {
 define void @mask8_mem(i8* %ptr) {
 ; KNL-LABEL: mask8_mem:
 ; KNL: ## BB#0:
-; KNL-NEXT: movb (%rdi), %al
+; KNL-NEXT: movzbw (%rdi), %ax
 ; KNL-NEXT: kmovw %eax, %k0
 ; KNL-NEXT: knotw %k0, %k0
 ; KNL-NEXT: kmovw %k0, %eax
@@ -951,7 +951,7 @@ define <16 x i32> @load_16i1(<16 x i1>* %a) {
 define <2 x i16> @load_2i1(<2 x i1>* %a) {
 ; KNL-LABEL: load_2i1:
 ; KNL: ## BB#0:
-; KNL-NEXT: movb (%rdi), %al
+; KNL-NEXT: movzbw (%rdi), %ax
 ; KNL-NEXT: kmovw %eax, %k1
 ; KNL-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
 ; KNL-NEXT: retq
@@ -969,7 +969,7 @@ define <2 x i16> @load_2i1(<2 x i1>* %a) {
 define <4 x i16> @load_4i1(<4 x i1>* %a) {
 ; KNL-LABEL: load_4i1:
 ; KNL: ## BB#0:
-; KNL-NEXT: movb (%rdi), %al
+; KNL-NEXT: movzbw (%rdi), %ax
 ; KNL-NEXT: kmovw %eax, %k1
 ; KNL-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
 ; KNL-NEXT: vpmovqd %zmm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index ed88be0..f153338a 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -291,7 +291,7 @@ define <8 x i32> @test7(i32* %base, <8 x i32> %ind, i8 %mask) {
 ; KNL_32-LABEL: test7:
 ; KNL_32: # BB#0:
 ; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; KNL_32-NEXT: movb {{[0-9]+}}(%esp), %cl
+; KNL_32-NEXT: movzbw {{[0-9]+}}(%esp), %cx
 ; KNL_32-NEXT: kmovw %ecx, %k1
 ; KNL_32-NEXT: vpmovsxdq %ymm0, %zmm0
 ; KNL_32-NEXT: kmovw %k1, %k2
diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll
index 3b6582f..fafd796 100644
--- a/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll
+++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll
@@ -234,7 +234,7 @@ define <8 x i64> @merge_8i64_i64_1u3u5zu8(i64* %ptr) nounwind uwtable noinline s
 ; X32-AVX512F-LABEL: merge_8i64_i64_1u3u5zu8:
 ; X32-AVX512F: # BB#0:
 ; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vmovdqu32 8(%eax), %zmm0
+; X32-AVX512F-NEXT: vmovdqu64 8(%eax), %zmm0
 ; X32-AVX512F-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; X32-AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,0,u,u,2,0,u,u,4,0,13,0,u,u,7,0>
 ; X32-AVX512F-NEXT: vpermt2q %zmm1, %zmm2, %zmm0
-- 
2.7.4