From: Hal Finkel
Date: Fri, 21 Nov 2014 04:35:51 +0000 (+0000)
Subject: [PPC] Use SeparateConstOffsetFromGEP
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=f413be11f0df6374d4091114ec81a758a14b4f18;p=platform%2Fupstream%2Fllvm.git

[PPC] Use SeparateConstOffsetFromGEP

This enables SeparateConstOffsetFromGEP in the PowerPC backend, mirroring
r222331, which enabled it on AArch64. On a POWER7 machine, this yields a 30%
speedup on SingleSource/Benchmarks/Shootout/nestedloop (this might just be
from LICM; a store is moved out of the inner loop) and a potential speedup on
MultiSource/Benchmarks/mediabench/mpeg2/mpeg2dec/mpeg2decode. Regardless, it
makes some code look cleaner, and synchronizing the backends in this regard
seems like a generally good thing.

llvm-svn: 222504
---

diff --git a/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp b/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
index f15189c..e930a9c 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -22,6 +22,7 @@
 #include "llvm/Support/FormattedStream.h"
 #include "llvm/Support/TargetRegistry.h"
 #include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/Scalar.h"
 using namespace llvm;
 
 static cl::
@@ -32,6 +33,11 @@ static cl::opt<bool>
 VSXFMAMutateEarly("schedule-ppc-vsx-fma-mutation-early", cl::Hidden,
   cl::desc("Schedule VSX FMA instruction mutation early"));
 
+static cl::opt<bool>
+EnableGEPOpt("ppc-gep-opt", cl::Hidden,
+             cl::desc("Enable optimizations on complex GEPs"),
+             cl::init(true));
+
 extern "C" void LLVMInitializePowerPCTarget() {
   // Register the targets
   RegisterTargetMachine<PPC32TargetMachine> A(ThePPC32Target);
@@ -168,6 +174,20 @@ TargetPassConfig *PPCTargetMachine::createPassConfig(PassManagerBase &PM) {
 
 void PPCPassConfig::addIRPasses() {
   addPass(createAtomicExpandPass(&getPPCTargetMachine()));
+
+  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
+    // Call the SeparateConstOffsetFromGEP pass to extract constants within
+    // indices and lower a GEP with multiple indices to either arithmetic
+    // operations or multiple GEPs with a single index.
+    addPass(createSeparateConstOffsetFromGEPPass(TM, true));
+    // Call the EarlyCSE pass to find and remove common subexpressions in the
+    // lowered result.
+    addPass(createEarlyCSEPass());
+    // Do loop invariant code motion in case part of the lowered result is
+    // invariant.
+    addPass(createLICMPass());
+  }
+
   TargetPassConfig::addIRPasses();
 }
 
diff --git a/llvm/test/CodeGen/PowerPC/ppc64-gep-opt.ll b/llvm/test/CodeGen/PowerPC/ppc64-gep-opt.ll
new file mode 100644
index 0000000..14cf9a7
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/ppc64-gep-opt.ll
@@ -0,0 +1,157 @@
+; RUN: llc -O3 -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -O3 -print-after=codegenprepare -mcpu=ppc64 < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-NoAA <%t %s
+; RUN: llc -O3 -print-after=codegenprepare -mcpu=pwr7 < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-UseAA <%t %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; The following test cases check enabling the SeparateConstOffsetFromGEP pass
+; in the PPC backend. If useAA() returns true, it will lower a GEP with
+; multiple indices into GEPs with a single index; otherwise, it will lower it
+; into a "ptrtoint+arithmetic+inttoptr" form.
+
+%struct = type { i32, i32, i32, i32, [20 x i32] }
+
+; Check that when two complex GEPs are used in two basic blocks, LLVM can
+; eliminate the common subexpression for the second use.
+define void @test_GEP_CSE([240 x %struct]* %string, i32* %adj, i32 %lib, i64 %idxprom) {
+  %liberties = getelementptr [240 x %struct]* %string, i64 1, i64 %idxprom, i32 3
+  %1 = load i32* %liberties, align 4
+  %cmp = icmp eq i32 %1, %lib
+  br i1 %cmp, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  %origin = getelementptr [240 x %struct]* %string, i64 1, i64 %idxprom, i32 2
+  %2 = load i32* %origin, align 4
+  store i32 %2, i32* %adj, align 4
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; CHECK-NoAA-LABEL: @test_GEP_CSE(
+; CHECK-NoAA: [[PTR0:%[a-zA-Z0-9]+]] = ptrtoint [240 x %struct]* %string to i64
+; CHECK-NoAA: [[PTR1:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
+; CHECK-NoAA: [[PTR2:%[a-zA-Z0-9]+]] = add i64 [[PTR0]], [[PTR1]]
+; CHECK-NoAA: add i64 [[PTR2]], 23052
+; CHECK-NoAA: inttoptr
+; CHECK-NoAA: if.then:
+; CHECK-NoAA-NOT: ptrtoint
+; CHECK-NoAA-NOT: mul
+; CHECK-NoAA: add i64 [[PTR2]], 23048
+; CHECK-NoAA: inttoptr
+
+; CHECK-UseAA-LABEL: @test_GEP_CSE(
+; CHECK-UseAA: [[PTR0:%[a-zA-Z0-9]+]] = bitcast [240 x %struct]* %string to i8*
+; CHECK-UseAA: [[IDX:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
+; CHECK-UseAA: [[PTR1:%[a-zA-Z0-9]+]] = getelementptr i8* [[PTR0]], i64 [[IDX]]
+; CHECK-UseAA: getelementptr i8* [[PTR1]], i64 23052
+; CHECK-UseAA: bitcast
+; CHECK-UseAA: if.then:
+; CHECK-UseAA: getelementptr i8* [[PTR1]], i64 23048
+; CHECK-UseAA: bitcast
+
+%class.my = type { i32, [128 x i32], i32, [256 x %struct.pt]}
+%struct.pt = type { %struct.point*, i32, i32 }
+%struct.point = type { i32, i32 }
+
+; Check that when a GEP is used across two basic blocks, LLVM can sink the
+; address calculation and codegen can generate a better addressing mode for
+; the second use.
+define void @test_GEP_across_BB(%class.my* %this, i64 %idx) {
+  %1 = getelementptr %class.my* %this, i64 0, i32 3, i64 %idx, i32 1
+  %2 = load i32* %1, align 4
+  %3 = getelementptr %class.my* %this, i64 0, i32 3, i64 %idx, i32 2
+  %4 = load i32* %3, align 4
+  %5 = icmp eq i32 %2, %4
+  br i1 %5, label %if.true, label %exit
+
+if.true:
+  %6 = shl i32 %4, 1
+  store i32 %6, i32* %3, align 4
+  br label %exit
+
+exit:
+  %7 = add nsw i32 %4, 1
+  store i32 %7, i32* %1, align 4
+  ret void
+}
+; CHECK-LABEL: test_GEP_across_BB:
+; CHECK-NOT: lwzu
+; CHECK: blr
+
+; CHECK-NoAA-LABEL: test_GEP_across_BB(
+; CHECK-NoAA: add i64 [[TMP:%[a-zA-Z0-9]+]], 528
+; CHECK-NoAA: add i64 [[TMP]], 532
+; CHECK-NoAA: if.true:
+; CHECK-NoAA: {{%sunk[a-zA-Z0-9]+}} = add i64 [[TMP]], 532
+; CHECK-NoAA: exit:
+; CHECK-NoAA: {{%sunk[a-zA-Z0-9]+}} = add i64 [[TMP]], 528
+
+; CHECK-UseAA-LABEL: test_GEP_across_BB(
+; CHECK-UseAA: [[PTR0:%[a-zA-Z0-9]+]] = getelementptr
+; CHECK-UseAA: getelementptr i8* [[PTR0]], i64 528
+; CHECK-UseAA: getelementptr i8* [[PTR0]], i64 532
+; CHECK-UseAA: if.true:
+; CHECK-UseAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8* [[PTR0]], i64 532
+; CHECK-UseAA: exit:
+; CHECK-UseAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8* [[PTR0]], i64 528
+
+%struct.S = type { float, double }
+@struct_array = global [1024 x %struct.S] zeroinitializer, align 16
+
+; The following two test cases check that we can extract constant offsets from
+; indices of struct type.
+; The constant offsets are from indices "i64 %idxprom" and "i32 1". As the
+; alloc size of %struct.S is 16, and "i32 1" is the 2nd element whose field
+; offset is 8, the total constant offset is (5 * 16 + 8) = 88.
+define double* @test-struct_1(i32 %i) {
+entry:
+  %add = add nsw i32 %i, 5
+  %idxprom = sext i32 %add to i64
+  %p = getelementptr [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
+  ret double* %p
+}
+; CHECK-NoAA-LABEL: @test-struct_1(
+; CHECK-NoAA-NOT: getelementptr
+; CHECK-NoAA: add i64 %{{[a-zA-Z0-9]+}}, 88
+
+; CHECK-UseAA-LABEL: @test-struct_1(
+; CHECK-UseAA: getelementptr i8* %{{[a-zA-Z0-9]+}}, i64 88
+
+%struct3 = type { i64, i32 }
+%struct2 = type { %struct3, i32 }
+%struct1 = type { i64, %struct2 }
+%struct0 = type { i32, i32, i64*, [100 x %struct1] }
+
+; The constant offsets are from indices "i32 3", "i64 %arrayidx" and "i32 1".
+; "i32 3" is the 4th element whose field offset is 16. The alloc size of
+; %struct1 is 32. "i32 1" is the 2nd element whose field offset is 8. So the
+; total constant offset is 16 + (-2 * 32) + 8 = -40.
+define %struct2* @test-struct_2(%struct0* %ptr, i64 %idx) {
+entry:
+  %arrayidx = add nsw i64 %idx, -2
+  %ptr2 = getelementptr %struct0* %ptr, i64 0, i32 3, i64 %arrayidx, i32 1
+  ret %struct2* %ptr2
+}
+; CHECK-NoAA-LABEL: @test-struct_2(
+; CHECK-NoAA-NOT: = getelementptr
+; CHECK-NoAA: add i64 %{{[a-zA-Z0-9]+}}, -40
+
+; CHECK-UseAA-LABEL: @test-struct_2(
+; CHECK-UseAA: getelementptr i8* %{{[a-zA-Z0-9]+}}, i64 -40
+
+; Test that when an index is the sum of two constants, the
+; SeparateConstOffsetFromGEP pass does not generate an incorrect result.
+define void @test_const_add([3 x i32]* %in) {
+  %inc = add nsw i32 2, 1
+  %idxprom = sext i32 %inc to i64
+  %arrayidx = getelementptr [3 x i32]* %in, i64 %idxprom, i64 2
+  store i32 0, i32* %arrayidx, align 4
+  ret void
+}
+; CHECK-LABEL: test_const_add:
+; CHECK: li [[REG:[0-9]+]], 0
+; CHECK: stw [[REG]], 44(3)
+; CHECK: blr