From 70dd7f999ddc454316a3a05053d1d02714fedc7b Mon Sep 17 00:00:00 2001
From: Michael Liao
Date: Wed, 20 Mar 2013 22:01:10 +0000
Subject: [PATCH] Correct cost model for vector shift on AVX2

- After moving the logic that recognizes a vector shift by a scalar amount
  from DAG combining into DAG lowering, we declare all vector shifts as
  custom-lowered even when the shift is legal on AVX2. As a result, the cost
  model needs special tuning to identify these legal cases.

llvm-svn: 177586
---
 llvm/lib/Target/X86/X86TargetTransformInfo.cpp | 23 +++++++++++
 llvm/test/Analysis/CostModel/X86/arith.ll      | 54 ++++++++++++++++++++++++++
 2 files changed, 77 insertions(+)

diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index 777ef50..3e3b86e 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -169,6 +169,29 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty) const {
   int ISD = TLI->InstructionOpcodeToISD(Opcode);
   assert(ISD && "Invalid opcode");
 
+  static const CostTblEntry AVX2CostTable[] = {
+    // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them as
+    // custom-lowered to detect the cases where the shift amount is a scalar.
+    { ISD::SHL,  MVT::v4i32,  1 },
+    { ISD::SRL,  MVT::v4i32,  1 },
+    { ISD::SRA,  MVT::v4i32,  1 },
+    { ISD::SHL,  MVT::v8i32,  1 },
+    { ISD::SRL,  MVT::v8i32,  1 },
+    { ISD::SRA,  MVT::v8i32,  1 },
+    { ISD::SHL,  MVT::v2i64,  1 },
+    { ISD::SRL,  MVT::v2i64,  1 },
+    { ISD::SHL,  MVT::v4i64,  1 },
+    { ISD::SRL,  MVT::v4i64,  1 },
+  };
+
+  // Look for AVX2 lowering tricks.
+  if (ST->hasAVX2()) {
+    int Idx = CostTableLookup(AVX2CostTable, array_lengthof(AVX2CostTable),
+                              ISD, LT.second);
+    if (Idx != -1)
+      return LT.first * AVX2CostTable[Idx].Cost;
+  }
+
   static const CostTblEntry AVX1CostTable[] = {
     // We don't have to scalarize unsupported ops. We can issue two half-sized
     // operations and we only need to extract the upper YMM half.
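
As a rough aid to reading the hunk above, the following standalone C++ sketch mirrors the cost-table lookup pattern it introduces. All names in the sketch (OpCostEntry, lookupCost, LTFirst) are illustrative stand-ins invented for this example; the real code uses LLVM's CostTblEntry, CostTableLookup, and the type-legalization result LT.

#include <cstdio>

enum Op { SHL, SRL, SRA };
enum Ty { v4i32, v8i32, v2i64, v4i64 };

struct OpCostEntry {
  Op  Opcode;   // the ISD-style opcode being costed
  Ty  Type;     // the legalized vector type
  int Cost;     // cost per legalized operation
};

// Linear scan over a small static cost table: return the index of the
// matching (opcode, type) entry, or -1 if there is no entry.
static int lookupCost(const OpCostEntry *Tbl, int Len, Op Opcode, Ty Type) {
  for (int I = 0; I != Len; ++I)
    if (Tbl[I].Opcode == Opcode && Tbl[I].Type == Type)
      return I;
  return -1;
}

int main() {
  static const OpCostEntry AVX2Shifts[] = {
      {SHL, v4i32, 1}, {SRL, v4i32, 1}, {SRA, v4i32, 1},
      {SHL, v8i32, 1}, {SRL, v8i32, 1}, {SRA, v8i32, 1},
      {SHL, v2i64, 1}, {SRL, v2i64, 1},
      {SHL, v4i64, 1}, {SRL, v4i64, 1},
  };
  const int Len = sizeof(AVX2Shifts) / sizeof(AVX2Shifts[0]);

  // LTFirst plays the role of LT.first: how many legalized operations the
  // original type splits into (1 here, since v8i32 is legal on AVX2).
  int LTFirst = 1;
  int Idx = lookupCost(AVX2Shifts, Len, SHL, v8i32);
  if (Idx != -1)
    printf("cost = %d\n", LTFirst * AVX2Shifts[Idx].Cost);
  return 0;
}

Compiled and run, the sketch prints "cost = 1", matching the new AVX2 expectations checked by the test diff below.
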
diff --git a/llvm/test/Analysis/CostModel/X86/arith.ll b/llvm/test/Analysis/CostModel/X86/arith.ll
index f0521ba..5f94441 100644
--- a/llvm/test/Analysis/CostModel/X86/arith.ll
+++ b/llvm/test/Analysis/CostModel/X86/arith.ll
@@ -72,3 +72,57 @@ define i32 @fmul(i32 %arg) {
   %B = fmul <8 x float> undef, undef
   ret i32 undef
 }
+
+; AVX: shift
+; AVX2: shift
+define void @shift() {
+  ; AVX: cost of 2 {{.*}} shl
+  ; AVX2: cost of 1 {{.*}} shl
+  %A0 = shl <4 x i32> undef, undef
+  ; AVX: cost of 2 {{.*}} shl
+  ; AVX2: cost of 1 {{.*}} shl
+  %A1 = shl <2 x i64> undef, undef
+
+  ; AVX: cost of 2 {{.*}} lshr
+  ; AVX2: cost of 1 {{.*}} lshr
+  %B0 = lshr <4 x i32> undef, undef
+  ; AVX: cost of 2 {{.*}} lshr
+  ; AVX2: cost of 1 {{.*}} lshr
+  %B1 = lshr <2 x i64> undef, undef
+
+  ; AVX: cost of 2 {{.*}} ashr
+  ; AVX2: cost of 1 {{.*}} ashr
+  %C0 = ashr <4 x i32> undef, undef
+  ; AVX: cost of 6 {{.*}} ashr
+  ; AVX2: cost of 6 {{.*}} ashr
+  %C1 = ashr <2 x i64> undef, undef
+
+  ret void
+}
+
+; AVX: avx2shift
+; AVX2: avx2shift
+define void @avx2shift() {
+  ; AVX: cost of 2 {{.*}} shl
+  ; AVX2: cost of 1 {{.*}} shl
+  %A0 = shl <8 x i32> undef, undef
+  ; AVX: cost of 2 {{.*}} shl
+  ; AVX2: cost of 1 {{.*}} shl
+  %A1 = shl <4 x i64> undef, undef
+
+  ; AVX: cost of 2 {{.*}} lshr
+  ; AVX2: cost of 1 {{.*}} lshr
+  %B0 = lshr <8 x i32> undef, undef
+  ; AVX: cost of 2 {{.*}} lshr
+  ; AVX2: cost of 1 {{.*}} lshr
+  %B1 = lshr <4 x i64> undef, undef
+
+  ; AVX: cost of 2 {{.*}} ashr
+  ; AVX2: cost of 1 {{.*}} ashr
+  %C0 = ashr <8 x i32> undef, undef
+  ; AVX: cost of 12 {{.*}} ashr
+  ; AVX2: cost of 12 {{.*}} ashr
+  %C1 = ashr <4 x i64> undef, undef
+
+  ret void
+}
-- 
2.7.4
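
For context, here is a small, self-contained IR sketch (not part of the patch; function names are made up for illustration) of the distinction the commit message draws: on AVX2 a 256-bit i32 shift by a uniform amount can lower to a single uniform-count shift (e.g. vpslld), while a shift by per-lane amounts can lower to a single variable shift (e.g. vpsllvd). That is why these shifts keep a cost of 1 in the new table even though the operation is declared custom.

define <8 x i32> @shl_uniform(<8 x i32> %x) {
  ; every lane shifted by the same amount
  %r = shl <8 x i32> %x, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
  ret <8 x i32> %r
}

define <8 x i32> @shl_per_lane(<8 x i32> %x, <8 x i32> %amt) {
  ; per-lane shift amounts
  %r = shl <8 x i32> %x, %amt
  ret <8 x i32> %r
}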