From 20ef54f4c1d883773d1aa6e638a0603f6718ccb9 Mon Sep 17 00:00:00 2001
From: Arnold Schwaighofer
Date: Sat, 2 Mar 2013 04:02:52 +0000
Subject: [PATCH] X86 cost model: Adjust cost for custom lowered vector
 multiplies

This matters for example in the following matrix multiply:

int **mmult(int rows, int cols, int **m1, int **m2, int **m3) {
  int i, j, k, val;
  for (i=0; i<rows; i++) {
    for (j=0; j<cols; j++) {
      val = 0;
      for (k=0; k<cols; k++) {
        val += m1[i][k] * m2[k][j];
      }
      m3[i][j] = val;
    }
  }
  return m3;
}

Taken from the test-suite benchmark Shootout.

We estimate the cost of the multiply to be 2 while we generate 9 instructions
for it and end up being quite a bit slower than the scalar version.

Also, properly differentiate between AVX1 and AVX2. On AVX1 we still split the
vector into two 128-bit halves and handle the subvector muls like above with 9
instructions each. Only on AVX2 does a v4i64 multiply have a cost of 9.

I changed the test case in test/Transforms/LoopVectorize/X86/avx1.ll to use an
add instead of a mul because with a mul we now no longer vectorize. I did
verify that the mul would indeed be more expensive when vectorized with 3
PMULUDQ instructions:

vpmuludq %xmm2, %xmm3, %xmm4
vpsrlq $32, %xmm2, %xmm5
vpmuludq %xmm5, %xmm3, %xmm5
vpsllq $32, %xmm5, %xmm5
vpaddq %xmm5, %xmm4, %xmm4
vpsrlq $32, %xmm3, %xmm5
vpmuludq %xmm2, %xmm5, %xmm3
vpsllq $32, %xmm3, %xmm3
vpaddq %xmm3, %xmm4, %xmm2
---

diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ ... @@
-  if (ST->hasAVX()) {
-    int Idx = CostTableLookup(AVX1CostTable, array_lengthof(AVX1CostTable), ISD,
-                              LT.second);
+  if (ST->hasAVX() && !ST->hasAVX2()) {
+    int Idx = CostTableLookup(AVX1CostTable, array_lengthof(AVX1CostTable),
+                              ISD, LT.second);
     if (Idx != -1)
       return LT.first * AVX1CostTable[Idx].Cost;
   }
+
+  // Custom lowering of vectors.
+  static const CostTblEntry CustomLowered[] = {
+    // A v2i64/v4i64 multiply is custom lowered as a series of long
+    // multiplies(3), shifts(4) and adds(2).
+    { ISD::MUL, MVT::v2i64, 9 },
+    { ISD::MUL, MVT::v4i64, 9 },
+  };
+  int Idx = CostTableLookup(CustomLowered, array_lengthof(CustomLowered),
+                            ISD, LT.second);
+  if (Idx != -1)
+    return LT.first * CustomLowered[Idx].Cost;
+
+  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
+  // 2x pmuludq, 2x shuffle.
+  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
+      !ST->hasSSE41())
+    return 6;
+
   // Fallback to the default implementation.
   return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty);
 }
diff --git a/llvm/test/Analysis/CostModel/X86/arith.ll b/llvm/test/Analysis/CostModel/X86/arith.ll
index ae78d44..f0521ba 100644
--- a/llvm/test/Analysis/CostModel/X86/arith.ll
+++ b/llvm/test/Analysis/CostModel/X86/arith.ll
@@ -1,4 +1,6 @@
 ; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=core2 | FileCheck %s --check-prefix=SSE3
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=core-avx2 | FileCheck %s --check-prefix=AVX2
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
@@ -32,7 +34,37 @@ define i32 @xor(i32 %arg) {
   ret i32 undef
 }
 
+; CHECK: mul
+define void @mul() {
+  ; A <2 x i32> gets expanded to a <2 x i64> vector.
+  ; A <2 x i64> vector multiply is implemented using
+  ; 3 PMULUDQ and 2 PADDQ and 4 shifts.
+  ;CHECK: cost of 9 {{.*}} mul
+  %A0 = mul <2 x i32> undef, undef
+  ;CHECK: cost of 9 {{.*}} mul
+  %A1 = mul <2 x i64> undef, undef
+  ;CHECK: cost of 18 {{.*}} mul
+  %A2 = mul <4 x i64> undef, undef
+  ret void
+}
+
+; SSE3: sse3mull
+define void @sse3mull() {
+  ; SSE3: cost of 6 {{.*}} mul
+  %A0 = mul <4 x i32> undef, undef
+  ret void
+  ; SSE3: avx2mull
+}
+
+; AVX2: avx2mull
+define void @avx2mull() {
+  ; AVX2: cost of 9 {{.*}} mul
+  %A0 = mul <4 x i64> undef, undef
+  ret void
+  ; AVX2: fmul
+}
+; CHECK: fmul
 
 define i32 @fmul(i32 %arg) {
   ;CHECK: cost of 1 {{.*}} fmul
   %A = fmul <4 x float> undef, undef
diff --git a/llvm/test/Transforms/LoopVectorize/X86/avx1.ll b/llvm/test/Transforms/LoopVectorize/X86/avx1.ll
index a85c6fe..6c0366e 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/avx1.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/avx1.ll
@@ -27,7 +27,7 @@ define i32 @read_mod_write_single_ptr(float* nocapture %a, i32 %n) nounwind uwta
 
 
 ;CHECK: @read_mod_i64
-;CHECK: load <4 x i64>
+;CHECK: load <2 x i64>
 ;CHECK: ret i32
 define i32 @read_mod_i64(i64* nocapture %a, i32 %n) nounwind uwtable ssp {
   %1 = icmp sgt i32 %n, 0
@@ -37,7 +37,7 @@ define i32 @read_mod_i64(i64* nocapture %a, i32 %n) nounwind uwtable ssp {
   %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
   %2 = getelementptr inbounds i64* %a, i64 %indvars.iv
   %3 = load i64* %2, align 4
-  %4 = mul i64 %3, 3
+  %4 = add i64 %3, 3
   store i64 %4, i64* %2, align 4
   %indvars.iv.next = add i64 %indvars.iv, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
-- 
2.7.4
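
The cost of 9 in the new CustomLowered table corresponds to the pmuludq-based
expansion described above (3 multiplies, 4 shifts, 2 adds). The following is a
minimal C sketch of that decomposition using SSE2 intrinsics; the helper name
mul_v2i64_sse2 is illustrative and not part of the patch, which emits the
equivalent instruction sequence directly in the backend:

#include <emmintrin.h>

/* Per 64-bit lane: a*b mod 2^64 = aL*bL + ((aH*bL + aL*bH) << 32),
 * where aH/aL are the high/low 32-bit halves of the lane. This is
 * exactly 9 instructions: 3 pmuludq, 4 shifts, 2 paddq. */
static __m128i mul_v2i64_sse2(__m128i a, __m128i b) {
  __m128i lo = _mm_mul_epu32(a, b);      /* pmuludq: aL*bL */
  __m128i aH = _mm_srli_epi64(a, 32);    /* psrlq: isolate aH */
  __m128i t1 = _mm_mul_epu32(aH, b);     /* pmuludq: aH*bL */
  t1 = _mm_slli_epi64(t1, 32);           /* psllq */
  __m128i r = _mm_add_epi64(lo, t1);     /* paddq */
  __m128i bH = _mm_srli_epi64(b, 32);    /* psrlq: isolate bH */
  __m128i t2 = _mm_mul_epu32(a, bH);     /* pmuludq: aL*bH */
  t2 = _mm_slli_epi64(t2, 32);           /* psllq */
  return _mm_add_epi64(r, t2);           /* paddq */
}

On AVX1 a v4i64 multiply is additionally split into two 128-bit halves
(LT.first == 2), which is why the arith.ll test expects a cost of 18 there,
while AVX2 runs the same recipe on 256-bit registers for a cost of 9.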
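
Similarly, the special-cased cost of 6 for a v4i32 mul on SSE2/SSE3 models the
"2x shuffle, 2x pmuludq, 2x shuffle" sequence used when pmulld (SSE4.1) is not
available. A sketch of one such 6-instruction sequence, again with an
illustrative helper name:

#include <emmintrin.h>

/* Multiply even and odd 32-bit lanes separately with pmuludq, then
 * re-pack the low 32 bits of the four products into lane order. */
static __m128i mul_v4i32_sse2(__m128i a, __m128i b) {
  __m128i aOdd = _mm_shuffle_epi32(a, _MM_SHUFFLE(3, 3, 1, 1)); /* shuffle */
  __m128i bOdd = _mm_shuffle_epi32(b, _MM_SHUFFLE(3, 3, 1, 1)); /* shuffle */
  __m128i even = _mm_mul_epu32(a, b);        /* pmuludq: lanes 0 and 2 */
  __m128i odd = _mm_mul_epu32(aOdd, bOdd);   /* pmuludq: lanes 1 and 3 */
  /* low dwords of the products: [a0*b0, a2*b2, a1*b1, a3*b3] */
  __m128i packed = _mm_castps_si128(
      _mm_shuffle_ps(_mm_castsi128_ps(even), _mm_castsi128_ps(odd),
                     _MM_SHUFFLE(2, 0, 2, 0)));                 /* shufps */
  return _mm_shuffle_epi32(packed, _MM_SHUFFLE(3, 1, 2, 0));    /* pshufd */
}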