{ ISD::MUL, MVT::v8i32, 4 },
{ ISD::SUB, MVT::v8i32, 4 },
{ ISD::ADD, MVT::v8i32, 4 },
- { ISD::MUL, MVT::v4i64, 4 },
{ ISD::SUB, MVT::v4i64, 4 },
{ ISD::ADD, MVT::v4i64, 4 },
- };
+ // A v4i64 multiply is custom lowered as two split v2i64 vectors which are
+ // then lowered as a series of long multiplies (3), shifts (4) and adds (2).
+ // Because we believe v4i64 to be a legal type, we must also include the
+ // split factor of two in the cost table. Therefore, the cost here is
+ // 2 * 9 = 18 instead of 9.
+ { ISD::MUL, MVT::v4i64, 18 },
+ };
// Look for AVX1 lowering tricks.
- if (ST->hasAVX()) {
- int Idx = CostTableLookup<MVT>(AVX1CostTable, array_lengthof(AVX1CostTable), ISD,
- LT.second);
+ if (ST->hasAVX() && !ST->hasAVX2()) {
+ int Idx = CostTableLookup<MVT>(AVX1CostTable, array_lengthof(AVX1CostTable),
+ ISD, LT.second);
if (Idx != -1)
return LT.first * AVX1CostTable[Idx].Cost;
}
+
+ // Custom lowering of vectors.
+ static const CostTblEntry<MVT> CustomLowered[] = {
+ // A v2i64/v4i64 vector multiply is custom lowered as a series of long
+ // multiplies (3), shifts (4) and adds (2); see the sketch below the table.
+ { ISD::MUL, MVT::v2i64, 9 },
+ { ISD::MUL, MVT::v4i64, 9 },
+ };
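+ // A rough sketch of that lowering for a single v2i64 multiply on SSE2
+ // (illustrative only; the exact sequence emitted by the DAG lowering may
+ // differ):
+ //   AloBlo = pmuludq a, b              ; lo(a) * lo(b)
+ //   AloBhi = pmuludq a, (psrlq b, 32)  ; lo(a) * hi(b)
+ //   AhiBlo = pmuludq (psrlq a, 32), b  ; hi(a) * lo(b)
+ //   res    = AloBlo + psllq(AloBhi, 32) + psllq(AhiBlo, 32)
+ // i.e. 3 multiplies, 4 shifts (2 psrlq + 2 psllq) and 2 adds.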
+ int Idx = CostTableLookup<MVT>(CustomLowered, array_lengthof(CustomLowered),
+ ISD, LT.second);
+ if (Idx != -1)
+ return LT.first * CustomLowered[Idx].Cost;
+
+ // Special lowering of v4i32 mul on SSE2 and SSE3: lower it as 2x shuffle,
+ // 2x pmuludq, 2x shuffle.
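+ // (pmuludq only multiplies the even 32-bit lanes into 64-bit results, so the
+ // odd lanes are shuffled down, multiplied separately, and the two halves are
+ // shuffled back together; roughly 6 instructions in total.)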
+ if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
+ !ST->hasSSE41())
+ return 6;
+
// Fallback to the default implementation.
return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty);
}
; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=core2 | FileCheck %s --check-prefix=SSE3
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=core-avx2 | FileCheck %s --check-prefix=AVX2
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
ret i32 undef
}
+; CHECK: mul
+define void @mul() {
+ ; A <2 x i32> gets expanded to a <2 x i64> vector.
+ ; A <2 x i64> vector multiply is implemented using
+ ; 3 PMULUDQ, 4 shifts and 2 PADDs.
+ ;CHECK: cost of 9 {{.*}} mul
+ %A0 = mul <2 x i32> undef, undef
+ ;CHECK: cost of 9 {{.*}} mul
+ %A1 = mul <2 x i64> undef, undef
+ ;CHECK: cost of 18 {{.*}} mul
+ %A2 = mul <4 x i64> undef, undef
+ ret void
+}
+
+; SSE3: sse3mull
+define void @sse3mull() {
+ ; SSE3: cost of 6 {{.*}} mul
+ %A0 = mul <4 x i32> undef, undef
+ ret void
+ ; SSE3: avx2mull
+}
+
+; AVX2: avx2mull
+define void @avx2mull() {
+ ; AVX2: cost of 9 {{.*}} mul
+ %A0 = mul <4 x i64> undef, undef
+ ret void
+ ; AVX2: fmul
+}
+; CHECK: fmul
define i32 @fmul(i32 %arg) {
;CHECK: cost of 1 {{.*}} fmul
%A = fmul <4 x float> undef, undef
;CHECK: @read_mod_i64
-;CHECK: load <4 x i64>
+;CHECK: load <2 x i64>
;CHECK: ret i32
define i32 @read_mod_i64(i64* nocapture %a, i32 %n) nounwind uwtable ssp {
%1 = icmp sgt i32 %n, 0
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds i64* %a, i64 %indvars.iv
%3 = load i64* %2, align 4
- %4 = mul i64 %3, 3
+ %4 = add i64 %3, 3
store i64 %4, i64* %2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32