int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
+ static const CostTblEntry<MVT> AVX2CostTable[] = {
+    // Shifts on v4i64/v8i32 are legal on AVX2 even though we declare them
+    // as custom so that we can detect the cases where the shift amount is
+    // a scalar.
+ { ISD::SHL, MVT::v4i32, 1 },
+ { ISD::SRL, MVT::v4i32, 1 },
+ { ISD::SRA, MVT::v4i32, 1 },
+ { ISD::SHL, MVT::v8i32, 1 },
+ { ISD::SRL, MVT::v8i32, 1 },
+ { ISD::SRA, MVT::v8i32, 1 },
+ { ISD::SHL, MVT::v2i64, 1 },
+ { ISD::SRL, MVT::v2i64, 1 },
+ { ISD::SHL, MVT::v4i64, 1 },
+ { ISD::SRL, MVT::v4i64, 1 },
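+    // Note that there are no ISD::SRA entries for v2i64/v4i64: x86 has no
+    // packed 64-bit arithmetic right shift before AVX-512, so those cases
+    // fall through to the tables below.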
+ };
+
+ // Look for AVX2 lowering tricks.
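+  // LT.second is the legalized vector type and LT.first is the number of
+  // legal-type operations the original type splits into, so any table hit
+  // below is scaled by LT.first.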
+ if (ST->hasAVX2()) {
+ int Idx = CostTableLookup<MVT>(AVX2CostTable, array_lengthof(AVX2CostTable),
+ ISD, LT.second);
+ if (Idx != -1)
+ return LT.first * AVX2CostTable[Idx].Cost;
+ }
+
static const CostTblEntry<MVT> AVX1CostTable[] = {
// We don't have to scalarize unsupported ops. We can issue two half-sized
// operations and we only need to extract the upper YMM half.
%B = fmul <8 x float> undef, undef
ret i32 undef
}
+
+; AVX: shift
+; AVX2: shift
+define void @shift() {
+ ; AVX: cost of 2 {{.*}} shl
+ ; AVX2: cost of 1 {{.*}} shl
+ %A0 = shl <4 x i32> undef, undef
+ ; AVX: cost of 2 {{.*}} shl
+ ; AVX2: cost of 1 {{.*}} shl
+ %A1 = shl <2 x i64> undef, undef
+
+ ; AVX: cost of 2 {{.*}} lshr
+ ; AVX2: cost of 1 {{.*}} lshr
+ %B0 = lshr <4 x i32> undef, undef
+ ; AVX: cost of 2 {{.*}} lshr
+ ; AVX2: cost of 1 {{.*}} lshr
+ %B1 = lshr <2 x i64> undef, undef
+
+ ; AVX: cost of 2 {{.*}} ashr
+ ; AVX2: cost of 1 {{.*}} ashr
+ %C0 = ashr <4 x i32> undef, undef
+ ; AVX: cost of 6 {{.*}} ashr
+ ; AVX2: cost of 6 {{.*}} ashr
+ %C1 = ashr <2 x i64> undef, undef
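+  ; No packed 64-bit arithmetic right shift exists before AVX-512, so the
+  ; v2i64 ashr is presumably scalarized, keeping its cost high even on AVX2
+  ; (and doubling for the split v4i64 case in @avx2shift below).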
+
+ ret void
+}
+
+; AVX: avx2shift
+; AVX2: avx2shift
+define void @avx2shift() {
+ ; AVX: cost of 2 {{.*}} shl
+ ; AVX2: cost of 1 {{.*}} shl
+ %A0 = shl <8 x i32> undef, undef
+ ; AVX: cost of 2 {{.*}} shl
+ ; AVX2: cost of 1 {{.*}} shl
+ %A1 = shl <4 x i64> undef, undef
+
+ ; AVX: cost of 2 {{.*}} lshr
+ ; AVX2: cost of 1 {{.*}} lshr
+ %B0 = lshr <8 x i32> undef, undef
+ ; AVX: cost of 2 {{.*}} lshr
+ ; AVX2: cost of 1 {{.*}} lshr
+ %B1 = lshr <4 x i64> undef, undef
+
+ ; AVX: cost of 2 {{.*}} ashr
+ ; AVX2: cost of 1 {{.*}} ashr
+ %C0 = ashr <8 x i32> undef, undef
+ ; AVX: cost of 12 {{.*}} ashr
+ ; AVX2: cost of 12 {{.*}} ashr
+ %C1 = ashr <4 x i64> undef, undef
+
+ ret void
+}
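+
+; A sketch of the kind of RUN lines these prefixes assume (the exact triple
+; and cpu flags in the real test may differ):
+;   opt < %s -cost-model -analyze -mcpu=corei7-avx | FileCheck %s --check-prefix=AVX
+;   opt < %s -cost-model -analyze -mcpu=core-avx2 | FileCheck %s --check-prefix=AVX2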