From 138fda5dd2098da9b0857d34f415f723971a1c5c Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Tue, 27 Oct 2020 17:23:42 -0400
Subject: [PATCH] [CostModel] add tests for FP reductions; NFC

---
 .../Analysis/CostModel/X86/intrinsic-cost-kinds.ll | 44 ++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/llvm/test/Analysis/CostModel/X86/intrinsic-cost-kinds.ll b/llvm/test/Analysis/CostModel/X86/intrinsic-cost-kinds.ll
index 1bedeb7..4671325 100644
--- a/llvm/test/Analysis/CostModel/X86/intrinsic-cost-kinds.ll
+++ b/llvm/test/Analysis/CostModel/X86/intrinsic-cost-kinds.ll
@@ -28,6 +28,8 @@ declare <16 x i32> @llvm.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
 declare <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*>, i32, <16 x i1>, <16 x float>)
 declare void @llvm.masked.scatter.v16f32.v16p0f32(<16 x float>, <16 x float*>, i32, <16 x i1>)
 declare float @llvm.vector.reduce.fmax.v16f32(<16 x float>)
+declare float @llvm.vector.reduce.fmul.v16f32(float, <16 x float>)
+declare float @llvm.vector.reduce.fadd.v16f32(float, <16 x float>)
 
 declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1)
 
@@ -224,6 +226,48 @@ define void @reduce_fmax(<16 x float> %va) {
   ret void
 }
 
+define void @reduce_fmul(<16 x float> %va) {
+; THRU-LABEL: 'reduce_fmul'
+; THRU-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v = call float @llvm.vector.reduce.fmul.v16f32(float 4.200000e+01, <16 x float> %va)
+; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; LATE-LABEL: 'reduce_fmul'
+; LATE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v = call float @llvm.vector.reduce.fmul.v16f32(float 4.200000e+01, <16 x float> %va)
+; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+; SIZE-LABEL: 'reduce_fmul'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v = call float @llvm.vector.reduce.fmul.v16f32(float 4.200000e+01, <16 x float> %va)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+; SIZE_LATE-LABEL: 'reduce_fmul'
+; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v = call float @llvm.vector.reduce.fmul.v16f32(float 4.200000e+01, <16 x float> %va)
+; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+  %v = call float @llvm.vector.reduce.fmul.v16f32(float 42.0, <16 x float> %va)
+  ret void
+}
+
+define void @reduce_fadd_fast(<16 x float> %va) {
+; THRU-LABEL: 'reduce_fadd_fast'
+; THRU-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v = call fast float @llvm.vector.reduce.fadd.v16f32(float 0.000000e+00, <16 x float> %va)
+; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; LATE-LABEL: 'reduce_fadd_fast'
+; LATE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v = call fast float @llvm.vector.reduce.fadd.v16f32(float 0.000000e+00, <16 x float> %va)
+; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+; SIZE-LABEL: 'reduce_fadd_fast'
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v = call fast float @llvm.vector.reduce.fadd.v16f32(float 0.000000e+00, <16 x float> %va)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+; SIZE_LATE-LABEL: 'reduce_fadd_fast'
+; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v = call fast float @llvm.vector.reduce.fadd.v16f32(float 0.000000e+00, <16 x float> %va)
+; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+  %v = call fast float @llvm.vector.reduce.fadd.v16f32(float 0.0, <16 x float> %va)
+  ret void
+}
+
 define void @memcpy(i8* %a, i8* %b, i32 %c) {
 ; THRU-LABEL: 'memcpy'
 ; THRU-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %a, i8* align 1 %b, i32 32, i1 false)
-- 
2.7.4