From: Craig Topper
Date: Sun, 12 Nov 2017 18:51:08 +0000 (+0000)
Subject: [X86] Add test cases and command lines demonstrating how we accidentally select vrang...
X-Git-Tag: llvmorg-6.0.0-rc1~3593
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=6b53c4a982b5c1c609cc877a804c08a94a1cea0f;p=platform%2Fupstream%2Fllvm.git

[X86] Add test cases and command lines demonstrating how we accidentally
select vrangeps/vrangepd from vrangess/vrangesd intrinsics when the
rounding mode is CUR_DIRECTION

llvm-svn: 317998
---

diff --git a/llvm/test/CodeGen/X86/avx512dq-intrinsics.ll b/llvm/test/CodeGen/X86/avx512dq-intrinsics.ll
index be03d2f..7e31214 100644
--- a/llvm/test/CodeGen/X86/avx512dq-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512dq-intrinsics.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512dq | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512dq,avx512vl | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512DQVL

 declare <8 x i64> @llvm.x86.avx512.mask.cvtpd2qq.512(<8 x double>, <8 x i64>, i8, i32)

@@ -276,17 +277,31 @@ define <4 x float>@test_int_x86_avx512_mask_reduce_ss(<4 x float> %x0, <4 x floa
 declare <4 x float> @llvm.x86.avx512.mask.range.ss(<4 x float>, <4 x float>,<4 x float>, i8, i32, i32)

 define <4 x float>@test_int_x86_avx512_mask_range_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_mask_range_ss:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vrangess $4, {sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vrangess $4, {sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm2, %xmm0
-; CHECK-NEXT: retq
+; AVX512DQ-LABEL: test_int_x86_avx512_mask_range_ss:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k1
+; AVX512DQ-NEXT: vrangess $4, {sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; AVX512DQ-NEXT: vrangess $4, {sae}, %xmm1, %xmm0, %xmm3
+; AVX512DQ-NEXT: vrangess $4, %xmm1, %xmm0, %xmm0
+; AVX512DQ-NEXT: vaddps %xmm3, %xmm2, %xmm1
+; AVX512DQ-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512DQVL-LABEL: test_int_x86_avx512_mask_range_ss:
+; AVX512DQVL: ## BB#0:
+; AVX512DQVL-NEXT: kmovw %edi, %k1
+; AVX512DQVL-NEXT: vrangess $4, {sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; AVX512DQVL-NEXT: vrangess $4, {sae}, %xmm1, %xmm0, %xmm3
+; AVX512DQVL-NEXT: vaddps %xmm3, %xmm2, %xmm2
+; AVX512DQVL-NEXT: vrangeps $4, %xmm1, %xmm0, %xmm0
+; AVX512DQVL-NEXT: vaddps %xmm2, %xmm0, %xmm0
+; AVX512DQVL-NEXT: retq
   %res = call <4 x float> @llvm.x86.avx512.mask.range.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 %x4, i32 4, i32 8)
   %res1 = call <4 x float> @llvm.x86.avx512.mask.range.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 -1, i32 4, i32 8)
-  %res2 = fadd <4 x float> %res, %res1
-  ret <4 x float> %res2
+  %res2 = call <4 x float> @llvm.x86.avx512.mask.range.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 -1, i32 4, i32 4)
+  %res3 = fadd <4 x float> %res, %res1
+  %res4 = fadd <4 x float> %res2, %res3
+  ret <4 x float> %res4
 }

 declare <2 x double> @llvm.x86.avx512.mask.reduce.sd(<2 x double>, <2 x double>,<2 x double>, i8, i32, i32)
@@ -308,17 +323,31 @@ define <2 x double>@test_int_x86_avx512_mask_reduce_sd(<2 x double> %x0, <2 x do
 declare <2 x double> @llvm.x86.avx512.mask.range.sd(<2 x double>, <2 x double>,<2 x double>, i8, i32, i32)

 define <2 x double>@test_int_x86_avx512_mask_range_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_mask_range_sd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vrangesd $4, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vrangesd $4, {sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddpd %xmm0, %xmm2, %xmm0
-; CHECK-NEXT: retq
+; AVX512DQ-LABEL: test_int_x86_avx512_mask_range_sd:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vrangesd $4, %xmm1, %xmm0, %xmm3
+; AVX512DQ-NEXT: kmovw %edi, %k1
+; AVX512DQ-NEXT: vrangesd $4, %xmm1, %xmm0, %xmm2 {%k1}
+; AVX512DQ-NEXT: vrangesd $4, {sae}, %xmm1, %xmm0, %xmm0
+; AVX512DQ-NEXT: vaddpd %xmm0, %xmm2, %xmm0
+; AVX512DQ-NEXT: vaddpd %xmm0, %xmm3, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512DQVL-LABEL: test_int_x86_avx512_mask_range_sd:
+; AVX512DQVL: ## BB#0:
+; AVX512DQVL-NEXT: vrangepd $4, %xmm1, %xmm0, %xmm3
+; AVX512DQVL-NEXT: kmovw %edi, %k1
+; AVX512DQVL-NEXT: vrangesd $4, %xmm1, %xmm0, %xmm2 {%k1}
+; AVX512DQVL-NEXT: vrangesd $4, {sae}, %xmm1, %xmm0, %xmm0
+; AVX512DQVL-NEXT: vaddpd %xmm0, %xmm2, %xmm0
+; AVX512DQVL-NEXT: vaddpd %xmm0, %xmm3, %xmm0
+; AVX512DQVL-NEXT: retq
   %res = call <2 x double> @llvm.x86.avx512.mask.range.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 %x4, i32 4, i32 4)
   %res1 = call <2 x double> @llvm.x86.avx512.mask.range.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 -1, i32 4, i32 8)
-  %res2 = fadd <2 x double> %res, %res1
-  ret <2 x double> %res2
+  %res2 = call <2 x double> @llvm.x86.avx512.mask.range.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 -1, i32 4, i32 4)
+  %res3 = fadd <2 x double> %res, %res1
+  %res4 = fadd <2 x double> %res2, %res3
+  ret <2 x double> %res4
 }

 declare i8 @llvm.x86.avx512.mask.fpclass.pd.512(<8 x double>, i32, i8)
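
For reference, the CUR_DIRECTION case the new RUN line exercises can be reduced to a single unmasked call. The snippet below is an illustrative sketch, not part of the patch; the function name is made up, and it simply reuses the intrinsic signature and the second RUN line's llc options from the test above.

; Compile with: llc -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512dq,avx512vl
declare <4 x float> @llvm.x86.avx512.mask.range.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32, i32)

; i8 -1 is an all-ones mask and the trailing i32 4 requests CUR_DIRECTION
; rounding. Per the AVX512DQVL checks above, this call is currently lowered
; to the packed vrangeps instead of the scalar vrangess.
define <4 x float> @range_ss_cur_direction(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2) {
  %r = call <4 x float> @llvm.x86.avx512.mask.range.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4, i32 4)
  ret <4 x float> %r
}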