From 6f2c01f7124691c58a2df72ef989b5e93510fa5b Mon Sep 17 00:00:00 2001
From: Sanjay Patel <spatel@rotateright.com>
Date: Mon, 29 Feb 2016 23:59:00 +0000
Subject: [PATCH] [x86, InstCombine] transform more x86 masked loads to LLVM intrinsics

Continuation of:
http://reviews.llvm.org/rL262269

llvm-svn: 262273
---
 .../Transforms/InstCombine/InstCombineCalls.cpp    |  8 ++-
 .../Transforms/InstCombine/x86-masked-memops.ll    | 82 ++++++++++++++++++++++
 2 files changed, 89 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 43b1bf0..b8396ee 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1663,7 +1663,13 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     break;
 
   case Intrinsic::x86_avx_maskload_ps:
-    // TODO: Add the other masked load variants.
+  case Intrinsic::x86_avx_maskload_pd:
+  case Intrinsic::x86_avx_maskload_ps_256:
+  case Intrinsic::x86_avx_maskload_pd_256:
+  case Intrinsic::x86_avx2_maskload_d:
+  case Intrinsic::x86_avx2_maskload_q:
+  case Intrinsic::x86_avx2_maskload_d_256:
+  case Intrinsic::x86_avx2_maskload_q_256:
     if (Instruction *I = simplifyX86MaskedLoad(*II, *this))
       return I;
     break;
diff --git a/llvm/test/Transforms/InstCombine/x86-masked-memops.ll b/llvm/test/Transforms/InstCombine/x86-masked-memops.ll
index eb13639..970ee04 100644
--- a/llvm/test/Transforms/InstCombine/x86-masked-memops.ll
+++ b/llvm/test/Transforms/InstCombine/x86-masked-memops.ll
@@ -57,6 +57,83 @@ define <4 x float> @mload_one_one(i8* %f) {
 ; CHECK-NEXT:  ret <4 x float> %1
 }
 
+; Try doubles.
+
+define <2 x double> @mload_one_one_double(i8* %f) {
+  %ld = tail call <2 x double> @llvm.x86.avx.maskload.pd(i8* %f, <2 x i64> <i64 -1, i64 0>)
+  ret <2 x double> %ld
+
+; CHECK-LABEL: @mload_one_one_double(
+; CHECK-NEXT:  %castvec = bitcast i8* %f to <2 x double>*
+; CHECK-NEXT:  %1 = call <2 x double> @llvm.masked.load.v2f64(<2 x double>* %castvec, i32 1, <2 x i1> <i1 true, i1 false>, <2 x double> undef)
+; CHECK-NEXT:  ret <2 x double> %1
+}
+
+; Try 256-bit FP ops.
+
+define <8 x float> @mload_v8f32(i8* %f) {
+  %ld = tail call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %f, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 -1>)
+  ret <8 x float> %ld
+
+; CHECK-LABEL: @mload_v8f32(
+; CHECK-NEXT:  %castvec = bitcast i8* %f to <8 x float>*
+; CHECK-NEXT:  %1 = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %castvec, i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true>, <8 x float> undef)
+; CHECK-NEXT:  ret <8 x float> %1
+}
+
+define <4 x double> @mload_v4f64(i8* %f) {
+  %ld = tail call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 0, i64 0>)
+  ret <4 x double> %ld
+
+; CHECK-LABEL: @mload_v4f64(
+; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x double>*
+; CHECK-NEXT:  %1 = call <4 x double> @llvm.masked.load.v4f64(<4 x double>* %castvec, i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x double> undef)
+; CHECK-NEXT:  ret <4 x double> %1
+}
+
+; Try the AVX2 variants.
+
+define <4 x i32> @mload_v4i32(i8* %f) {
+  %ld = tail call <4 x i32> @llvm.x86.avx2.maskload.d(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>)
+  ret <4 x i32> %ld
+
+; CHECK-LABEL: @mload_v4i32(
+; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x i32>*
+; CHECK-NEXT:  %1 = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %castvec, i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x i32> undef)
+; CHECK-NEXT:  ret <4 x i32> %1
+}
+
+define <2 x i64> @mload_v2i64(i8* %f) {
+  %ld = tail call <2 x i64> @llvm.x86.avx2.maskload.q(i8* %f, <2 x i64> <i64 -1, i64 0>)
+  ret <2 x i64> %ld
+
+; CHECK-LABEL: @mload_v2i64(
+; CHECK-NEXT:  %castvec = bitcast i8* %f to <2 x i64>*
+; CHECK-NEXT:  %1 = call <2 x i64> @llvm.masked.load.v2i64(<2 x i64>* %castvec, i32 1, <2 x i1> <i1 true, i1 false>, <2 x i64> undef)
+; CHECK-NEXT:  ret <2 x i64> %1
+}
+
+define <8 x i32> @mload_v8i32(i8* %f) {
+  %ld = tail call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* %f, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 -1>)
+  ret <8 x i32> %ld
+
+; CHECK-LABEL: @mload_v8i32(
+; CHECK-NEXT:  %castvec = bitcast i8* %f to <8 x i32>*
+; CHECK-NEXT:  %1 = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %castvec, i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true>, <8 x i32> undef)
+; CHECK-NEXT:  ret <8 x i32> %1
+}
+
+define <4 x i64> @mload_v4i64(i8* %f) {
+  %ld = tail call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 0, i64 0>)
+  ret <4 x i64> %ld
+
+; CHECK-LABEL: @mload_v4i64(
+; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x i64>*
+; CHECK-NEXT:  %1 = call <4 x i64> @llvm.masked.load.v4i64(<4 x i64>* %castvec, i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> undef)
+; CHECK-NEXT:  ret <4 x i64> %1
+}
+
+
 ;; MASKED STORES
 
 ; If the mask isn't constant, do nothing.
@@ -195,6 +272,11 @@ declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x i64>)
 declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x i32>)
 declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8*, <4 x i64>)
 
+declare <4 x i32> @llvm.x86.avx2.maskload.d(i8*, <4 x i32>)
+declare <2 x i64> @llvm.x86.avx2.maskload.q(i8*, <2 x i64>)
+declare <8 x i32> @llvm.x86.avx2.maskload.d.256(i8*, <8 x i32>)
+declare <4 x i64> @llvm.x86.avx2.maskload.q.256(i8*, <4 x i64>)
+
 declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>)
 declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>)
 declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>)
-- 
2.7.4
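
As a quick illustration of the rewrite these tests exercise (a standalone sketch, not part of the patch): when the mask operand is a vector constant, simplifyX86MaskedLoad replaces the target-specific AVX/AVX2 maskload call with the generic @llvm.masked.load intrinsic, bitcasting the i8* pointer to a vector pointer, turning each mask element's sign bit into an i1, using alignment 1, and passing undef as the pass-through operand. The function names @before_pd and @after_pd below are made up for the example; the intrinsic names, signatures, and 2016-era typed-pointer syntax are the ones the patch itself uses. Running `opt -instcombine` on @before_pd is expected to produce the body shown in @after_pd, matching the CHECK lines above.

; Before InstCombine: AVX maskload with a constant mask (sign bit set in lane 0 only).
define <2 x double> @before_pd(i8* %p) {
  %v = tail call <2 x double> @llvm.x86.avx.maskload.pd(i8* %p, <2 x i64> <i64 -1, i64 0>)
  ret <2 x double> %v
}

; After InstCombine: generic masked load with an i1 mask derived from the sign bits,
; a vector-typed pointer, alignment 1, and an undef pass-through.
define <2 x double> @after_pd(i8* %p) {
  %castvec = bitcast i8* %p to <2 x double>*
  %v = call <2 x double> @llvm.masked.load.v2f64(<2 x double>* %castvec, i32 1, <2 x i1> <i1 true, i1 false>, <2 x double> undef)
  ret <2 x double> %v
}

declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x i64>)
declare <2 x double> @llvm.masked.load.v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)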