From da6a63db1c9e7ec8674d9efde1bed9b9500a0d19 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Sat, 12 Nov 2016 18:04:46 +0000
Subject: [PATCH] [AVX-512] Remove the remaining masked shift by immediate or
 by single value intrinsics. Autoupgrade them to the recently introduced
 unmasked versions and a select.

After this I'll add the unmasked intrinsics to InstCombineCalls to finish
making our handling of these types of shifts consistent between AVX-512 and
the legacy intrinsics.

llvm-svn: 286725
---
 llvm/include/llvm/IR/IntrinsicsX86.td | 65 --------
 llvm/lib/IR/AutoUpgrade.cpp | 139 +++++++++-------
 llvm/lib/Target/X86/X86IntrinsicsInfo.h | 22 ---
 llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll | 119 ++++++++++++++
 llvm/test/CodeGen/X86/avx512-intrinsics.ll | 120 --------------
 .../CodeGen/X86/avx512bw-intrinsics-upgrade.ll | 180 +++++++++++++++++++++
 llvm/test/CodeGen/X86/avx512bw-intrinsics.ll | 180 ---------------------
 .../CodeGen/X86/avx512vl-intrinsics-upgrade.ll | 80 +++++++++
 llvm/test/CodeGen/X86/avx512vl-intrinsics.ll | 84 +---------
 9 files changed, 465 insertions(+), 524 deletions(-)

diff --git a/llvm/include/llvm/IR/IntrinsicsX86.td b/llvm/include/llvm/IR/IntrinsicsX86.td
index 13da4c9..d3d6074 100644
--- a/llvm/include/llvm/IR/IntrinsicsX86.td
+++ b/llvm/include/llvm/IR/IntrinsicsX86.td
@@ -1882,45 +1882,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
 Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_w_512 : GCCBuiltin<"__builtin_ia32_psrlw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
- llvm_v8i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_wi_512 : GCCBuiltin<"__builtin_ia32_psrlwi512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
- llvm_i32_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_psra_w_512 : GCCBuiltin<"__builtin_ia32_psraw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
- llvm_v8i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_wi_512 : GCCBuiltin<"__builtin_ia32_psrawi512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
- llvm_i32_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_psll_d : GCCBuiltin<"__builtin_ia32_pslld512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v4i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psll_q : GCCBuiltin<"__builtin_ia32_psllq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v2i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_d : GCCBuiltin<"__builtin_ia32_psrld512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v4i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psrl_q : GCCBuiltin<"__builtin_ia32_psrlq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v2i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_d : GCCBuiltin<"__builtin_ia32_psrad512_mask">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v4i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_psra_q : GCCBuiltin<"__builtin_ia32_psraq512_mask">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v2i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
-
- def int_x86_avx512_mask_psll_w_512 : GCCBuiltin<"__builtin_ia32_psllw512_mask">,
- Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
- llvm_v8i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
- def 
int_x86_avx512_mask_psll_wi_512 : GCCBuiltin<"__builtin_ia32_psllwi512_mask">, - Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, - llvm_i32_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>; def int_x86_avx512_mask_psllv16_hi : GCCBuiltin<"__builtin_ia32_psllv16hi_mask">, Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>; @@ -1931,32 +1892,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>; - def int_x86_avx512_mask_psra_di_512 : GCCBuiltin<"__builtin_ia32_psradi512_mask">, - Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, - llvm_i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>; - def int_x86_avx512_mask_psra_q_128 : GCCBuiltin<"__builtin_ia32_psraq128_mask">, - Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, - llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>; - def int_x86_avx512_mask_psra_q_256 : GCCBuiltin<"__builtin_ia32_psraq256_mask">, - Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, - llvm_v2i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>; - def int_x86_avx512_mask_psra_qi_128 : GCCBuiltin<"__builtin_ia32_psraqi128_mask">, - Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, - llvm_i32_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>; - def int_x86_avx512_mask_psra_qi_256 : GCCBuiltin<"__builtin_ia32_psraqi256_mask">, - Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, - llvm_i32_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>; - def int_x86_avx512_mask_psra_qi_512 : GCCBuiltin<"__builtin_ia32_psraqi512_mask">, - Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, - llvm_i32_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>; - - def int_x86_avx512_mask_psrl_di_512: GCCBuiltin<"__builtin_ia32_psrldi512_mask">, - Intrinsic<[llvm_v16i32_ty], [ llvm_v16i32_ty, - llvm_i32_ty, llvm_v16i32_ty, llvm_i16_ty ], [IntrNoMem]>; - - def int_x86_avx512_mask_psrl_qi_512: GCCBuiltin<"__builtin_ia32_psrlqi512_mask">, - Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, - llvm_i32_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>; def int_x86_avx512_mask_pmultishift_qb_128: GCCBuiltin<"__builtin_ia32_vpmultishiftqb128_mask">, Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp index 49db064..016bf87 100644 --- a/llvm/lib/IR/AutoUpgrade.cpp +++ b/llvm/lib/IR/AutoUpgrade.cpp @@ -298,38 +298,18 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) { Name == "avx512.mask.sub.pd.256" || Name == "avx512.mask.sub.ps.128" || Name == "avx512.mask.sub.ps.256" || - Name == "avx512.mask.psll.d.128" || - Name == "avx512.mask.psll.d.256" || - Name == "avx512.mask.psll.q.128" || - Name == "avx512.mask.psll.q.256" || - Name == "avx512.mask.psll.w.128" || - Name == "avx512.mask.psll.w.256" || - Name == "avx512.mask.psra.d.128" || - Name == "avx512.mask.psra.d.256" || - Name == "avx512.mask.psra.w.128" || - Name == "avx512.mask.psra.w.256" || - Name == "avx512.mask.psrl.d.128" || - Name == "avx512.mask.psrl.d.256" || - Name == "avx512.mask.psrl.q.128" || - Name == "avx512.mask.psrl.q.256" || - Name == "avx512.mask.psrl.w.128" || - Name == "avx512.mask.psrl.w.256" || - Name == "avx512.mask.psll.di.128" || - Name == "avx512.mask.psll.di.256" || - Name == "avx512.mask.psll.qi.128" || - Name == "avx512.mask.psll.qi.256" || - Name == "avx512.mask.psll.wi.128" || - Name == "avx512.mask.psll.wi.256" || - Name == "avx512.mask.psra.di.128" || - Name == "avx512.mask.psra.di.256" || - Name == "avx512.mask.psra.wi.128" || - Name == 
"avx512.mask.psra.wi.256" || - Name == "avx512.mask.psrl.di.128" || - Name == "avx512.mask.psrl.di.256" || - Name == "avx512.mask.psrl.qi.128" || - Name == "avx512.mask.psrl.qi.256" || - Name == "avx512.mask.psrl.wi.128" || - Name == "avx512.mask.psrl.wi.256" || + Name.startswith("avx512.mask.psll.d") || + Name.startswith("avx512.mask.psll.q") || + Name.startswith("avx512.mask.psll.w") || + Name.startswith("avx512.mask.psra.d") || + Name.startswith("avx512.mask.psra.q") || + Name.startswith("avx512.mask.psra.w") || + Name.startswith("avx512.mask.psrl.d") || + Name.startswith("avx512.mask.psrl.q") || + Name.startswith("avx512.mask.psrl.w") || + Name.startswith("avx512.mask.pslli") || + Name.startswith("avx512.mask.psrai") || + Name.startswith("avx512.mask.psrli") || Name == "avx512.mask.psllv2.di" || Name == "avx512.mask.psllv4.di" || Name == "avx512.mask.psllv4.si" || @@ -433,23 +413,6 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) { Intrinsic::x86_xop_vfrcz_sd); return true; } - if (IsX86 && (Name.startswith("avx512.mask.pslli.") || - Name.startswith("avx512.mask.psrai.") || - Name.startswith("avx512.mask.psrli."))) { - Intrinsic::ID ShiftID; - if (Name.slice(12, 16) == "psll") - ShiftID = Name[18] == 'd' ? Intrinsic::x86_avx512_mask_psll_di_512 - : Intrinsic::x86_avx512_mask_psll_qi_512; - else if (Name.slice(12, 16) == "psra") - ShiftID = Name[18] == 'd' ? Intrinsic::x86_avx512_mask_psra_di_512 - : Intrinsic::x86_avx512_mask_psra_qi_512; - else - ShiftID = Name[18] == 'd' ? Intrinsic::x86_avx512_mask_psrl_di_512 - : Intrinsic::x86_avx512_mask_psrl_qi_512; - rename(F); - NewFn = Intrinsic::getDeclaration(F->getParent(), ShiftID); - return true; - } // Upgrade any XOP PERMIL2 index operand still using a float/double vector. 
if (IsX86 && Name.startswith("xop.vpermil2")) { auto Params = F->getFunctionType()->params(); @@ -1396,66 +1359,138 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_sse2_psll_d); } else if (IsX86 && Name == "avx512.mask.psll.d.256") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_psll_d); + } else if (IsX86 && Name == "avx512.mask.psll.d") { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psll_d_512); } else if (IsX86 && Name == "avx512.mask.psll.q.128") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_sse2_psll_q); } else if (IsX86 && Name == "avx512.mask.psll.q.256") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_psll_q); + } else if (IsX86 && Name == "avx512.mask.psll.q") { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psll_q_512); } else if (IsX86 && Name == "avx512.mask.psll.w.128") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_sse2_psll_w); } else if (IsX86 && Name == "avx512.mask.psll.w.256") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_psll_w); + } else if (IsX86 && Name == "avx512.mask.psll.w.512") { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psll_w_512); } else if (IsX86 && Name == "avx512.mask.psra.d.128") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_sse2_psra_d); } else if (IsX86 && Name == "avx512.mask.psra.d.256") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_psra_d); + } else if (IsX86 && Name == "avx512.mask.psra.d") { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psra_d_512); + } else if (IsX86 && Name == "avx512.mask.psra.q.128") { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psra_q_128); + } else if (IsX86 && Name == "avx512.mask.psra.q.256") { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psra_q_256); + } else if (IsX86 && Name == "avx512.mask.psra.q") { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psra_q_512); } else if (IsX86 && Name == "avx512.mask.psra.w.128") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_sse2_psra_w); } else if (IsX86 && Name == "avx512.mask.psra.w.256") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_psra_w); + } else if (IsX86 && Name == "avx512.mask.psra.w.512") { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psra_w_512); } else if (IsX86 && Name == "avx512.mask.psrl.d.128") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_sse2_psrl_d); } else if (IsX86 && Name == "avx512.mask.psrl.d.256") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_psrl_d); + } else if (IsX86 && Name == "avx512.mask.psrl.d") { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psrl_d_512); } else if (IsX86 && Name == "avx512.mask.psrl.q.128") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_sse2_psrl_q); } else if (IsX86 && Name == "avx512.mask.psrl.q.256") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_psrl_q); + } else if (IsX86 && Name == "avx512.mask.psrl.q") { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psrl_q_512); } else if (IsX86 && Name == "avx512.mask.psrl.w.128") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_sse2_psrl_w); } else if (IsX86 && Name == "avx512.mask.psrl.w.256") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_psrl_w); + } else if (IsX86 && Name 
== "avx512.mask.psrl.w.512") { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psrl_w_512); } else if (IsX86 && Name == "avx512.mask.psll.di.128") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_sse2_pslli_d); } else if (IsX86 && Name == "avx512.mask.psll.di.256") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_pslli_d); + } else if (IsX86 && (Name == "avx512.mask.psll.di.512" || + Name == "avx512.mask.pslli.d")) { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_pslli_d_512); } else if (IsX86 && Name == "avx512.mask.psll.qi.128") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_sse2_pslli_q); } else if (IsX86 && Name == "avx512.mask.psll.qi.256") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_pslli_q); + } else if (IsX86 && (Name == "avx512.mask.psll.qi.512" || + Name == "avx512.mask.pslli.q")) { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_pslli_q_512); } else if (IsX86 && Name == "avx512.mask.psll.wi.128") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_sse2_pslli_w); } else if (IsX86 && Name == "avx512.mask.psll.wi.256") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_pslli_w); + } else if (IsX86 && Name == "avx512.mask.psll.wi.512") { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_pslli_w_512); } else if (IsX86 && Name == "avx512.mask.psra.di.128") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_sse2_psrai_d); } else if (IsX86 && Name == "avx512.mask.psra.di.256") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_psrai_d); + } else if (IsX86 && (Name == "avx512.mask.psra.di.512" || + Name == "avx512.mask.psrai.d")) { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psrai_d_512); + } else if (IsX86 && Name == "avx512.mask.psra.qi.128") { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psrai_q_128); + } else if (IsX86 && Name == "avx512.mask.psra.qi.256") { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psrai_q_256); + } else if (IsX86 && (Name == "avx512.mask.psra.qi.512" || + Name == "avx512.mask.psrai.q")) { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psrai_q_512); } else if (IsX86 && Name == "avx512.mask.psra.wi.128") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_sse2_psrai_w); } else if (IsX86 && Name == "avx512.mask.psra.wi.256") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_psrai_w); + } else if (IsX86 && Name == "avx512.mask.psra.wi.512") { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psrai_w_512); } else if (IsX86 && Name == "avx512.mask.psrl.di.128") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_sse2_psrli_d); } else if (IsX86 && Name == "avx512.mask.psrl.di.256") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_psrli_d); + } else if (IsX86 && (Name == "avx512.mask.psrl.di.512" || + Name == "avx512.mask.psrli.d")) { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psrli_d_512); } else if (IsX86 && Name == "avx512.mask.psrl.qi.128") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_sse2_psrli_q); } else if (IsX86 && Name == "avx512.mask.psrl.qi.256") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_psrli_q); + } else if (IsX86 && (Name == "avx512.mask.psrl.qi.512" || + Name == "avx512.mask.psrli.q")) { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psrli_q_512); } 
else if (IsX86 && Name == "avx512.mask.psrl.wi.128") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_sse2_psrli_w); } else if (IsX86 && Name == "avx512.mask.psrl.wi.256") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_psrli_w); + } else if (IsX86 && Name == "avx512.mask.psrl.wi.512") { + Rep = UpgradeX86MaskedShift(Builder, *CI, + Intrinsic::x86_avx512_psrli_w_512); } else if (IsX86 && Name == "avx512.mask.psllv2.di") { Rep = UpgradeX86MaskedShift(Builder, *CI, Intrinsic::x86_avx2_psllv_q); } else if (IsX86 && Name == "avx512.mask.psllv4.di") { @@ -1494,12 +1529,6 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { default: llvm_unreachable("Unknown function for CallInst upgrade."); - case Intrinsic::x86_avx512_mask_psll_di_512: - case Intrinsic::x86_avx512_mask_psra_di_512: - case Intrinsic::x86_avx512_mask_psrl_di_512: - case Intrinsic::x86_avx512_mask_psll_qi_512: - case Intrinsic::x86_avx512_mask_psra_qi_512: - case Intrinsic::x86_avx512_mask_psrl_qi_512: case Intrinsic::arm_neon_vld1: case Intrinsic::arm_neon_vld2: case Intrinsic::arm_neon_vld3: diff --git a/llvm/lib/Target/X86/X86IntrinsicsInfo.h b/llvm/lib/Target/X86/X86IntrinsicsInfo.h index 04ff7f0..b450dc3 100644 --- a/llvm/lib/Target/X86/X86IntrinsicsInfo.h +++ b/llvm/lib/Target/X86/X86IntrinsicsInfo.h @@ -1073,27 +1073,11 @@ static const IntrinsicData IntrinsicsWithoutChain[] = { X86_INTRINSIC_DATA(avx512_mask_prorv_q_512, INTR_TYPE_2OP_MASK, ISD::ROTR, 0), X86_INTRINSIC_DATA(avx512_mask_pshuf_b_512, INTR_TYPE_2OP_MASK, X86ISD::PSHUFB, 0), - X86_INTRINSIC_DATA(avx512_mask_psll_d, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0), - X86_INTRINSIC_DATA(avx512_mask_psll_di_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0), - X86_INTRINSIC_DATA(avx512_mask_psll_q, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0), - X86_INTRINSIC_DATA(avx512_mask_psll_qi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0), - X86_INTRINSIC_DATA(avx512_mask_psll_w_512, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0), - X86_INTRINSIC_DATA(avx512_mask_psll_wi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0), X86_INTRINSIC_DATA(avx512_mask_psllv_d, INTR_TYPE_2OP_MASK, ISD::SHL, 0), X86_INTRINSIC_DATA(avx512_mask_psllv_q, INTR_TYPE_2OP_MASK, ISD::SHL, 0), X86_INTRINSIC_DATA(avx512_mask_psllv16_hi, INTR_TYPE_2OP_MASK, ISD::SHL, 0), X86_INTRINSIC_DATA(avx512_mask_psllv32hi, INTR_TYPE_2OP_MASK, ISD::SHL, 0), X86_INTRINSIC_DATA(avx512_mask_psllv8_hi, INTR_TYPE_2OP_MASK, ISD::SHL, 0), - X86_INTRINSIC_DATA(avx512_mask_psra_d, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0), - X86_INTRINSIC_DATA(avx512_mask_psra_di_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0), - X86_INTRINSIC_DATA(avx512_mask_psra_q, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0), - X86_INTRINSIC_DATA(avx512_mask_psra_q_128, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0), - X86_INTRINSIC_DATA(avx512_mask_psra_q_256, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0), - X86_INTRINSIC_DATA(avx512_mask_psra_qi_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0), - X86_INTRINSIC_DATA(avx512_mask_psra_qi_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0), - X86_INTRINSIC_DATA(avx512_mask_psra_qi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0), - X86_INTRINSIC_DATA(avx512_mask_psra_w_512, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0), - X86_INTRINSIC_DATA(avx512_mask_psra_wi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0), X86_INTRINSIC_DATA(avx512_mask_psrav_d, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0), X86_INTRINSIC_DATA(avx512_mask_psrav_q, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0), X86_INTRINSIC_DATA(avx512_mask_psrav_q_128, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 
0), @@ -1101,12 +1085,6 @@ static const IntrinsicData IntrinsicsWithoutChain[] = { X86_INTRINSIC_DATA(avx512_mask_psrav16_hi, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0), X86_INTRINSIC_DATA(avx512_mask_psrav32_hi, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0), X86_INTRINSIC_DATA(avx512_mask_psrav8_hi, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0), - X86_INTRINSIC_DATA(avx512_mask_psrl_d, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0), - X86_INTRINSIC_DATA(avx512_mask_psrl_di_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0), - X86_INTRINSIC_DATA(avx512_mask_psrl_q, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0), - X86_INTRINSIC_DATA(avx512_mask_psrl_qi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0), - X86_INTRINSIC_DATA(avx512_mask_psrl_w_512, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0), - X86_INTRINSIC_DATA(avx512_mask_psrl_wi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0), X86_INTRINSIC_DATA(avx512_mask_psrlv_d, INTR_TYPE_2OP_MASK, ISD::SRL, 0), X86_INTRINSIC_DATA(avx512_mask_psrlv_q, INTR_TYPE_2OP_MASK, ISD::SRL, 0), X86_INTRINSIC_DATA(avx512_mask_psrlv16_hi, INTR_TYPE_2OP_MASK, ISD::SRL, 0), diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll index a5bf152..e16a6ab 100644 --- a/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll +++ b/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll @@ -1967,3 +1967,122 @@ define <8 x i64>@test_int_x86_avx512_mask_pmovsxw_q_512(<8 x i16> %x0, <8 x i64> ret <8 x i64> %res4 } +declare <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64>, i32, <8 x i64>, i8) + +define <8 x i64>@test_int_x86_avx512_mask_psrl_qi_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) { +; CHECK-LABEL: test_int_x86_avx512_mask_psrl_qi_512: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm2 +; CHECK-NEXT: kmovw %esi, %k1 +; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm1 {%k1} +; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: vpaddq %zmm2, %zmm1, %zmm1 +; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: retq + %res = call <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64> %x0, i32 4, <8 x i64> %x2, i8 %x3) + %res1 = call <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64> %x0, i32 4, <8 x i64> %x2, i8 -1) + %res2 = call <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64> %x0, i32 4, <8 x i64> zeroinitializer, i8 %x3) + %res3 = add <8 x i64> %res, %res1 + %res4 = add <8 x i64> %res3, %res2 + ret <8 x i64> %res4 +} + +declare <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(<16 x i32>, i32, <16 x i32>, i16) + +define <16 x i32>@test_int_x86_avx512_mask_psrl_di_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) { +; CHECK-LABEL: test_int_x86_avx512_mask_psrl_di_512: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsrld $4, %zmm0, %zmm2 +; CHECK-NEXT: kmovw %esi, %k1 +; CHECK-NEXT: vpsrld $4, %zmm0, %zmm1 {%k1} +; CHECK-NEXT: vpsrld $4, %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1 +; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: retq + %res = call <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(<16 x i32> %x0, i32 4, <16 x i32> %x2, i16 %x3) + %res1 = call <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(<16 x i32> %x0, i32 4, <16 x i32> %x2, i16 -1) + %res2 = call <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(<16 x i32> %x0, i32 4, <16 x i32> zeroinitializer, i16 %x3) + %res3 = add <16 x i32> %res, %res1 + %res4 = add <16 x i32> %res3, %res2 + ret <16 x i32> %res4 +} + +declare <16 x i32> @llvm.x86.avx512.mask.psra.di.512(<16 x i32>, i32, <16 x i32>, i16) + +define <16 x i32>@test_int_x86_avx512_mask_psra_di_512(<16 x i32> %x0, i32 %x1, 
<16 x i32> %x2, i16 %x3) { +; CHECK-LABEL: test_int_x86_avx512_mask_psra_di_512: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsrad $3, %zmm0, %zmm2 +; CHECK-NEXT: kmovw %esi, %k1 +; CHECK-NEXT: vpsrad $3, %zmm0, %zmm1 {%k1} +; CHECK-NEXT: vpsrad $3, %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: vpaddd %zmm2, %zmm0, %zmm0 +; CHECK-NEXT: retq + %res = call <16 x i32> @llvm.x86.avx512.mask.psra.di.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 %x3) + %res1 = call <16 x i32> @llvm.x86.avx512.mask.psra.di.512(<16 x i32> %x0, i32 3, <16 x i32> zeroinitializer, i16 %x3) + %res2 = call <16 x i32> @llvm.x86.avx512.mask.psra.di.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 -1) + %res3 = add <16 x i32> %res, %res1 + %res4 = add <16 x i32> %res3, %res2 + ret <16 x i32> %res4 +} + +declare <8 x i64> @llvm.x86.avx512.mask.psra.qi.512(<8 x i64>, i32, <8 x i64>, i8) + +define <8 x i64>@test_int_x86_avx512_mask_psra_qi_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) { +; CHECK-LABEL: test_int_x86_avx512_mask_psra_qi_512: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsraq $3, %zmm0, %zmm2 +; CHECK-NEXT: kmovw %esi, %k1 +; CHECK-NEXT: vpsraq $3, %zmm0, %zmm1 {%k1} +; CHECK-NEXT: vpsraq $3, %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0 +; CHECK-NEXT: retq + %res = call <8 x i64> @llvm.x86.avx512.mask.psra.qi.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 %x3) + %res1 = call <8 x i64> @llvm.x86.avx512.mask.psra.qi.512(<8 x i64> %x0, i32 3, <8 x i64> zeroinitializer, i8 %x3) + %res2 = call <8 x i64> @llvm.x86.avx512.mask.psra.qi.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 -1) + %res3 = add <8 x i64> %res, %res1 + %res4 = add <8 x i64> %res3, %res2 + ret <8 x i64> %res4 +} + +declare <16 x i32> @llvm.x86.avx512.mask.psll.di.512(<16 x i32>, i32, <16 x i32>, i16) + +define <16 x i32>@test_int_x86_avx512_mask_psll_di_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) { +; CHECK-LABEL: test_int_x86_avx512_mask_psll_di_512: +; CHECK: ## BB#0: +; CHECK-NEXT: vpslld $3, %zmm0, %zmm2 +; CHECK-NEXT: kmovw %esi, %k1 +; CHECK-NEXT: vpslld $3, %zmm0, %zmm1 {%k1} +; CHECK-NEXT: vpslld $3, %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: vpaddd %zmm2, %zmm0, %zmm0 +; CHECK-NEXT: retq + %res = call <16 x i32> @llvm.x86.avx512.mask.psll.di.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 %x3) + %res1 = call <16 x i32> @llvm.x86.avx512.mask.psll.di.512(<16 x i32> %x0, i32 3, <16 x i32> zeroinitializer, i16 %x3) + %res2 = call <16 x i32> @llvm.x86.avx512.mask.psll.di.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 -1) + %res3 = add <16 x i32> %res, %res1 + %res4 = add <16 x i32> %res3, %res2 + ret <16 x i32> %res4 +} + +declare <8 x i64> @llvm.x86.avx512.mask.psll.qi.512(<8 x i64>, i32, <8 x i64>, i8) + +define <8 x i64>@test_int_x86_avx512_mask_psll_qi_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) { +; CHECK-LABEL: test_int_x86_avx512_mask_psll_qi_512: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsllq $3, %zmm0, %zmm2 +; CHECK-NEXT: kmovw %esi, %k1 +; CHECK-NEXT: vpsllq $3, %zmm0, %zmm1 {%k1} +; CHECK-NEXT: vpsllq $3, %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0 +; CHECK-NEXT: retq + %res = call <8 x i64> @llvm.x86.avx512.mask.psll.qi.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 %x3) + %res1 = call <8 x i64> @llvm.x86.avx512.mask.psll.qi.512(<8 x i64> %x0, i32 3, <8 x i64> zeroinitializer, i8 %x3) + %res2 = call <8 x i64> @llvm.x86.avx512.mask.psll.qi.512(<8 x i64> 
%x0, i32 3, <8 x i64> %x2, i8 -1) + %res3 = add <8 x i64> %res, %res1 + %res4 = add <8 x i64> %res3, %res2 + ret <8 x i64> %res4 +} diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics.ll b/llvm/test/CodeGen/X86/avx512-intrinsics.ll index 7d85c19..b80006b8 100644 --- a/llvm/test/CodeGen/X86/avx512-intrinsics.ll +++ b/llvm/test/CodeGen/X86/avx512-intrinsics.ll @@ -4804,126 +4804,6 @@ define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512(<4 x i64> %x0, <8 x ret <8 x i64> %res5 } -declare <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64>, i32, <8 x i64>, i8) - -define <8 x i64>@test_int_x86_avx512_mask_psrl_qi_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) { -; CHECK-LABEL: test_int_x86_avx512_mask_psrl_qi_512: -; CHECK: ## BB#0: -; CHECK-NEXT: kmovw %esi, %k1 -; CHECK-NEXT: vpsrlq $255, %zmm0, %zmm1 {%k1} -; CHECK-NEXT: vpsrlq $255, %zmm0, %zmm2 {%k1} {z} -; CHECK-NEXT: vpsrlq $255, %zmm0, %zmm0 -; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0 -; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0 -; CHECK-NEXT: retq - %res = call <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64> %x0, i32 255, <8 x i64> %x2, i8 %x3) - %res1 = call <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64> %x0, i32 255, <8 x i64> %x2, i8 -1) - %res2 = call <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64> %x0, i32 255, <8 x i64> zeroinitializer, i8 %x3) - %res3 = add <8 x i64> %res, %res1 - %res4 = add <8 x i64> %res3, %res2 - ret <8 x i64> %res4 -} - -declare <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(<16 x i32>, i32, <16 x i32>, i16) - -define <16 x i32>@test_int_x86_avx512_mask_psrl_di_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) { -; CHECK-LABEL: test_int_x86_avx512_mask_psrl_di_512: -; CHECK: ## BB#0: -; CHECK-NEXT: kmovw %esi, %k1 -; CHECK-NEXT: vpsrld $255, %zmm0, %zmm1 {%k1} -; CHECK-NEXT: vpsrld $255, %zmm0, %zmm2 {%k1} {z} -; CHECK-NEXT: vpsrld $255, %zmm0, %zmm0 -; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0 -; CHECK-NEXT: vpaddd %zmm2, %zmm0, %zmm0 -; CHECK-NEXT: retq - %res = call <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(<16 x i32> %x0, i32 255, <16 x i32> %x2, i16 %x3) - %res1 = call <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(<16 x i32> %x0, i32 255, <16 x i32> %x2, i16 -1) - %res2 = call <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(<16 x i32> %x0, i32 255, <16 x i32> zeroinitializer, i16 %x3) - %res3 = add <16 x i32> %res, %res1 - %res4 = add <16 x i32> %res3, %res2 - ret <16 x i32> %res4 -} - -declare <16 x i32> @llvm.x86.avx512.mask.psra.di.512(<16 x i32>, i32, <16 x i32>, i16) - -define <16 x i32>@test_int_x86_avx512_mask_psra_di_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) { -; CHECK-LABEL: test_int_x86_avx512_mask_psra_di_512: -; CHECK: ## BB#0: -; CHECK-NEXT: kmovw %esi, %k1 -; CHECK-NEXT: vpsrad $3, %zmm0, %zmm1 {%k1} -; CHECK-NEXT: vpsrad $3, %zmm0, %zmm2 {%k1} {z} -; CHECK-NEXT: vpsrad $3, %zmm0, %zmm0 -; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1 -; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0 -; CHECK-NEXT: retq - %res = call <16 x i32> @llvm.x86.avx512.mask.psra.di.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 %x3) - %res1 = call <16 x i32> @llvm.x86.avx512.mask.psra.di.512(<16 x i32> %x0, i32 3, <16 x i32> zeroinitializer, i16 %x3) - %res2 = call <16 x i32> @llvm.x86.avx512.mask.psra.di.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 -1) - %res3 = add <16 x i32> %res, %res1 - %res4 = add <16 x i32> %res3, %res2 - ret <16 x i32> %res4 -} - -declare <8 x i64> @llvm.x86.avx512.mask.psra.qi.512(<8 x i64>, i32, <8 x i64>, i8) - -define <8 x 
i64>@test_int_x86_avx512_mask_psra_qi_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) { -; CHECK-LABEL: test_int_x86_avx512_mask_psra_qi_512: -; CHECK: ## BB#0: -; CHECK-NEXT: kmovw %esi, %k1 -; CHECK-NEXT: vpsraq $3, %zmm0, %zmm1 {%k1} -; CHECK-NEXT: vpsraq $3, %zmm0, %zmm2 {%k1} {z} -; CHECK-NEXT: vpsraq $3, %zmm0, %zmm0 -; CHECK-NEXT: vpaddq %zmm2, %zmm1, %zmm1 -; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0 -; CHECK-NEXT: retq - %res = call <8 x i64> @llvm.x86.avx512.mask.psra.qi.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 %x3) - %res1 = call <8 x i64> @llvm.x86.avx512.mask.psra.qi.512(<8 x i64> %x0, i32 3, <8 x i64> zeroinitializer, i8 %x3) - %res2 = call <8 x i64> @llvm.x86.avx512.mask.psra.qi.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 -1) - %res3 = add <8 x i64> %res, %res1 - %res4 = add <8 x i64> %res3, %res2 - ret <8 x i64> %res4 -} - -declare <16 x i32> @llvm.x86.avx512.mask.psll.di.512(<16 x i32>, i32, <16 x i32>, i16) - -define <16 x i32>@test_int_x86_avx512_mask_psll_di_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) { -; CHECK-LABEL: test_int_x86_avx512_mask_psll_di_512: -; CHECK: ## BB#0: -; CHECK-NEXT: kmovw %esi, %k1 -; CHECK-NEXT: vpslld $3, %zmm0, %zmm1 {%k1} -; CHECK-NEXT: vpslld $3, %zmm0, %zmm2 {%k1} {z} -; CHECK-NEXT: vpslld $3, %zmm0, %zmm0 -; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1 -; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0 -; CHECK-NEXT: retq - %res = call <16 x i32> @llvm.x86.avx512.mask.psll.di.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 %x3) - %res1 = call <16 x i32> @llvm.x86.avx512.mask.psll.di.512(<16 x i32> %x0, i32 3, <16 x i32> zeroinitializer, i16 %x3) - %res2 = call <16 x i32> @llvm.x86.avx512.mask.psll.di.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 -1) - %res3 = add <16 x i32> %res, %res1 - %res4 = add <16 x i32> %res3, %res2 - ret <16 x i32> %res4 -} - -declare <8 x i64> @llvm.x86.avx512.mask.psll.qi.512(<8 x i64>, i32, <8 x i64>, i8) - -define <8 x i64>@test_int_x86_avx512_mask_psll_qi_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) { -; CHECK-LABEL: test_int_x86_avx512_mask_psll_qi_512: -; CHECK: ## BB#0: -; CHECK-NEXT: kmovw %esi, %k1 -; CHECK-NEXT: vpsllq $3, %zmm0, %zmm1 {%k1} -; CHECK-NEXT: vpsllq $3, %zmm0, %zmm2 {%k1} {z} -; CHECK-NEXT: vpsllq $3, %zmm0, %zmm0 -; CHECK-NEXT: vpaddq %zmm2, %zmm1, %zmm1 -; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0 -; CHECK-NEXT: retq - %res = call <8 x i64> @llvm.x86.avx512.mask.psll.qi.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 %x3) - %res1 = call <8 x i64> @llvm.x86.avx512.mask.psll.qi.512(<8 x i64> %x0, i32 3, <8 x i64> zeroinitializer, i8 %x3) - %res2 = call <8 x i64> @llvm.x86.avx512.mask.psll.qi.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 -1) - %res3 = add <8 x i64> %res, %res1 - %res4 = add <8 x i64> %res3, %res2 - ret <8 x i64> %res4 -} - declare <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16) define <16 x i32>@test_int_x86_avx512_mask_prorv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) { diff --git a/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll index 21e1a05..da9ec32 100644 --- a/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll +++ b/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll @@ -788,3 +788,183 @@ define <32 x i16>@test_int_x86_avx512_mask_pmovsxb_w_512(<32 x i8> %x0, <32 x i1 ret <32 x i16> %res4 } +declare <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16>, <8 x i16>, <32 x i16>, i32) + +define <32 x i16>@test_int_x86_avx512_mask_psrl_w_512(<32 x i16> %x0, <8 
x i16> %x1, <32 x i16> %x2, i32 %x3) { +; AVX512BW-LABEL: test_int_x86_avx512_mask_psrl_w_512: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm3 +; AVX512BW-NEXT: kmovd %edi, %k1 +; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm2 {%k1} +; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0 {%k1} {z} +; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm1 +; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0 +; AVX512BW-NEXT: retq +; +; AVX512F-32-LABEL: test_int_x86_avx512_mask_psrl_w_512: +; AVX512F-32: # BB#0: +; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm3 +; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1 +; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm2 {%k1} +; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm0 {%k1} {z} +; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm1 +; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0 +; AVX512F-32-NEXT: retl + %res = call <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) + %res1 = call <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 -1) + %res2 = call <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> zeroinitializer, i32 %x3) + %res3 = add <32 x i16> %res, %res1 + %res4 = add <32 x i16> %res3, %res2 + ret <32 x i16> %res4 +} + +declare <32 x i16> @llvm.x86.avx512.mask.psrl.wi.512(<32 x i16>, i32, <32 x i16>, i32) + +define <32 x i16>@test_int_x86_avx512_mask_psrl_wi_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) { +; AVX512BW-LABEL: test_int_x86_avx512_mask_psrl_wi_512: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm2 +; AVX512BW-NEXT: kmovd %esi, %k1 +; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm1 {%k1} +; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0 {%k1} {z} +; AVX512BW-NEXT: vpaddw %zmm2, %zmm1, %zmm1 +; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0 +; AVX512BW-NEXT: retq +; +; AVX512F-32-LABEL: test_int_x86_avx512_mask_psrl_wi_512: +; AVX512F-32: # BB#0: +; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm2 +; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1 +; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm1 {%k1} +; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm0 {%k1} {z} +; AVX512F-32-NEXT: vpaddw %zmm2, %zmm1, %zmm1 +; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0 +; AVX512F-32-NEXT: retl + %res = call <32 x i16> @llvm.x86.avx512.mask.psrl.wi.512(<32 x i16> %x0, i32 3, <32 x i16> %x2, i32 %x3) + %res1 = call <32 x i16> @llvm.x86.avx512.mask.psrl.wi.512(<32 x i16> %x0, i32 3, <32 x i16> %x2, i32 -1) + %res2 = call <32 x i16> @llvm.x86.avx512.mask.psrl.wi.512(<32 x i16> %x0, i32 3, <32 x i16> zeroinitializer, i32 %x3) + %res3 = add <32 x i16> %res, %res1 + %res4 = add <32 x i16> %res3, %res2 + ret <32 x i16> %res4 +} + +declare <32 x i16> @llvm.x86.avx512.mask.psra.w.512(<32 x i16>, <8 x i16>, <32 x i16>, i32) + +define <32 x i16>@test_int_x86_avx512_mask_psra_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) { +; AVX512BW-LABEL: test_int_x86_avx512_mask_psra_w_512: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm3 +; AVX512BW-NEXT: kmovd %edi, %k1 +; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm2 {%k1} +; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm0 {%k1} {z} +; AVX512BW-NEXT: vpaddw %zmm0, %zmm2, %zmm0 +; AVX512BW-NEXT: vpaddw %zmm3, %zmm0, %zmm0 +; AVX512BW-NEXT: retq +; +; AVX512F-32-LABEL: test_int_x86_avx512_mask_psra_w_512: +; AVX512F-32: # BB#0: +; AVX512F-32-NEXT: vpsraw %xmm1, %zmm0, %zmm3 +; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1 +; AVX512F-32-NEXT: vpsraw %xmm1, %zmm0, %zmm2 {%k1} +; AVX512F-32-NEXT: vpsraw %xmm1, %zmm0, %zmm0 {%k1} 
{z} +; AVX512F-32-NEXT: vpaddw %zmm0, %zmm2, %zmm0 +; AVX512F-32-NEXT: vpaddw %zmm3, %zmm0, %zmm0 +; AVX512F-32-NEXT: retl + %res = call <32 x i16> @llvm.x86.avx512.mask.psra.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) + %res1 = call <32 x i16> @llvm.x86.avx512.mask.psra.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> zeroinitializer, i32 %x3) + %res2 = call <32 x i16> @llvm.x86.avx512.mask.psra.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 -1) + %res3 = add <32 x i16> %res, %res1 + %res4 = add <32 x i16> %res3, %res2 + ret <32 x i16> %res4 +} + +declare <32 x i16> @llvm.x86.avx512.mask.psra.wi.512(<32 x i16>, i32, <32 x i16>, i32) + +define <32 x i16>@test_int_x86_avx512_mask_psra_wi_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) { +; AVX512BW-LABEL: test_int_x86_avx512_mask_psra_wi_512: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsraw $3, %zmm0, %zmm2 +; AVX512BW-NEXT: kmovd %esi, %k1 +; AVX512BW-NEXT: vpsraw $3, %zmm0, %zmm1 {%k1} +; AVX512BW-NEXT: vpsraw $3, %zmm0, %zmm0 {%k1} {z} +; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0 +; AVX512BW-NEXT: vpaddw %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: retq +; +; AVX512F-32-LABEL: test_int_x86_avx512_mask_psra_wi_512: +; AVX512F-32: # BB#0: +; AVX512F-32-NEXT: vpsraw $3, %zmm0, %zmm2 +; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1 +; AVX512F-32-NEXT: vpsraw $3, %zmm0, %zmm1 {%k1} +; AVX512F-32-NEXT: vpsraw $3, %zmm0, %zmm0 {%k1} {z} +; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0 +; AVX512F-32-NEXT: vpaddw %zmm2, %zmm0, %zmm0 +; AVX512F-32-NEXT: retl + %res = call <32 x i16> @llvm.x86.avx512.mask.psra.wi.512(<32 x i16> %x0, i32 3, <32 x i16> %x2, i32 %x3) + %res1 = call <32 x i16> @llvm.x86.avx512.mask.psra.wi.512(<32 x i16> %x0, i32 3, <32 x i16> zeroinitializer, i32 %x3) + %res2 = call <32 x i16> @llvm.x86.avx512.mask.psra.wi.512(<32 x i16> %x0, i32 3, <32 x i16> %x2, i32 -1) + %res3 = add <32 x i16> %res, %res1 + %res4 = add <32 x i16> %res3, %res2 + ret <32 x i16> %res4 +} + +declare <32 x i16> @llvm.x86.avx512.mask.psll.w.512(<32 x i16>, <8 x i16>, <32 x i16>, i32) + +define <32 x i16>@test_int_x86_avx512_mask_psll_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) { +; AVX512BW-LABEL: test_int_x86_avx512_mask_psll_w_512: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm3 +; AVX512BW-NEXT: kmovd %edi, %k1 +; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm2 {%k1} +; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm0 {%k1} {z} +; AVX512BW-NEXT: vpaddw %zmm0, %zmm2, %zmm0 +; AVX512BW-NEXT: vpaddw %zmm3, %zmm0, %zmm0 +; AVX512BW-NEXT: retq +; +; AVX512F-32-LABEL: test_int_x86_avx512_mask_psll_w_512: +; AVX512F-32: # BB#0: +; AVX512F-32-NEXT: vpsllw %xmm1, %zmm0, %zmm3 +; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1 +; AVX512F-32-NEXT: vpsllw %xmm1, %zmm0, %zmm2 {%k1} +; AVX512F-32-NEXT: vpsllw %xmm1, %zmm0, %zmm0 {%k1} {z} +; AVX512F-32-NEXT: vpaddw %zmm0, %zmm2, %zmm0 +; AVX512F-32-NEXT: vpaddw %zmm3, %zmm0, %zmm0 +; AVX512F-32-NEXT: retl + %res = call <32 x i16> @llvm.x86.avx512.mask.psll.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) + %res1 = call <32 x i16> @llvm.x86.avx512.mask.psll.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> zeroinitializer, i32 %x3) + %res2 = call <32 x i16> @llvm.x86.avx512.mask.psll.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 -1) + %res3 = add <32 x i16> %res, %res1 + %res4 = add <32 x i16> %res3, %res2 + ret <32 x i16> %res4 +} + +declare <32 x i16> @llvm.x86.avx512.mask.psll.wi.512(<32 x i16>, i32, <32 x i16>, i32) + +define <32 x 
i16>@test_int_x86_avx512_mask_psll_wi_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) { +; AVX512BW-LABEL: test_int_x86_avx512_mask_psll_wi_512: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsllw $3, %zmm0, %zmm2 +; AVX512BW-NEXT: kmovd %esi, %k1 +; AVX512BW-NEXT: vpsllw $3, %zmm0, %zmm1 {%k1} +; AVX512BW-NEXT: vpsllw $3, %zmm0, %zmm0 {%k1} {z} +; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0 +; AVX512BW-NEXT: vpaddw %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: retq +; +; AVX512F-32-LABEL: test_int_x86_avx512_mask_psll_wi_512: +; AVX512F-32: # BB#0: +; AVX512F-32-NEXT: vpsllw $3, %zmm0, %zmm2 +; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1 +; AVX512F-32-NEXT: vpsllw $3, %zmm0, %zmm1 {%k1} +; AVX512F-32-NEXT: vpsllw $3, %zmm0, %zmm0 {%k1} {z} +; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0 +; AVX512F-32-NEXT: vpaddw %zmm2, %zmm0, %zmm0 +; AVX512F-32-NEXT: retl + %res = call <32 x i16> @llvm.x86.avx512.mask.psll.wi.512(<32 x i16> %x0, i32 3, <32 x i16> %x2, i32 %x3) + %res1 = call <32 x i16> @llvm.x86.avx512.mask.psll.wi.512(<32 x i16> %x0, i32 3, <32 x i16> zeroinitializer, i32 %x3) + %res2 = call <32 x i16> @llvm.x86.avx512.mask.psll.wi.512(<32 x i16> %x0, i32 3, <32 x i16> %x2, i32 -1) + %res3 = add <32 x i16> %res, %res1 + %res4 = add <32 x i16> %res3, %res2 + ret <32 x i16> %res4 +} + diff --git a/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll b/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll index b71f4b8..acd0fb0 100644 --- a/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll +++ b/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll @@ -2283,66 +2283,6 @@ define <32 x i16>@test_int_x86_avx512_cvtmask2w_512(i32 %x0) { ret <32 x i16> %res } -declare <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16>, <8 x i16>, <32 x i16>, i32) - -define <32 x i16>@test_int_x86_avx512_mask_psrl_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) { -; AVX512BW-LABEL: test_int_x86_avx512_mask_psrl_w_512: -; AVX512BW: ## BB#0: -; AVX512BW-NEXT: kmovd %edi, %k1 -; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm2 {%k1} -; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm3 {%k1} {z} -; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpaddw %zmm0, %zmm2, %zmm0 -; AVX512BW-NEXT: vpaddw %zmm3, %zmm0, %zmm0 -; AVX512BW-NEXT: retq -; -; AVX512F-32-LABEL: test_int_x86_avx512_mask_psrl_w_512: -; AVX512F-32: # BB#0: -; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1 -; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm2 {%k1} -; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm3 {%k1} {z} -; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm0 -; AVX512F-32-NEXT: vpaddw %zmm0, %zmm2, %zmm0 -; AVX512F-32-NEXT: vpaddw %zmm3, %zmm0, %zmm0 -; AVX512F-32-NEXT: retl - %res = call <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) - %res1 = call <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 -1) - %res2 = call <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> zeroinitializer, i32 %x3) - %res3 = add <32 x i16> %res, %res1 - %res4 = add <32 x i16> %res3, %res2 - ret <32 x i16> %res4 -} - -declare <32 x i16> @llvm.x86.avx512.mask.psrl.wi.512(<32 x i16>, i32, <32 x i16>, i32) - -define <32 x i16>@test_int_x86_avx512_mask_psrl_wi_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) { -; AVX512BW-LABEL: test_int_x86_avx512_mask_psrl_wi_512: -; AVX512BW: ## BB#0: -; AVX512BW-NEXT: kmovd %esi, %k1 -; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm1 {%k1} -; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm2 {%k1} {z} -; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0 -; 
AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0 -; AVX512BW-NEXT: vpaddw %zmm2, %zmm0, %zmm0 -; AVX512BW-NEXT: retq -; -; AVX512F-32-LABEL: test_int_x86_avx512_mask_psrl_wi_512: -; AVX512F-32: # BB#0: -; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1 -; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm1 {%k1} -; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm2 {%k1} {z} -; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm0 -; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0 -; AVX512F-32-NEXT: vpaddw %zmm2, %zmm0, %zmm0 -; AVX512F-32-NEXT: retl - %res = call <32 x i16> @llvm.x86.avx512.mask.psrl.wi.512(<32 x i16> %x0, i32 3, <32 x i16> %x2, i32 %x3) - %res1 = call <32 x i16> @llvm.x86.avx512.mask.psrl.wi.512(<32 x i16> %x0, i32 3, <32 x i16> %x2, i32 -1) - %res2 = call <32 x i16> @llvm.x86.avx512.mask.psrl.wi.512(<32 x i16> %x0, i32 3, <32 x i16> zeroinitializer, i32 %x3) - %res3 = add <32 x i16> %res, %res1 - %res4 = add <32 x i16> %res3, %res2 - ret <32 x i16> %res4 -} - declare <32 x i16> @llvm.x86.avx512.mask.psrlv32hi(<32 x i16>, <32 x i16>, <32 x i16>, i32) define <32 x i16>@test_int_x86_avx512_mask_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) { @@ -2373,66 +2313,6 @@ define <32 x i16>@test_int_x86_avx512_mask_psrlv32hi(<32 x i16> %x0, <32 x i16> ret <32 x i16> %res4 } -declare <32 x i16> @llvm.x86.avx512.mask.psra.w.512(<32 x i16>, <8 x i16>, <32 x i16>, i32) - -define <32 x i16>@test_int_x86_avx512_mask_psra_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) { -; AVX512BW-LABEL: test_int_x86_avx512_mask_psra_w_512: -; AVX512BW: ## BB#0: -; AVX512BW-NEXT: kmovd %edi, %k1 -; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm2 {%k1} -; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm3 {%k1} {z} -; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm1 -; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0 -; AVX512BW-NEXT: retq -; -; AVX512F-32-LABEL: test_int_x86_avx512_mask_psra_w_512: -; AVX512F-32: # BB#0: -; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1 -; AVX512F-32-NEXT: vpsraw %xmm1, %zmm0, %zmm2 {%k1} -; AVX512F-32-NEXT: vpsraw %xmm1, %zmm0, %zmm3 {%k1} {z} -; AVX512F-32-NEXT: vpsraw %xmm1, %zmm0, %zmm0 -; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm1 -; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0 -; AVX512F-32-NEXT: retl - %res = call <32 x i16> @llvm.x86.avx512.mask.psra.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) - %res1 = call <32 x i16> @llvm.x86.avx512.mask.psra.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> zeroinitializer, i32 %x3) - %res2 = call <32 x i16> @llvm.x86.avx512.mask.psra.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 -1) - %res3 = add <32 x i16> %res, %res1 - %res4 = add <32 x i16> %res3, %res2 - ret <32 x i16> %res4 -} - -declare <32 x i16> @llvm.x86.avx512.mask.psra.wi.512(<32 x i16>, i32, <32 x i16>, i32) - -define <32 x i16>@test_int_x86_avx512_mask_psra_wi_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) { -; AVX512BW-LABEL: test_int_x86_avx512_mask_psra_wi_512: -; AVX512BW: ## BB#0: -; AVX512BW-NEXT: kmovd %esi, %k1 -; AVX512BW-NEXT: vpsraw $3, %zmm0, %zmm1 {%k1} -; AVX512BW-NEXT: vpsraw $3, %zmm0, %zmm2 {%k1} {z} -; AVX512BW-NEXT: vpsraw $3, %zmm0, %zmm0 -; AVX512BW-NEXT: vpaddw %zmm2, %zmm1, %zmm1 -; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0 -; AVX512BW-NEXT: retq -; -; AVX512F-32-LABEL: test_int_x86_avx512_mask_psra_wi_512: -; AVX512F-32: # BB#0: -; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1 -; AVX512F-32-NEXT: vpsraw $3, %zmm0, %zmm1 {%k1} -; AVX512F-32-NEXT: vpsraw $3, %zmm0, %zmm2 {%k1} {z} -; AVX512F-32-NEXT: 
vpsraw $3, %zmm0, %zmm0 -; AVX512F-32-NEXT: vpaddw %zmm2, %zmm1, %zmm1 -; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0 -; AVX512F-32-NEXT: retl - %res = call <32 x i16> @llvm.x86.avx512.mask.psra.wi.512(<32 x i16> %x0, i32 3, <32 x i16> %x2, i32 %x3) - %res1 = call <32 x i16> @llvm.x86.avx512.mask.psra.wi.512(<32 x i16> %x0, i32 3, <32 x i16> zeroinitializer, i32 %x3) - %res2 = call <32 x i16> @llvm.x86.avx512.mask.psra.wi.512(<32 x i16> %x0, i32 3, <32 x i16> %x2, i32 -1) - %res3 = add <32 x i16> %res, %res1 - %res4 = add <32 x i16> %res3, %res2 - ret <32 x i16> %res4 -} - declare <32 x i16> @llvm.x86.avx512.mask.psrav32.hi(<32 x i16>, <32 x i16>, <32 x i16>, i32) define <32 x i16>@test_int_x86_avx512_mask_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) { @@ -2481,66 +2361,6 @@ define <32 x i16>@test_int_x86_avx512_mask_psrav32_hi_const(<32 x i16> %x0, <32 ret <32 x i16> %res } -declare <32 x i16> @llvm.x86.avx512.mask.psll.w.512(<32 x i16>, <8 x i16>, <32 x i16>, i32) - -define <32 x i16>@test_int_x86_avx512_mask_psll_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) { -; AVX512BW-LABEL: test_int_x86_avx512_mask_psll_w_512: -; AVX512BW: ## BB#0: -; AVX512BW-NEXT: kmovd %edi, %k1 -; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm2 {%k1} -; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm3 {%k1} {z} -; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm1 -; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0 -; AVX512BW-NEXT: retq -; -; AVX512F-32-LABEL: test_int_x86_avx512_mask_psll_w_512: -; AVX512F-32: # BB#0: -; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1 -; AVX512F-32-NEXT: vpsllw %xmm1, %zmm0, %zmm2 {%k1} -; AVX512F-32-NEXT: vpsllw %xmm1, %zmm0, %zmm3 {%k1} {z} -; AVX512F-32-NEXT: vpsllw %xmm1, %zmm0, %zmm0 -; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm1 -; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0 -; AVX512F-32-NEXT: retl - %res = call <32 x i16> @llvm.x86.avx512.mask.psll.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) - %res1 = call <32 x i16> @llvm.x86.avx512.mask.psll.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> zeroinitializer, i32 %x3) - %res2 = call <32 x i16> @llvm.x86.avx512.mask.psll.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 -1) - %res3 = add <32 x i16> %res, %res1 - %res4 = add <32 x i16> %res3, %res2 - ret <32 x i16> %res4 -} - -declare <32 x i16> @llvm.x86.avx512.mask.psll.wi.512(<32 x i16>, i32, <32 x i16>, i32) - -define <32 x i16>@test_int_x86_avx512_mask_psll_wi_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) { -; AVX512BW-LABEL: test_int_x86_avx512_mask_psll_wi_512: -; AVX512BW: ## BB#0: -; AVX512BW-NEXT: kmovd %esi, %k1 -; AVX512BW-NEXT: vpsllw $3, %zmm0, %zmm1 {%k1} -; AVX512BW-NEXT: vpsllw $3, %zmm0, %zmm2 {%k1} {z} -; AVX512BW-NEXT: vpsllw $3, %zmm0, %zmm0 -; AVX512BW-NEXT: vpaddw %zmm2, %zmm1, %zmm1 -; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0 -; AVX512BW-NEXT: retq -; -; AVX512F-32-LABEL: test_int_x86_avx512_mask_psll_wi_512: -; AVX512F-32: # BB#0: -; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1 -; AVX512F-32-NEXT: vpsllw $3, %zmm0, %zmm1 {%k1} -; AVX512F-32-NEXT: vpsllw $3, %zmm0, %zmm2 {%k1} {z} -; AVX512F-32-NEXT: vpsllw $3, %zmm0, %zmm0 -; AVX512F-32-NEXT: vpaddw %zmm2, %zmm1, %zmm1 -; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0 -; AVX512F-32-NEXT: retl - %res = call <32 x i16> @llvm.x86.avx512.mask.psll.wi.512(<32 x i16> %x0, i32 3, <32 x i16> %x2, i32 %x3) - %res1 = call <32 x i16> @llvm.x86.avx512.mask.psll.wi.512(<32 x i16> %x0, i32 3, <32 x i16> zeroinitializer, i32 %x3) 
- %res2 = call <32 x i16> @llvm.x86.avx512.mask.psll.wi.512(<32 x i16> %x0, i32 3, <32 x i16> %x2, i32 -1) - %res3 = add <32 x i16> %res, %res1 - %res4 = add <32 x i16> %res3, %res2 - ret <32 x i16> %res4 -} - declare <32 x i16> @llvm.x86.avx512.mask.psllv32hi(<32 x i16>, <32 x i16>, <32 x i16>, i32) define <32 x i16>@test_int_x86_avx512_mask_psllv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) { diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll index 191a13b..6fc5ccf 100644 --- a/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll +++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll @@ -4419,3 +4419,83 @@ define <4 x i64>@test_int_x86_avx512_mask_pmovsxw_q_256(<8 x i16> %x0, <4 x i64> ret <4 x i64> %res4 } +declare <2 x i64> @llvm.x86.avx512.mask.psra.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8) + +define <2 x i64>@test_int_x86_avx512_mask_psra_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) { +; CHECK-LABEL: test_int_x86_avx512_mask_psra_q_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsraq %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf1,0xfd,0x08,0xe2,0xd9] +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vpsraq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xe2,0xd1] +; CHECK-NEXT: vpsraq %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0xe2,0xc1] +; CHECK-NEXT: vpaddq %xmm0, %xmm2, %xmm0 ## encoding: [0x62,0xf1,0xed,0x08,0xd4,0xc0] +; CHECK-NEXT: vpaddq %xmm3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0xd4,0xc3] +; CHECK-NEXT: retq ## encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx512.mask.psra.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) + %res1 = call <2 x i64> @llvm.x86.avx512.mask.psra.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3) + %res2 = call <2 x i64> @llvm.x86.avx512.mask.psra.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1) + %res3 = add <2 x i64> %res, %res1 + %res4 = add <2 x i64> %res3, %res2 + ret <2 x i64> %res4 +} + +declare <4 x i64> @llvm.x86.avx512.mask.psra.q.256(<4 x i64>, <2 x i64>, <4 x i64>, i8) + +define <4 x i64>@test_int_x86_avx512_mask_psra_q_256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x2, i8 %x3) { +; CHECK-LABEL: test_int_x86_avx512_mask_psra_q_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsraq %xmm1, %ymm0, %ymm3 ## encoding: [0x62,0xf1,0xfd,0x28,0xe2,0xd9] +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vpsraq %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0xe2,0xd1] +; CHECK-NEXT: vpsraq %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0xe2,0xc1] +; CHECK-NEXT: vpaddq %ymm0, %ymm2, %ymm0 ## encoding: [0x62,0xf1,0xed,0x28,0xd4,0xc0] +; CHECK-NEXT: vpaddq %ymm3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0xd4,0xc3] +; CHECK-NEXT: retq ## encoding: [0xc3] + %res = call <4 x i64> @llvm.x86.avx512.mask.psra.q.256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x2, i8 %x3) + %res1 = call <4 x i64> @llvm.x86.avx512.mask.psra.q.256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3) + %res2 = call <4 x i64> @llvm.x86.avx512.mask.psra.q.256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x2, i8 -1) + %res3 = add <4 x i64> %res, %res1 + %res4 = add <4 x i64> %res3, %res2 + ret <4 x i64> %res4 +} + +declare <2 x i64> @llvm.x86.avx512.mask.psra.qi.128(<2 x i64>, i32, <2 x i64>, i8) + +define <2 x i64>@test_int_x86_avx512_mask_psra_qi_128(<2 x i64> %x0, i32 %x1, <2 x i64> %x2, i8 %x3) { +; 
CHECK-LABEL: test_int_x86_avx512_mask_psra_qi_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsraq $3, %xmm0, %xmm2 ## encoding: [0x62,0xf1,0xed,0x08,0x72,0xe0,0x03] +; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce] +; CHECK-NEXT: vpsraq $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x72,0xe0,0x03] +; CHECK-NEXT: vpsraq $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x72,0xe0,0x03] +; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## encoding: [0x62,0xf1,0xf5,0x08,0xd4,0xc0] +; CHECK-NEXT: vpaddq %xmm2, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0xd4,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx512.mask.psra.qi.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 %x3) + %res1 = call <2 x i64> @llvm.x86.avx512.mask.psra.qi.128(<2 x i64> %x0, i32 3, <2 x i64> zeroinitializer, i8 %x3) + %res2 = call <2 x i64> @llvm.x86.avx512.mask.psra.qi.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 -1) + %res3 = add <2 x i64> %res, %res1 + %res4 = add <2 x i64> %res3, %res2 + ret <2 x i64> %res4 +} + +declare <4 x i64> @llvm.x86.avx512.mask.psra.qi.256(<4 x i64>, i32, <4 x i64>, i8) + +define <4 x i64>@test_int_x86_avx512_mask_psra_qi_256(<4 x i64> %x0, i32 %x1, <4 x i64> %x2, i8 %x3) { +; CHECK-LABEL: test_int_x86_avx512_mask_psra_qi_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsraq $3, %ymm0, %ymm2 ## encoding: [0x62,0xf1,0xed,0x28,0x72,0xe0,0x03] +; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce] +; CHECK-NEXT: vpsraq $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x72,0xe0,0x03] +; CHECK-NEXT: vpsraq $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xe0,0x03] +; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## encoding: [0x62,0xf1,0xf5,0x28,0xd4,0xc0] +; CHECK-NEXT: vpaddq %ymm2, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0xd4,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] + %res = call <4 x i64> @llvm.x86.avx512.mask.psra.qi.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 %x3) + %res1 = call <4 x i64> @llvm.x86.avx512.mask.psra.qi.256(<4 x i64> %x0, i32 3, <4 x i64> zeroinitializer, i8 %x3) + %res2 = call <4 x i64> @llvm.x86.avx512.mask.psra.qi.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 -1) + %res3 = add <4 x i64> %res, %res1 + %res4 = add <4 x i64> %res3, %res2 + ret <4 x i64> %res4 +} + diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll index fe7bf94..ae2f8db 100644 --- a/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll +++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll @@ -4422,86 +4422,6 @@ define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x4_256(<4 x i32> %x0, <8 x ret <8 x i32> %res5 } -declare <2 x i64> @llvm.x86.avx512.mask.psra.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8) - -define <2 x i64>@test_int_x86_avx512_mask_psra_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) { -; CHECK-LABEL: test_int_x86_avx512_mask_psra_q_128: -; CHECK: ## BB#0: -; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] -; CHECK-NEXT: vpsraq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xe2,0xd1] -; CHECK-NEXT: vpsraq %xmm1, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0xe2,0xd9] -; CHECK-NEXT: vpsraq %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0xe2,0xc1] -; CHECK-NEXT: vpaddq %xmm3, %xmm2, %xmm1 ## encoding: [0x62,0xf1,0xed,0x08,0xd4,0xcb] -; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## encoding: [0x62,0xf1,0xf5,0x08,0xd4,0xc0] -; CHECK-NEXT: retq ## encoding: [0xc3] - %res = call <2 x i64> @llvm.x86.avx512.mask.psra.q.128(<2 
x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) - %res1 = call <2 x i64> @llvm.x86.avx512.mask.psra.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3) - %res2 = call <2 x i64> @llvm.x86.avx512.mask.psra.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1) - %res3 = add <2 x i64> %res, %res1 - %res4 = add <2 x i64> %res3, %res2 - ret <2 x i64> %res4 -} - -declare <4 x i64> @llvm.x86.avx512.mask.psra.q.256(<4 x i64>, <2 x i64>, <4 x i64>, i8) - -define <4 x i64>@test_int_x86_avx512_mask_psra_q_256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x2, i8 %x3) { -; CHECK-LABEL: test_int_x86_avx512_mask_psra_q_256: -; CHECK: ## BB#0: -; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] -; CHECK-NEXT: vpsraq %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0xe2,0xd1] -; CHECK-NEXT: vpsraq %xmm1, %ymm0, %ymm3 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0xe2,0xd9] -; CHECK-NEXT: vpsraq %xmm1, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0xe2,0xc1] -; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm1 ## encoding: [0x62,0xf1,0xed,0x28,0xd4,0xcb] -; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## encoding: [0x62,0xf1,0xf5,0x28,0xd4,0xc0] -; CHECK-NEXT: retq ## encoding: [0xc3] - %res = call <4 x i64> @llvm.x86.avx512.mask.psra.q.256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x2, i8 %x3) - %res1 = call <4 x i64> @llvm.x86.avx512.mask.psra.q.256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3) - %res2 = call <4 x i64> @llvm.x86.avx512.mask.psra.q.256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x2, i8 -1) - %res3 = add <4 x i64> %res, %res1 - %res4 = add <4 x i64> %res3, %res2 - ret <4 x i64> %res4 -} - -declare <2 x i64> @llvm.x86.avx512.mask.psra.qi.128(<2 x i64>, i32, <2 x i64>, i8) - -define <2 x i64>@test_int_x86_avx512_mask_psra_qi_128(<2 x i64> %x0, i32 %x1, <2 x i64> %x2, i8 %x3) { -; CHECK-LABEL: test_int_x86_avx512_mask_psra_qi_128: -; CHECK: ## BB#0: -; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce] -; CHECK-NEXT: vpsraq $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x72,0xe0,0x03] -; CHECK-NEXT: vpsraq $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0x89,0x72,0xe0,0x03] -; CHECK-NEXT: vpsraq $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x72,0xe0,0x03] -; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ## encoding: [0x62,0xf1,0xf5,0x08,0xd4,0xca] -; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## encoding: [0x62,0xf1,0xf5,0x08,0xd4,0xc0] -; CHECK-NEXT: retq ## encoding: [0xc3] - %res = call <2 x i64> @llvm.x86.avx512.mask.psra.qi.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 %x3) - %res1 = call <2 x i64> @llvm.x86.avx512.mask.psra.qi.128(<2 x i64> %x0, i32 3, <2 x i64> zeroinitializer, i8 %x3) - %res2 = call <2 x i64> @llvm.x86.avx512.mask.psra.qi.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 -1) - %res3 = add <2 x i64> %res, %res1 - %res4 = add <2 x i64> %res3, %res2 - ret <2 x i64> %res4 -} - -declare <4 x i64> @llvm.x86.avx512.mask.psra.qi.256(<4 x i64>, i32, <4 x i64>, i8) - -define <4 x i64>@test_int_x86_avx512_mask_psra_qi_256(<4 x i64> %x0, i32 %x1, <4 x i64> %x2, i8 %x3) { -; CHECK-LABEL: test_int_x86_avx512_mask_psra_qi_256: -; CHECK: ## BB#0: -; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce] -; CHECK-NEXT: vpsraq $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x72,0xe0,0x03] -; CHECK-NEXT: vpsraq $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0xa9,0x72,0xe0,0x03] -; CHECK-NEXT: vpsraq $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x72,0xe0,0x03] -; CHECK-NEXT: vpaddq %ymm2, %ymm1, 
%ymm1 ## encoding: [0x62,0xf1,0xf5,0x28,0xd4,0xca] -; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## encoding: [0x62,0xf1,0xf5,0x28,0xd4,0xc0] -; CHECK-NEXT: retq ## encoding: [0xc3] - %res = call <4 x i64> @llvm.x86.avx512.mask.psra.qi.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 %x3) - %res1 = call <4 x i64> @llvm.x86.avx512.mask.psra.qi.256(<4 x i64> %x0, i32 3, <4 x i64> zeroinitializer, i8 %x3) - %res2 = call <4 x i64> @llvm.x86.avx512.mask.psra.qi.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 -1) - %res3 = add <4 x i64> %res, %res1 - %res4 = add <4 x i64> %res3, %res2 - ret <4 x i64> %res4 -} - declare <2 x i64> @llvm.x86.avx512.mask.psrav.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8) define <2 x i64>@test_int_x86_avx512_mask_psrav_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) { @@ -4527,9 +4447,9 @@ define <2 x i64>@test_int_x86_avx512_mask_psrav_q_128_const(i8 %x3) { ; CHECK: ## BB#0: ; CHECK-NEXT: vmovdqa64 {{.*#+}} xmm0 = [2,18446744073709551607] ; CHECK-NEXT: ## encoding: [0x62,0xf1,0xfd,0x08,0x6f,0x05,A,A,A,A] -; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI270_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI266_0-4, kind: reloc_riprel_4byte ; CHECK-NEXT: vpsravq {{.*}}(%rip), %xmm0, %xmm0 ## encoding: [0x62,0xf2,0xfd,0x08,0x46,0x05,A,A,A,A] -; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI270_1-4, kind: reloc_riprel_4byte +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI266_1-4, kind: reloc_riprel_4byte ; CHECK-NEXT: retq ## encoding: [0xc3] %res = call <2 x i64> @llvm.x86.avx512.mask.psrav.q.128(<2 x i64> , <2 x i64> , <2 x i64> zeroinitializer, i8 -1) ret <2 x i64> %res -- 2.7.4
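
Note on the upgrade pattern: after this patch, each removed masked shift
intrinsic is autoupgraded to the corresponding unmasked intrinsic followed by
a vector select that applies the mask (this is what the UpgradeX86MaskedShift
calls above emit). A minimal LLVM IR sketch of the shift-by-immediate case;
the define/value names are illustrative, but the intrinsic name and signature
are the ones AutoUpgrade.cpp maps "avx512.mask.psrl.di.512" to:

declare <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32>, i32)

; Old form: call <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(
;             <16 x i32> %x0, i32 4, <16 x i32> %passthru, i16 %mask)
; is rewritten to roughly:
define <16 x i32> @upgraded_psrl_di_512(<16 x i32> %x0, <16 x i32> %passthru, i16 %mask) {
  ; unmasked shift of all 16 lanes by the immediate
  %shift = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> %x0, i32 4)
  ; the integer mask becomes a lane predicate: set bits take the shifted
  ; value, clear bits keep the passthrough operand
  %m = bitcast i16 %mask to <16 x i1>
  %res = select <16 x i1> %m, <16 x i32> %shift, <16 x i32> %passthru
  ret <16 x i32> %res
}

The zero-masked builtins simply pass zeroinitializer as %passthru, which is
why the regenerated tests check the {%k1}, {%k1} {z}, and unmasked forms of
each instruction.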
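
The shift-by-single-value forms (shift count in the low 64 bits of an XMM
operand) follow the same pattern. A sketch for the removed
"avx512.mask.psrl.w.512", assuming the unmasked @llvm.x86.avx512.psrl.w.512
keeps the <32 x i16>/<8 x i16> operand types of the masked version it
replaces:

declare <32 x i16> @llvm.x86.avx512.psrl.w.512(<32 x i16>, <8 x i16>)

define <32 x i16> @upgraded_psrl_w_512(<32 x i16> %x0, <8 x i16> %count, <32 x i16> %passthru, i32 %mask) {
  ; all 32 word lanes shift by the scalar count held in %count
  %shift = call <32 x i16> @llvm.x86.avx512.psrl.w.512(<32 x i16> %x0, <8 x i16> %count)
  %m = bitcast i32 %mask to <32 x i1>
  %res = select <32 x i1> %m, <32 x i16> %shift, <32 x i16> %passthru
  ret <32 x i16> %res
}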